/*
 *  vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 *  ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 *  This version is more generic, simulating a variable number of disks
 *  (or disk-like devices) sharing a common amount of RAM. To be more
 *  realistic, the simulated devices have the transport attributes of
 *  SAS disks.
 *
 *  For documentation see http://sg.danny.cz/sg/sdebug26.html
 *
 *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
 *   dpg: work for devfs large number of disks [20010809]
 *        forked for lk 2.5 series [20011216, 20020101]
 *        use vmalloc() more inquiry+mode_sense [20020302]
 *        add timers for delayed responses [20020721]
 *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
 *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
 *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
 *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
 */
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
#define SCSI_DEBUG_VERSION "1.82"
static const char * scsi_debug_version_date = "20100324";

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define ADDR_OUT_OF_RANGE 0x21
#define INVALID_COMMAND_OPCODE 0x20
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define POWERON_RESET 0x29
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
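
/* Worked example (added for illustration, not part of the original
 * source): the number of simulated logical units is the product of
 * these parameters, so loading the module as
 *     modprobe scsi_debug add_host=2 num_tgts=3 max_luns=4
 * presents 2 * 3 * 4 = 24 LUNs to the SCSI midlayer (add_host, num_tgts
 * and max_luns are the module parameters that back the scsi_debug_*
 * variables declared below).
 */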
#define DEF_DEV_SIZE_MB   8
#define DEF_EVERY_NTH   0
#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPT_BLKS 64
#define DEF_PHYSBLK_EXP 0
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF

/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE   1
#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
#define SCSI_DEBUG_OPT_TIMEOUT   4
#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
#define SCSI_DEBUG_OPT_DIF_ERR   32
#define SCSI_DEBUG_OPT_DIX_ERR   64
#define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
/* When "every_nth" > 0 then modulo "every_nth" commands:
 *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 *   - a RECOVERED_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 *
 * When "every_nth" < 0 then after "- every_nth" commands:
 *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 *   - a RECOVERED_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue until some other action occurs (e.g. the user
 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 */
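
/* Usage example of opts/every_nth (added for illustration, not from the
 * original source): to simulate a missing response on every 100th
 * command, load the module with
 *     modprobe scsi_debug every_nth=100 opts=4
 * where opts=4 corresponds to SCSI_DEBUG_OPT_TIMEOUT above; every_nth
 * can later be changed via sysfs as noted in the preceding comment.
 */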
/* When the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a
 * medium error is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
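
/* Illustration (added, not in the original driver): with opts=2
 * (SCSI_DEBUG_OPT_MEDIUM_ERR) any READ whose range overlaps sectors
 * 0x1234..0x123d gets MEDIUM ERROR / UNRECOVERED READ ERROR sense, so
 * something like
 *     dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1
 * against a scsi_debug disk (device name is illustrative) fails with
 * an I/O error.
 */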
/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
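
/* Background note (added; a hedged summary of SAM-2, not original text):
 * in "peripheral device" addressing the first two bytes of the 8-byte
 * LUN field are simply 0x00,<lun> for LUNs below 256, while "flat space"
 * addressing sets the top two bits of the first byte to 01b, so e.g.
 * LUN 258 is encoded as 0x41,0x02.  The well known REPORT LUNS W-LUN
 * above (0xc101) uses the 11b address method.
 */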
/* Can queue up to this number of commands. Typically commands that
 * have a non-zero delay are queued. */
#define SCSI_DEBUG_CANQUEUE  255
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_dif = DEF_DIF;
static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_no_uld = 0;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int scsi_debug_lbpu = DEF_LBPU;
static unsigned int scsi_debug_lbpws = DEF_LBPWS;
static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static bool scsi_debug_removable = DEF_REMOVABLE;

static int scsi_debug_cmnd_count = 0;

#define DEV_READONLY(TGT)      (0)

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_SENSE_LEN 32

#define SCSI_DEBUG_MAX_CMD_LEN 32

static unsigned int scsi_debug_lbp(void)
{
	return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
}
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
	unsigned int channel;
	unsigned int target;
	unsigned int lun;
	struct sdebug_host_info *sdbg_host;
	unsigned int wlun;
	char reset;
	char stopped;
};

struct sdebug_host_info {
	struct list_head host_list;
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

typedef void (* done_funct_t) (struct scsi_cmnd *);

struct sdebug_queued_cmd {
	struct timer_list cmnd_timer;
	done_funct_t done_funct;
	struct scsi_cmnd * a_cmnd;
};
static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];

static unsigned char * fake_storep;	/* ramdisk storage */
static unsigned char *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
static int num_aborts = 0;
static int num_dev_resets = 0;
static int num_bus_resets = 0;
static int num_host_resets = 0;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static DEFINE_SPINLOCK(queued_arr_lock);
static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = "scsi_debug";

static struct bus_type pseudo_lld_bus;
static inline sector_t dif_offset(sector_t sector)
{
	/* one 8-byte DIF tuple is stored per sector in dif_storep */
	return sector << 3;
}
static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (scsi_debug_num_tgts > hpnt->this_id))
			hpnt->max_id = scsi_debug_num_tgts + 1;
		else
			hpnt->max_id = scsi_debug_num_tgts;
		/* scsi_debug_max_luns; */
		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
			    int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = devip->sense_buff;
	memset(sbuff, 0, SDEBUG_SENSE_LEN);

	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
		       "[0x%x,0x%x,0x%x]\n", key, asc, asq);
}
static void get_data_transfer_info(unsigned char *cmd,
				   unsigned long long *lba, unsigned int *num,
				   u32 *ei_lba)
{
	*ei_lba = 0;

	switch (*cmd) {
	case VARIABLE_LENGTH_CMD:
		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;

		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;

		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
			(u32)cmd[28] << 24;
		break;
	case WRITE_SAME_16:
	case WRITE_16:
	case READ_16:
		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;

		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
			(u32)cmd[10] << 24;
		break;
	case WRITE_12:
	case READ_12:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
			(u32)cmd[6] << 24;
		break;
	case WRITE_SAME:
	case WRITE_10:
	case READ_10:
	case XDWRITEREAD_10:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
		break;
	case WRITE_6:
	case READ_6:
		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		*num = (0 == cmd[4]) ? 256 : cmd[4];
		break;
	default:
		break;
	}
}
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
			   struct sdebug_dev_info * devip)
{
	if (devip->reset) {
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Reporting Unit "
			       "attention: power on reset\n");
		devip->reset = 0;
		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
		return check_condition_result;
	}
	if ((0 == reset_only) && devip->stopped) {
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Reporting Not "
			       "ready: initializing command required\n");
		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
				0x2);
		return check_condition_result;
	}
	return 0;
}
/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return (DID_ERROR << 16);

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	if (sdb->resid)
		sdb->resid -= act_len;
	else
		sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
/* Returns number of bytes fetched into 'arr' or -1 if error. */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char * inq_product_rev = "0004";
467 static int inquiry_evpd_83(unsigned char * arr
, int port_group_id
,
468 int target_dev_id
, int dev_id_num
,
469 const char * dev_id_str
,
475 port_a
= target_dev_id
+ 1;
476 /* T10 vendor identifier field format (faked) */
477 arr
[0] = 0x2; /* ASCII */
480 memcpy(&arr
[4], inq_vendor_id
, 8);
481 memcpy(&arr
[12], inq_product_id
, 16);
482 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
483 num
= 8 + 16 + dev_id_str_len
;
486 if (dev_id_num
>= 0) {
487 /* NAA-5, Logical unit identifier (binary) */
488 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
489 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
492 arr
[num
++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
496 arr
[num
++] = (dev_id_num
>> 24);
497 arr
[num
++] = (dev_id_num
>> 16) & 0xff;
498 arr
[num
++] = (dev_id_num
>> 8) & 0xff;
499 arr
[num
++] = dev_id_num
& 0xff;
500 /* Target relative port number */
501 arr
[num
++] = 0x61; /* proto=sas, binary */
502 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
503 arr
[num
++] = 0x0; /* reserved */
504 arr
[num
++] = 0x4; /* length */
505 arr
[num
++] = 0x0; /* reserved */
506 arr
[num
++] = 0x0; /* reserved */
508 arr
[num
++] = 0x1; /* relative port A */
510 /* NAA-5, Target port identifier */
511 arr
[num
++] = 0x61; /* proto=sas, binary */
512 arr
[num
++] = 0x93; /* piv=1, target port, naa */
515 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
519 arr
[num
++] = (port_a
>> 24);
520 arr
[num
++] = (port_a
>> 16) & 0xff;
521 arr
[num
++] = (port_a
>> 8) & 0xff;
522 arr
[num
++] = port_a
& 0xff;
523 /* NAA-5, Target port group identifier */
524 arr
[num
++] = 0x61; /* proto=sas, binary */
525 arr
[num
++] = 0x95; /* piv=1, target port group id */
530 arr
[num
++] = (port_group_id
>> 8) & 0xff;
531 arr
[num
++] = port_group_id
& 0xff;
532 /* NAA-5, Target device identifier */
533 arr
[num
++] = 0x61; /* proto=sas, binary */
534 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
537 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
541 arr
[num
++] = (target_dev_id
>> 24);
542 arr
[num
++] = (target_dev_id
>> 16) & 0xff;
543 arr
[num
++] = (target_dev_id
>> 8) & 0xff;
544 arr
[num
++] = target_dev_id
& 0xff;
545 /* SCSI name string: Target device identifier */
546 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
547 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
550 memcpy(arr
+ num
, "naa.52222220", 12);
552 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
553 memcpy(arr
+ num
, b
, 8);
555 memset(arr
+ num
, 0, 4);
561 static unsigned char vpd84_data
[] = {
562 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
563 0x22,0x22,0x22,0x0,0xbb,0x1,
564 0x22,0x22,0x22,0x0,0xbb,0x2,
567 static int inquiry_evpd_84(unsigned char * arr
)
569 memcpy(arr
, vpd84_data
, sizeof(vpd84_data
));
570 return sizeof(vpd84_data
);
573 static int inquiry_evpd_85(unsigned char * arr
)
576 const char * na1
= "https://www.kernel.org/config";
577 const char * na2
= "http://www.kernel.org/log";
580 arr
[num
++] = 0x1; /* lu, storage config */
581 arr
[num
++] = 0x0; /* reserved */
586 plen
= ((plen
/ 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
588 memcpy(arr
+ num
, na1
, olen
);
589 memset(arr
+ num
+ olen
, 0, plen
- olen
);
592 arr
[num
++] = 0x4; /* lu, logging */
593 arr
[num
++] = 0x0; /* reserved */
598 plen
= ((plen
/ 4) + 1) * 4;
599 arr
[num
++] = plen
; /* length, null terminated, padded */
600 memcpy(arr
+ num
, na2
, olen
);
601 memset(arr
+ num
+ olen
, 0, plen
- olen
);
607 /* SCSI ports VPD page */
608 static int inquiry_evpd_88(unsigned char * arr
, int target_dev_id
)
613 port_a
= target_dev_id
+ 1;
615 arr
[num
++] = 0x0; /* reserved */
616 arr
[num
++] = 0x0; /* reserved */
618 arr
[num
++] = 0x1; /* relative port 1 (primary) */
619 memset(arr
+ num
, 0, 6);
622 arr
[num
++] = 12; /* length tp descriptor */
623 /* naa-5 target port identifier (A) */
624 arr
[num
++] = 0x61; /* proto=sas, binary */
625 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
626 arr
[num
++] = 0x0; /* reserved */
627 arr
[num
++] = 0x8; /* length */
628 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
632 arr
[num
++] = (port_a
>> 24);
633 arr
[num
++] = (port_a
>> 16) & 0xff;
634 arr
[num
++] = (port_a
>> 8) & 0xff;
635 arr
[num
++] = port_a
& 0xff;
637 arr
[num
++] = 0x0; /* reserved */
638 arr
[num
++] = 0x0; /* reserved */
640 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
641 memset(arr
+ num
, 0, 6);
644 arr
[num
++] = 12; /* length tp descriptor */
645 /* naa-5 target port identifier (B) */
646 arr
[num
++] = 0x61; /* proto=sas, binary */
647 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
648 arr
[num
++] = 0x0; /* reserved */
649 arr
[num
++] = 0x8; /* length */
650 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
654 arr
[num
++] = (port_b
>> 24);
655 arr
[num
++] = (port_b
>> 16) & 0xff;
656 arr
[num
++] = (port_b
>> 8) & 0xff;
657 arr
[num
++] = port_b
& 0xff;
663 static unsigned char vpd89_data
[] = {
664 /* from 4th byte */ 0,0,0,0,
665 'l','i','n','u','x',' ',' ',' ',
666 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
668 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
670 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
671 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
672 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
673 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
675 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
677 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
679 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
680 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
681 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
683 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
684 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
685 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
690 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
691 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
692 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
707 static int inquiry_evpd_89(unsigned char * arr
)
709 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
710 return sizeof(vpd89_data
);
714 /* Block limits VPD page (SBC-3) */
715 static unsigned char vpdb0_data
[] = {
716 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
717 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
718 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
719 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
722 static int inquiry_evpd_b0(unsigned char * arr
)
726 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
728 /* Optimal transfer length granularity */
729 gran
= 1 << scsi_debug_physblk_exp
;
730 arr
[2] = (gran
>> 8) & 0xff;
731 arr
[3] = gran
& 0xff;
733 /* Maximum Transfer Length */
734 if (sdebug_store_sectors
> 0x400) {
735 arr
[4] = (sdebug_store_sectors
>> 24) & 0xff;
736 arr
[5] = (sdebug_store_sectors
>> 16) & 0xff;
737 arr
[6] = (sdebug_store_sectors
>> 8) & 0xff;
738 arr
[7] = sdebug_store_sectors
& 0xff;
741 /* Optimal Transfer Length */
742 put_unaligned_be32(scsi_debug_opt_blks
, &arr
[8]);
744 if (scsi_debug_lbpu
) {
745 /* Maximum Unmap LBA Count */
746 put_unaligned_be32(scsi_debug_unmap_max_blocks
, &arr
[16]);
748 /* Maximum Unmap Block Descriptor Count */
749 put_unaligned_be32(scsi_debug_unmap_max_desc
, &arr
[20]);
752 /* Unmap Granularity Alignment */
753 if (scsi_debug_unmap_alignment
) {
754 put_unaligned_be32(scsi_debug_unmap_alignment
, &arr
[28]);
755 arr
[28] |= 0x80; /* UGAVALID */
758 /* Optimal Unmap Granularity */
759 put_unaligned_be32(scsi_debug_unmap_granularity
, &arr
[24]);
761 /* Maximum WRITE SAME Length */
762 put_unaligned_be64(scsi_debug_write_same_length
, &arr
[32]);
764 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
766 return sizeof(vpdb0_data
);
769 /* Block device characteristics VPD page (SBC-3) */
770 static int inquiry_evpd_b1(unsigned char *arr
)
772 memset(arr
, 0, 0x3c);
774 arr
[1] = 1; /* non rotating medium (e.g. solid state) */
776 arr
[3] = 5; /* less than 1.8" */
781 /* Logical block provisioning VPD page (SBC-3) */
782 static int inquiry_evpd_b2(unsigned char *arr
)
785 arr
[0] = 0; /* threshold exponent */
790 if (scsi_debug_lbpws
)
793 if (scsi_debug_lbpws10
)
796 if (scsi_debug_lbprz
)
802 #define SDEBUG_LONG_INQ_SZ 96
803 #define SDEBUG_MAX_INQ_ARR_SZ 584
805 static int resp_inquiry(struct scsi_cmnd
* scp
, int target
,
806 struct sdebug_dev_info
* devip
)
808 unsigned char pq_pdt
;
810 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
811 int alloc_len
, n
, ret
;
813 alloc_len
= (cmd
[3] << 8) + cmd
[4];
814 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
816 return DID_REQUEUE
<< 16;
818 pq_pdt
= 0x1e; /* present, wlun */
819 else if (scsi_debug_no_lun_0
&& (0 == devip
->lun
))
820 pq_pdt
= 0x7f; /* not present, no device type */
822 pq_pdt
= (scsi_debug_ptype
& 0x1f);
824 if (0x2 & cmd
[1]) { /* CMDDT bit set */
825 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
828 return check_condition_result
;
829 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
830 int lu_id_num
, port_group_id
, target_dev_id
, len
;
832 int host_no
= devip
->sdbg_host
->shost
->host_no
;
834 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
835 (devip
->channel
& 0x7f);
836 if (0 == scsi_debug_vpd_use_hostno
)
838 lu_id_num
= devip
->wlun
? -1 : (((host_no
+ 1) * 2000) +
839 (devip
->target
* 1000) + devip
->lun
);
840 target_dev_id
= ((host_no
+ 1) * 2000) +
841 (devip
->target
* 1000) - 3;
842 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
843 if (0 == cmd
[2]) { /* supported vital product data pages */
844 arr
[1] = cmd
[2]; /*sanity */
846 arr
[n
++] = 0x0; /* this page */
847 arr
[n
++] = 0x80; /* unit serial number */
848 arr
[n
++] = 0x83; /* device identification */
849 arr
[n
++] = 0x84; /* software interface ident. */
850 arr
[n
++] = 0x85; /* management network addresses */
851 arr
[n
++] = 0x86; /* extended inquiry */
852 arr
[n
++] = 0x87; /* mode page policy */
853 arr
[n
++] = 0x88; /* SCSI ports */
854 arr
[n
++] = 0x89; /* ATA information */
855 arr
[n
++] = 0xb0; /* Block limits (SBC) */
856 arr
[n
++] = 0xb1; /* Block characteristics (SBC) */
857 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
859 arr
[3] = n
- 4; /* number of supported VPD pages */
860 } else if (0x80 == cmd
[2]) { /* unit serial number */
861 arr
[1] = cmd
[2]; /*sanity */
863 memcpy(&arr
[4], lu_id_str
, len
);
864 } else if (0x83 == cmd
[2]) { /* device identification */
865 arr
[1] = cmd
[2]; /*sanity */
866 arr
[3] = inquiry_evpd_83(&arr
[4], port_group_id
,
867 target_dev_id
, lu_id_num
,
869 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
870 arr
[1] = cmd
[2]; /*sanity */
871 arr
[3] = inquiry_evpd_84(&arr
[4]);
872 } else if (0x85 == cmd
[2]) { /* Management network addresses */
873 arr
[1] = cmd
[2]; /*sanity */
874 arr
[3] = inquiry_evpd_85(&arr
[4]);
875 } else if (0x86 == cmd
[2]) { /* extended inquiry */
876 arr
[1] = cmd
[2]; /*sanity */
877 arr
[3] = 0x3c; /* number of following entries */
878 if (scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
)
879 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
880 else if (scsi_debug_dif
)
881 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
883 arr
[4] = 0x0; /* no protection stuff */
884 arr
[5] = 0x7; /* head of q, ordered + simple q's */
885 } else if (0x87 == cmd
[2]) { /* mode page policy */
886 arr
[1] = cmd
[2]; /*sanity */
887 arr
[3] = 0x8; /* number of following entries */
888 arr
[4] = 0x2; /* disconnect-reconnect mp */
889 arr
[6] = 0x80; /* mlus, shared */
890 arr
[8] = 0x18; /* protocol specific lu */
891 arr
[10] = 0x82; /* mlus, per initiator port */
892 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
893 arr
[1] = cmd
[2]; /*sanity */
894 arr
[3] = inquiry_evpd_88(&arr
[4], target_dev_id
);
895 } else if (0x89 == cmd
[2]) { /* ATA information */
896 arr
[1] = cmd
[2]; /*sanity */
897 n
= inquiry_evpd_89(&arr
[4]);
900 } else if (0xb0 == cmd
[2]) { /* Block limits (SBC) */
901 arr
[1] = cmd
[2]; /*sanity */
902 arr
[3] = inquiry_evpd_b0(&arr
[4]);
903 } else if (0xb1 == cmd
[2]) { /* Block characteristics (SBC) */
904 arr
[1] = cmd
[2]; /*sanity */
905 arr
[3] = inquiry_evpd_b1(&arr
[4]);
906 } else if (0xb2 == cmd
[2]) { /* Logical Block Prov. (SBC) */
907 arr
[1] = cmd
[2]; /*sanity */
908 arr
[3] = inquiry_evpd_b2(&arr
[4]);
910 /* Illegal request, invalid field in cdb */
911 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
912 INVALID_FIELD_IN_CDB
, 0);
914 return check_condition_result
;
916 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
917 ret
= fill_from_dev_buffer(scp
, arr
,
918 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
922 /* drops through here for a standard inquiry */
923 arr
[1] = scsi_debug_removable
? 0x80 : 0; /* Removable disk */
924 arr
[2] = scsi_debug_scsi_level
;
925 arr
[3] = 2; /* response_data_format==2 */
926 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
927 arr
[5] = scsi_debug_dif
? 1 : 0; /* PROTECT bit */
928 if (0 == scsi_debug_vpd_use_hostno
)
929 arr
[5] = 0x10; /* claim: implicit TGPS */
930 arr
[6] = 0x10; /* claim: MultiP */
931 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
932 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
933 memcpy(&arr
[8], inq_vendor_id
, 8);
934 memcpy(&arr
[16], inq_product_id
, 16);
935 memcpy(&arr
[32], inq_product_rev
, 4);
936 /* version descriptors (2 bytes each) follow */
937 arr
[58] = 0x0; arr
[59] = 0x77; /* SAM-3 ANSI */
938 arr
[60] = 0x3; arr
[61] = 0x14; /* SPC-3 ANSI */
940 if (scsi_debug_ptype
== 0) {
941 arr
[n
++] = 0x3; arr
[n
++] = 0x3d; /* SBC-2 ANSI */
942 } else if (scsi_debug_ptype
== 1) {
943 arr
[n
++] = 0x3; arr
[n
++] = 0x60; /* SSC-2 no version */
945 arr
[n
++] = 0xc; arr
[n
++] = 0xf; /* SAS-1.1 rev 10 */
946 ret
= fill_from_dev_buffer(scp
, arr
,
947 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
952 static int resp_requests(struct scsi_cmnd
* scp
,
953 struct sdebug_dev_info
* devip
)
955 unsigned char * sbuff
;
956 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
957 unsigned char arr
[SDEBUG_SENSE_LEN
];
961 memset(arr
, 0, sizeof(arr
));
962 if (devip
->reset
== 1)
963 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
964 want_dsense
= !!(cmd
[1] & 1) || scsi_debug_dsense
;
965 sbuff
= devip
->sense_buff
;
966 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
969 arr
[1] = 0x0; /* NO_SENSE in sense_key */
970 arr
[2] = THRESHOLD_EXCEEDED
;
971 arr
[3] = 0xff; /* TEST set and MRIE==6 */
974 arr
[2] = 0x0; /* NO_SENSE in sense_key */
975 arr
[7] = 0xa; /* 18 byte sense buffer */
976 arr
[12] = THRESHOLD_EXCEEDED
;
977 arr
[13] = 0xff; /* TEST set and MRIE==6 */
980 memcpy(arr
, sbuff
, SDEBUG_SENSE_LEN
);
981 if ((cmd
[1] & 1) && (! scsi_debug_dsense
)) {
982 /* DESC bit set and sense_buff in fixed format */
983 memset(arr
, 0, sizeof(arr
));
985 arr
[1] = sbuff
[2]; /* sense key */
986 arr
[2] = sbuff
[12]; /* asc */
987 arr
[3] = sbuff
[13]; /* ascq */
991 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
992 return fill_from_dev_buffer(scp
, arr
, len
);
995 static int resp_start_stop(struct scsi_cmnd
* scp
,
996 struct sdebug_dev_info
* devip
)
998 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
999 int power_cond
, errsts
, start
;
1001 if ((errsts
= check_readiness(scp
, 1, devip
)))
1003 power_cond
= (cmd
[4] & 0xf0) >> 4;
1005 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1007 return check_condition_result
;
1010 if (start
== devip
->stopped
)
1011 devip
->stopped
= !start
;
static sector_t get_sdebug_capacity(void)
{
	if (scsi_debug_virtual_gb > 0)
		return (sector_t)scsi_debug_virtual_gb *
			(1073741824 / scsi_debug_sector_size);
	else
		return sdebug_store_sectors;
}
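
/* Worked example (added for illustration): with the default 512 byte
 * sector size, virtual_gb=1 reports 1073741824 / 512 = 2097152 sectors
 * (1 GiB) even if the backing RAM store is smaller; accesses beyond the
 * store wrap around within fake_storep (see do_device_access() below).
 */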
1024 #define SDEBUG_READCAP_ARR_SZ 8
1025 static int resp_readcap(struct scsi_cmnd
* scp
,
1026 struct sdebug_dev_info
* devip
)
1028 unsigned char arr
[SDEBUG_READCAP_ARR_SZ
];
1032 if ((errsts
= check_readiness(scp
, 1, devip
)))
1034 /* following just in case virtual_gb changed */
1035 sdebug_capacity
= get_sdebug_capacity();
1036 memset(arr
, 0, SDEBUG_READCAP_ARR_SZ
);
1037 if (sdebug_capacity
< 0xffffffff) {
1038 capac
= (unsigned int)sdebug_capacity
- 1;
1039 arr
[0] = (capac
>> 24);
1040 arr
[1] = (capac
>> 16) & 0xff;
1041 arr
[2] = (capac
>> 8) & 0xff;
1042 arr
[3] = capac
& 0xff;
1049 arr
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1050 arr
[7] = scsi_debug_sector_size
& 0xff;
1051 return fill_from_dev_buffer(scp
, arr
, SDEBUG_READCAP_ARR_SZ
);
1054 #define SDEBUG_READCAP16_ARR_SZ 32
1055 static int resp_readcap16(struct scsi_cmnd
* scp
,
1056 struct sdebug_dev_info
* devip
)
1058 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1059 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1060 unsigned long long capac
;
1061 int errsts
, k
, alloc_len
;
1063 if ((errsts
= check_readiness(scp
, 1, devip
)))
1065 alloc_len
= ((cmd
[10] << 24) + (cmd
[11] << 16) + (cmd
[12] << 8)
1067 /* following just in case virtual_gb changed */
1068 sdebug_capacity
= get_sdebug_capacity();
1069 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1070 capac
= sdebug_capacity
- 1;
1071 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1072 arr
[7 - k
] = capac
& 0xff;
1073 arr
[8] = (scsi_debug_sector_size
>> 24) & 0xff;
1074 arr
[9] = (scsi_debug_sector_size
>> 16) & 0xff;
1075 arr
[10] = (scsi_debug_sector_size
>> 8) & 0xff;
1076 arr
[11] = scsi_debug_sector_size
& 0xff;
1077 arr
[13] = scsi_debug_physblk_exp
& 0xf;
1078 arr
[14] = (scsi_debug_lowest_aligned
>> 8) & 0x3f;
1080 if (scsi_debug_lbp()) {
1081 arr
[14] |= 0x80; /* LBPME */
1082 if (scsi_debug_lbprz
)
1083 arr
[14] |= 0x40; /* LBPRZ */
1086 arr
[15] = scsi_debug_lowest_aligned
& 0xff;
1088 if (scsi_debug_dif
) {
1089 arr
[12] = (scsi_debug_dif
- 1) << 1; /* P_TYPE */
1090 arr
[12] |= 1; /* PROT_EN */
1093 return fill_from_dev_buffer(scp
, arr
,
1094 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1097 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1099 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1100 struct sdebug_dev_info
* devip
)
1102 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1103 unsigned char * arr
;
1104 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1105 int n
, ret
, alen
, rlen
;
1106 int port_group_a
, port_group_b
, port_a
, port_b
;
1108 alen
= ((cmd
[6] << 24) + (cmd
[7] << 16) + (cmd
[8] << 8)
1111 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1113 return DID_REQUEUE
<< 16;
1115 * EVPD page 0x88 states we have two ports, one
1116 * real and a fake port with no device connected.
1117 * So we create two port groups with one port each
1118 * and set the group with port B to unavailable.
1120 port_a
= 0x1; /* relative port A */
1121 port_b
= 0x2; /* relative port B */
1122 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1123 (devip
->channel
& 0x7f);
1124 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1125 (devip
->channel
& 0x7f) + 0x80;
1128 * The asymmetric access state is cycled according to the host_id.
1131 if (0 == scsi_debug_vpd_use_hostno
) {
1132 arr
[n
++] = host_no
% 3; /* Asymm access state */
1133 arr
[n
++] = 0x0F; /* claim: all states are supported */
1135 arr
[n
++] = 0x0; /* Active/Optimized path */
1136 arr
[n
++] = 0x01; /* claim: only support active/optimized paths */
1138 arr
[n
++] = (port_group_a
>> 8) & 0xff;
1139 arr
[n
++] = port_group_a
& 0xff;
1140 arr
[n
++] = 0; /* Reserved */
1141 arr
[n
++] = 0; /* Status code */
1142 arr
[n
++] = 0; /* Vendor unique */
1143 arr
[n
++] = 0x1; /* One port per group */
1144 arr
[n
++] = 0; /* Reserved */
1145 arr
[n
++] = 0; /* Reserved */
1146 arr
[n
++] = (port_a
>> 8) & 0xff;
1147 arr
[n
++] = port_a
& 0xff;
1148 arr
[n
++] = 3; /* Port unavailable */
1149 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1150 arr
[n
++] = (port_group_b
>> 8) & 0xff;
1151 arr
[n
++] = port_group_b
& 0xff;
1152 arr
[n
++] = 0; /* Reserved */
1153 arr
[n
++] = 0; /* Status code */
1154 arr
[n
++] = 0; /* Vendor unique */
1155 arr
[n
++] = 0x1; /* One port per group */
1156 arr
[n
++] = 0; /* Reserved */
1157 arr
[n
++] = 0; /* Reserved */
1158 arr
[n
++] = (port_b
>> 8) & 0xff;
1159 arr
[n
++] = port_b
& 0xff;
1162 arr
[0] = (rlen
>> 24) & 0xff;
1163 arr
[1] = (rlen
>> 16) & 0xff;
1164 arr
[2] = (rlen
>> 8) & 0xff;
1165 arr
[3] = rlen
& 0xff;
1168 * Return the smallest value of either
1169 * - The allocated length
1170 * - The constructed command length
1171 * - The maximum array size
1174 ret
= fill_from_dev_buffer(scp
, arr
,
1175 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1180 /* <<Following mode page info copied from ST318451LW>> */
1182 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1183 { /* Read-Write Error Recovery page for mode_sense */
1184 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1187 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1189 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1190 return sizeof(err_recov_pg
);
1193 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1194 { /* Disconnect-Reconnect page for mode_sense */
1195 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1196 0, 0, 0, 0, 0, 0, 0, 0};
1198 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1200 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1201 return sizeof(disconnect_pg
);
1204 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1205 { /* Format device page for mode_sense */
1206 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1207 0, 0, 0, 0, 0, 0, 0, 0,
1208 0, 0, 0, 0, 0x40, 0, 0, 0};
1210 memcpy(p
, format_pg
, sizeof(format_pg
));
1211 p
[10] = (sdebug_sectors_per
>> 8) & 0xff;
1212 p
[11] = sdebug_sectors_per
& 0xff;
1213 p
[12] = (scsi_debug_sector_size
>> 8) & 0xff;
1214 p
[13] = scsi_debug_sector_size
& 0xff;
1215 if (scsi_debug_removable
)
1216 p
[20] |= 0x20; /* should agree with INQUIRY */
1218 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1219 return sizeof(format_pg
);
1222 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1223 { /* Caching page for mode_sense */
1224 unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1225 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1227 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1229 memset(p
+ 2, 0, sizeof(caching_pg
) - 2);
1230 return sizeof(caching_pg
);
1233 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1234 { /* Control mode page for mode_sense */
1235 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1237 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1240 if (scsi_debug_dsense
)
1241 ctrl_m_pg
[2] |= 0x4;
1243 ctrl_m_pg
[2] &= ~0x4;
1246 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1248 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1250 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1251 else if (2 == pcontrol
)
1252 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1253 return sizeof(ctrl_m_pg
);
1257 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1258 { /* Informational Exceptions control mode page for mode_sense */
1259 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1261 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1264 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1266 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1267 else if (2 == pcontrol
)
1268 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1269 return sizeof(iec_m_pg
);
1272 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1273 { /* SAS SSP mode page - short format for mode_sense */
1274 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1275 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1277 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1279 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1280 return sizeof(sas_sf_m_pg
);
1284 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1286 { /* SAS phy control and discover mode page for mode_sense */
1287 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1288 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1289 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1290 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1291 0x2, 0, 0, 0, 0, 0, 0, 0,
1292 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1293 0, 0, 0, 0, 0, 0, 0, 0,
1294 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1295 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1296 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1297 0x3, 0, 0, 0, 0, 0, 0, 0,
1298 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1299 0, 0, 0, 0, 0, 0, 0, 0,
1303 port_a
= target_dev_id
+ 1;
1304 port_b
= port_a
+ 1;
1305 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1306 p
[20] = (port_a
>> 24);
1307 p
[21] = (port_a
>> 16) & 0xff;
1308 p
[22] = (port_a
>> 8) & 0xff;
1309 p
[23] = port_a
& 0xff;
1310 p
[48 + 20] = (port_b
>> 24);
1311 p
[48 + 21] = (port_b
>> 16) & 0xff;
1312 p
[48 + 22] = (port_b
>> 8) & 0xff;
1313 p
[48 + 23] = port_b
& 0xff;
1315 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1316 return sizeof(sas_pcd_m_pg
);
1319 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1320 { /* SAS SSP shared protocol specific port mode subpage */
1321 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1322 0, 0, 0, 0, 0, 0, 0, 0,
1325 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1327 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1328 return sizeof(sas_sha_m_pg
);
1331 #define SDEBUG_MAX_MSENSE_SZ 256
1333 static int resp_mode_sense(struct scsi_cmnd
* scp
, int target
,
1334 struct sdebug_dev_info
* devip
)
1336 unsigned char dbd
, llbaa
;
1337 int pcontrol
, pcode
, subpcode
, bd_len
;
1338 unsigned char dev_spec
;
1339 int k
, alloc_len
, msense_6
, offset
, len
, errsts
, target_dev_id
;
1341 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1342 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1344 if ((errsts
= check_readiness(scp
, 1, devip
)))
1346 dbd
= !!(cmd
[1] & 0x8);
1347 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1348 pcode
= cmd
[2] & 0x3f;
1350 msense_6
= (MODE_SENSE
== cmd
[0]);
1351 llbaa
= msense_6
? 0 : !!(cmd
[1] & 0x10);
1352 if ((0 == scsi_debug_ptype
) && (0 == dbd
))
1353 bd_len
= llbaa
? 16 : 8;
1356 alloc_len
= msense_6
? cmd
[4] : ((cmd
[7] << 8) | cmd
[8]);
1357 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1358 if (0x3 == pcontrol
) { /* Saving values not supported */
1359 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
,
1361 return check_condition_result
;
1363 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1364 (devip
->target
* 1000) - 3;
1365 /* set DPOFUA bit for disks */
1366 if (0 == scsi_debug_ptype
)
1367 dev_spec
= (DEV_READONLY(target
) ? 0x80 : 0x0) | 0x10;
1377 arr
[4] = 0x1; /* set LONGLBA bit */
1378 arr
[7] = bd_len
; /* assume 255 or less */
1382 if ((bd_len
> 0) && (!sdebug_capacity
))
1383 sdebug_capacity
= get_sdebug_capacity();
1386 if (sdebug_capacity
> 0xfffffffe) {
1392 ap
[0] = (sdebug_capacity
>> 24) & 0xff;
1393 ap
[1] = (sdebug_capacity
>> 16) & 0xff;
1394 ap
[2] = (sdebug_capacity
>> 8) & 0xff;
1395 ap
[3] = sdebug_capacity
& 0xff;
1397 ap
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1398 ap
[7] = scsi_debug_sector_size
& 0xff;
1401 } else if (16 == bd_len
) {
1402 unsigned long long capac
= sdebug_capacity
;
1404 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1405 ap
[7 - k
] = capac
& 0xff;
1406 ap
[12] = (scsi_debug_sector_size
>> 24) & 0xff;
1407 ap
[13] = (scsi_debug_sector_size
>> 16) & 0xff;
1408 ap
[14] = (scsi_debug_sector_size
>> 8) & 0xff;
1409 ap
[15] = scsi_debug_sector_size
& 0xff;
1414 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
1415 /* TODO: Control Extension page */
1416 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1418 return check_condition_result
;
1421 case 0x1: /* Read-Write error recovery page, direct access */
1422 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1425 case 0x2: /* Disconnect-Reconnect page, all devices */
1426 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
1429 case 0x3: /* Format device page, direct access */
1430 len
= resp_format_pg(ap
, pcontrol
, target
);
1433 case 0x8: /* Caching page, direct access */
1434 len
= resp_caching_pg(ap
, pcontrol
, target
);
1437 case 0xa: /* Control Mode page, all devices */
1438 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
1441 case 0x19: /* if spc==1 then sas phy, control+discover */
1442 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
1443 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1444 INVALID_FIELD_IN_CDB
, 0);
1445 return check_condition_result
;
1448 if ((0x0 == subpcode
) || (0xff == subpcode
))
1449 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1450 if ((0x1 == subpcode
) || (0xff == subpcode
))
1451 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
1453 if ((0x2 == subpcode
) || (0xff == subpcode
))
1454 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1457 case 0x1c: /* Informational Exceptions Mode page, all devices */
1458 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
1461 case 0x3f: /* Read all Mode pages */
1462 if ((0 == subpcode
) || (0xff == subpcode
)) {
1463 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1464 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
1465 len
+= resp_format_pg(ap
+ len
, pcontrol
, target
);
1466 len
+= resp_caching_pg(ap
+ len
, pcontrol
, target
);
1467 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
1468 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1469 if (0xff == subpcode
) {
1470 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
1471 target
, target_dev_id
);
1472 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1474 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
1476 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1477 INVALID_FIELD_IN_CDB
, 0);
1478 return check_condition_result
;
1483 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1485 return check_condition_result
;
1488 arr
[0] = offset
- 1;
1490 arr
[0] = ((offset
- 2) >> 8) & 0xff;
1491 arr
[1] = (offset
- 2) & 0xff;
1493 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
1496 #define SDEBUG_MAX_MSELECT_SZ 512
1498 static int resp_mode_select(struct scsi_cmnd
* scp
, int mselect6
,
1499 struct sdebug_dev_info
* devip
)
1501 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
1502 int param_len
, res
, errsts
, mpage
;
1503 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
1504 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1506 if ((errsts
= check_readiness(scp
, 1, devip
)))
1508 memset(arr
, 0, sizeof(arr
));
1511 param_len
= mselect6
? cmd
[4] : ((cmd
[7] << 8) + cmd
[8]);
1512 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
1513 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1514 INVALID_FIELD_IN_CDB
, 0);
1515 return check_condition_result
;
1517 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
1519 return (DID_ERROR
<< 16);
1520 else if ((res
< param_len
) &&
1521 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
1522 printk(KERN_INFO
"scsi_debug: mode_select: cdb indicated=%d, "
1523 " IO sent=%d bytes\n", param_len
, res
);
1524 md_len
= mselect6
? (arr
[0] + 1) : ((arr
[0] << 8) + arr
[1] + 2);
1525 bd_len
= mselect6
? arr
[3] : ((arr
[6] << 8) + arr
[7]);
1527 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1528 INVALID_FIELD_IN_PARAM_LIST
, 0);
1529 return check_condition_result
;
1531 off
= bd_len
+ (mselect6
? 4 : 8);
1532 mpage
= arr
[off
] & 0x3f;
1533 ps
= !!(arr
[off
] & 0x80);
1535 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1536 INVALID_FIELD_IN_PARAM_LIST
, 0);
1537 return check_condition_result
;
1539 spf
= !!(arr
[off
] & 0x40);
1540 pg_len
= spf
? ((arr
[off
+ 2] << 8) + arr
[off
+ 3] + 4) :
1542 if ((pg_len
+ off
) > param_len
) {
1543 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1544 PARAMETER_LIST_LENGTH_ERR
, 0);
1545 return check_condition_result
;
1548 case 0xa: /* Control Mode page */
1549 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
1550 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
1551 sizeof(ctrl_m_pg
) - 2);
1552 scsi_debug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
1556 case 0x1c: /* Informational Exceptions Mode page */
1557 if (iec_m_pg
[1] == arr
[off
+ 1]) {
1558 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
1559 sizeof(iec_m_pg
) - 2);
1566 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1567 INVALID_FIELD_IN_PARAM_LIST
, 0);
1568 return check_condition_result
;
1571 static int resp_temp_l_pg(unsigned char * arr
)
1573 unsigned char temp_l_pg
[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1574 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1577 memcpy(arr
, temp_l_pg
, sizeof(temp_l_pg
));
1578 return sizeof(temp_l_pg
);
1581 static int resp_ie_l_pg(unsigned char * arr
)
1583 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1586 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
1587 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
1588 arr
[4] = THRESHOLD_EXCEEDED
;
1591 return sizeof(ie_l_pg
);
1594 #define SDEBUG_MAX_LSENSE_SZ 512
1596 static int resp_log_sense(struct scsi_cmnd
* scp
,
1597 struct sdebug_dev_info
* devip
)
1599 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, errsts
, len
, n
;
1600 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
1601 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1603 if ((errsts
= check_readiness(scp
, 1, devip
)))
1605 memset(arr
, 0, sizeof(arr
));
1609 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1610 INVALID_FIELD_IN_CDB
, 0);
1611 return check_condition_result
;
1613 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1614 pcode
= cmd
[2] & 0x3f;
1615 subpcode
= cmd
[3] & 0xff;
1616 alloc_len
= (cmd
[7] << 8) + cmd
[8];
1618 if (0 == subpcode
) {
1620 case 0x0: /* Supported log pages log page */
1622 arr
[n
++] = 0x0; /* this page */
1623 arr
[n
++] = 0xd; /* Temperature */
1624 arr
[n
++] = 0x2f; /* Informational exceptions */
1627 case 0xd: /* Temperature log page */
1628 arr
[3] = resp_temp_l_pg(arr
+ 4);
1630 case 0x2f: /* Informational exceptions log page */
1631 arr
[3] = resp_ie_l_pg(arr
+ 4);
1634 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1635 INVALID_FIELD_IN_CDB
, 0);
1636 return check_condition_result
;
1638 } else if (0xff == subpcode
) {
1642 case 0x0: /* Supported log pages and subpages log page */
1645 arr
[n
++] = 0x0; /* 0,0 page */
1647 arr
[n
++] = 0xff; /* this page */
1649 arr
[n
++] = 0x0; /* Temperature */
1651 arr
[n
++] = 0x0; /* Informational exceptions */
1654 case 0xd: /* Temperature subpages */
1657 arr
[n
++] = 0x0; /* Temperature */
1660 case 0x2f: /* Informational exceptions subpages */
1663 arr
[n
++] = 0x0; /* Informational exceptions */
1667 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1668 INVALID_FIELD_IN_CDB
, 0);
1669 return check_condition_result
;
1672 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1673 INVALID_FIELD_IN_CDB
, 0);
1674 return check_condition_result
;
1676 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
1677 return fill_from_dev_buffer(scp
, arr
,
1678 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1681 static int check_device_access_params(struct sdebug_dev_info
*devi
,
1682 unsigned long long lba
, unsigned int num
)
1684 if (lba
+ num
> sdebug_capacity
) {
1685 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, ADDR_OUT_OF_RANGE
, 0);
1686 return check_condition_result
;
1688 /* transfer length excessive (tie in to block limits VPD page) */
1689 if (num
> sdebug_store_sectors
) {
1690 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
1691 return check_condition_result
;
1696 static int do_device_access(struct scsi_cmnd
*scmd
,
1697 struct sdebug_dev_info
*devi
,
1698 unsigned long long lba
, unsigned int num
, int write
)
1701 unsigned long long block
, rest
= 0;
1702 int (*func
)(struct scsi_cmnd
*, unsigned char *, int);
1704 func
= write
? fetch_to_dev_buffer
: fill_from_dev_buffer
;
1706 block
= do_div(lba
, sdebug_store_sectors
);
1707 if (block
+ num
> sdebug_store_sectors
)
1708 rest
= block
+ num
- sdebug_store_sectors
;
1710 ret
= func(scmd
, fake_storep
+ (block
* scsi_debug_sector_size
),
1711 (num
- rest
) * scsi_debug_sector_size
);
1713 ret
= func(scmd
, fake_storep
, rest
* scsi_debug_sector_size
);
1718 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1719 unsigned int sectors
, u32 ei_lba
)
1721 unsigned int i
, resid
;
1722 struct scatterlist
*psgl
;
1723 struct sd_dif_tuple
*sdt
;
1725 sector_t tmp_sec
= start_sec
;
1728 start_sec
= do_div(tmp_sec
, sdebug_store_sectors
);
1730 sdt
= (struct sd_dif_tuple
*)(dif_storep
+ dif_offset(start_sec
));
1732 for (i
= 0 ; i
< sectors
; i
++) {
1735 if (sdt
[i
].app_tag
== 0xffff)
1738 sector
= start_sec
+ i
;
1740 switch (scsi_debug_guard
) {
1742 csum
= ip_compute_csum(fake_storep
+
1743 sector
* scsi_debug_sector_size
,
1744 scsi_debug_sector_size
);
1747 csum
= crc_t10dif(fake_storep
+
1748 sector
* scsi_debug_sector_size
,
1749 scsi_debug_sector_size
);
1750 csum
= cpu_to_be16(csum
);
1756 if (sdt
[i
].guard_tag
!= csum
) {
1757 printk(KERN_ERR
"%s: GUARD check failed on sector %lu" \
1758 " rcvd 0x%04x, data 0x%04x\n", __func__
,
1759 (unsigned long)sector
,
1760 be16_to_cpu(sdt
[i
].guard_tag
),
1766 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1767 be32_to_cpu(sdt
[i
].ref_tag
) != (sector
& 0xffffffff)) {
1768 printk(KERN_ERR
"%s: REF check failed on sector %lu\n",
1769 __func__
, (unsigned long)sector
);
1774 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1775 be32_to_cpu(sdt
[i
].ref_tag
) != ei_lba
) {
1776 printk(KERN_ERR
"%s: REF check failed on sector %lu\n",
1777 __func__
, (unsigned long)sector
);
1785 resid
= sectors
* 8; /* Bytes of protection data to copy into sgl */
1788 scsi_for_each_prot_sg(SCpnt
, psgl
, scsi_prot_sg_count(SCpnt
), i
) {
1789 int len
= min(psgl
->length
, resid
);
1791 paddr
= kmap_atomic(sg_page(psgl
)) + psgl
->offset
;
1792 memcpy(paddr
, dif_storep
+ dif_offset(sector
), len
);
1795 if (sector
>= sdebug_store_sectors
) {
1798 sector
= do_div(tmp_sec
, sdebug_store_sectors
);
1801 kunmap_atomic(paddr
);
1809 static int resp_read(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
1810 unsigned int num
, struct sdebug_dev_info
*devip
,
1813 unsigned long iflags
;
1816 ret
= check_device_access_params(devip
, lba
, num
);
1820 if ((SCSI_DEBUG_OPT_MEDIUM_ERR
& scsi_debug_opts
) &&
1821 (lba
<= (OPT_MEDIUM_ERR_ADDR
+ OPT_MEDIUM_ERR_NUM
- 1)) &&
1822 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
)) {
1823 /* claim unrecoverable read error */
1824 mk_sense_buffer(devip
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
, 0);
1825 /* set info field and valid bit for fixed descriptor */
1826 if (0x70 == (devip
->sense_buff
[0] & 0x7f)) {
1827 devip
->sense_buff
[0] |= 0x80; /* Valid bit */
1828 ret
= (lba
< OPT_MEDIUM_ERR_ADDR
)
1829 ? OPT_MEDIUM_ERR_ADDR
: (int)lba
;
1830 devip
->sense_buff
[3] = (ret
>> 24) & 0xff;
1831 devip
->sense_buff
[4] = (ret
>> 16) & 0xff;
1832 devip
->sense_buff
[5] = (ret
>> 8) & 0xff;
1833 devip
->sense_buff
[6] = ret
& 0xff;
1835 scsi_set_resid(SCpnt
, scsi_bufflen(SCpnt
));
1836 return check_condition_result
;
1840 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
1841 int prot_ret
= prot_verify_read(SCpnt
, lba
, num
, ei_lba
);
1844 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, prot_ret
);
1845 return illegal_condition_result
;
1849 read_lock_irqsave(&atomic_rw
, iflags
);
1850 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 0);
1851 read_unlock_irqrestore(&atomic_rw
, iflags
);
1855 void dump_sector(unsigned char *buf
, int len
)
1859 printk(KERN_ERR
">>> Sector Dump <<<\n");
1861 for (i
= 0 ; i
< len
; i
+= 16) {
1862 printk(KERN_ERR
"%04d: ", i
);
1864 for (j
= 0 ; j
< 16 ; j
++) {
1865 unsigned char c
= buf
[i
+j
];
1866 if (c
>= 0x20 && c
< 0x7e)
1867 printk(" %c ", buf
[i
+j
]);
1869 printk("%02x ", buf
[i
+j
]);
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int i, j, ret;
	struct sd_dif_tuple *sdt;
	struct scatterlist *dsgl = scsi_sglist(SCpnt);
	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
	void *daddr, *paddr;
	sector_t tmp_sec = start_sec;
	sector_t sector;
	int ppage_offset;
	unsigned short csum;

	sector = do_div(tmp_sec, sdebug_store_sectors);

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
	ppage_offset = 0;

	/* For each data page */
	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
		daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;

		/* For each sector-sized chunk in data page */
		for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {

			/* If we're at the end of the current
			 * protection page advance to the next one
			 */
			if (ppage_offset >= psgl->length) {
				kunmap_atomic(paddr);
				psgl = sg_next(psgl);
				BUG_ON(psgl == NULL);
				paddr = kmap_atomic(sg_page(psgl))
					+ psgl->offset;
				ppage_offset = 0;
			}

			sdt = paddr + ppage_offset;

			switch (scsi_debug_guard) {
			case 1:
				csum = ip_compute_csum(daddr,
						       scsi_debug_sector_size);
				break;
			case 0:
				csum = cpu_to_be16(crc_t10dif(daddr,
						      scsi_debug_sector_size));
				break;
			default:
				BUG();
				ret = 0;
				goto out;
			}

			if (sdt->guard_tag != csum) {
				printk(KERN_ERR
				       "%s: GUARD check failed on sector %lu " \
				       "rcvd 0x%04x, calculated 0x%04x\n",
				       __func__, (unsigned long)sector,
				       be16_to_cpu(sdt->guard_tag),
				       be16_to_cpu(csum));
				ret = 0x01;
				dump_sector(daddr, scsi_debug_sector_size);
				goto out;
			}

			if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
			    be32_to_cpu(sdt->ref_tag)
			    != (start_sec & 0xffffffff)) {
				printk(KERN_ERR
				       "%s: REF check failed on sector %lu\n",
				       __func__, (unsigned long)sector);
				ret = 0x03;
				dump_sector(daddr, scsi_debug_sector_size);
				goto out;
			}

			if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
			    be32_to_cpu(sdt->ref_tag) != ei_lba) {
				printk(KERN_ERR
				       "%s: REF check failed on sector %lu\n",
				       __func__, (unsigned long)sector);
				ret = 0x03;
				dump_sector(daddr, scsi_debug_sector_size);
				goto out;
			}

			/* Would be great to copy this in bigger
			 * chunks. However, for the sake of
			 * correctness we need to verify each sector
			 * before writing it to "stable" storage
			 */
			memcpy(dif_storep + dif_offset(sector), sdt, 8);

			sector++;
			if (sector == sdebug_store_sectors)
				sector = 0;	/* Force wrap */

			start_sec++;
			ei_lba++;
			daddr += scsi_debug_sector_size;
			ppage_offset += sizeof(struct sd_dif_tuple);
		}

		kunmap_atomic(daddr);
	}

	kunmap_atomic(paddr);

	dix_writes++;

	return 0;

out:
	dif_errors++;
	kunmap_atomic(daddr);
	kunmap_atomic(paddr);
	return ret;
}
static unsigned int map_state(sector_t lba, unsigned int *num)
{
	unsigned int granularity, alignment, mapped;
	sector_t block, next, end;

	granularity = scsi_debug_unmap_granularity;
	alignment = granularity - scsi_debug_unmap_alignment;
	block = lba + alignment;
	do_div(block, granularity);

	mapped = test_bit(block, map_storep);

	if (mapped)
		next = find_next_zero_bit(map_storep, map_size, block);
	else
		next = find_next_bit(map_storep, map_size, block);

	end = next * granularity - scsi_debug_unmap_alignment;
	*num = end - lba;

	return mapped;
}
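
/*
 * Worked example for map_state(): with unmap_granularity=4 and
 * unmap_alignment=1, alignment = 4 - 1 = 3, so LBA 5 gives
 * block = (5 + 3) / 4 = 2, i.e. provisioning block 2 of the bitmap.
 * *num is then the distance from lba to the start of the next run of
 * blocks whose mapped state differs, which is exactly what the
 * GET LBA STATUS response reports back.
 */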
static void map_region(sector_t lba, unsigned int len)
{
	unsigned int granularity, alignment;
	sector_t end = lba + len;

	granularity = scsi_debug_unmap_granularity;
	alignment = granularity - scsi_debug_unmap_alignment;

	while (lba < end) {
		sector_t block, rem;

		block = lba + alignment;
		rem = do_div(block, granularity);

		if (block < map_size)
			set_bit(block, map_storep);

		lba += granularity - rem;
	}
}
static void unmap_region(sector_t lba, unsigned int len)
{
	unsigned int granularity, alignment;
	sector_t end = lba + len;

	granularity = scsi_debug_unmap_granularity;
	alignment = granularity - scsi_debug_unmap_alignment;

	while (lba < end) {
		sector_t block, rem;

		block = lba + alignment;
		rem = do_div(block, granularity);

		if (rem == 0 && lba + granularity < end && block < map_size) {
			clear_bit(block, map_storep);
			if (scsi_debug_lbprz)
				memset(fake_storep +
				       block * scsi_debug_sector_size, 0,
				       scsi_debug_sector_size);
		}
		lba += granularity - rem;
	}
}
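
/*
 * Note the asymmetry between map_region() and unmap_region(): writes mark
 * every provisioning block they touch, even partially, while an unmap only
 * clears (and, with lbprz, zero-fills) blocks that are wholly covered and
 * properly aligned; partial granules at the edges of an UNMAP stay mapped.
 */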
static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
		      unsigned int num, struct sdebug_dev_info *devip,
		      u32 ei_lba)
{
	unsigned long iflags;
	int ret;

	ret = check_device_access_params(devip, lba, num);
	if (ret)
		return ret;

	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);

		if (prot_ret) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	write_lock_irqsave(&atomic_rw, iflags);
	ret = do_device_access(SCpnt, devip, lba, num, 1);
	if (scsi_debug_unmap_granularity)
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (-1 == ret)
		return (DID_ERROR << 16);
	else if ((ret < (num * scsi_debug_sector_size)) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);

	return 0;
}
static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
			   unsigned int num, struct sdebug_dev_info *devip,
			   u32 ei_lba, unsigned int unmap)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;

	ret = check_device_access_params(devip, lba, num);
	if (ret)
		return ret;

	if (num > scsi_debug_write_same_length) {
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_unmap_granularity) {
		unmap_region(lba, num);
		goto out;
	}

	/* Else fetch one logical block */
	ret = fetch_to_dev_buffer(scmd,
				  fake_storep + (lba * scsi_debug_sector_size),
				  scsi_debug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return (DID_ERROR << 16);
	} else if ((ret < (num * scsi_debug_sector_size)) &&
		   (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
		       fake_storep + (lba * scsi_debug_sector_size),
		       scsi_debug_sector_size);

	if (scsi_debug_unmap_granularity)
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
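
/*
 * resp_write_same() has two modes: with the UNMAP bit set (and thin
 * provisioning enabled) it simply unmaps the range, otherwise it fetches a
 * single logical block from the initiator and replicates it across the
 * remaining blocks of the range, then marks the range mapped.
 */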
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};

static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;

	ret = check_readiness(scmd, 1, devip);
	if (ret)
		return ret;

	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
	BUG_ON(scsi_bufflen(scmd) != payload_len);

	descriptors = (payload_len - 8) / 16;

	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf)
		return check_condition_result;

	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(devip, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	kfree(buf);

	return ret;
}
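
/*
 * UNMAP parameter list layout assumed by resp_unmap() above (per SBC):
 *     bytes 0..1   UNMAP data length (payload length minus 2)
 *     bytes 2..3   UNMAP block descriptor data length (descriptors * 16)
 *     bytes 4..7   reserved
 *     bytes 8..    16-byte descriptors: 8-byte LBA, 4-byte block count,
 *                  4 reserved bytes
 * hence descriptors = (payload_len - 8) / 16 and desc starting at &buf[8].
 */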
#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd * scmd,
			       struct sdebug_dev_info * devip)
{
	unsigned long long lba;
	unsigned int alloc_len, mapped, num;
	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
	int ret;

	ret = check_readiness(scmd, 1, devip);
	if (ret)
		return ret;

	lba = get_unaligned_be64(&scmd->cmnd[2]);
	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(devip, lba, 1);
	if (ret)
		return ret;

	mapped = map_state(lba, &num);

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
	put_unaligned_be64(lba, &arr[8]);	/* LBA */
	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
	arr[20] = !mapped;		/* mapped = 0, unmapped = 1 */

	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
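
/*
 * The 32-byte GET LBA STATUS response built above: a 4-byte parameter data
 * length (20), 4 reserved bytes, then a single LBA status descriptor with
 * the starting LBA at offset 8, the number of blocks at offset 16 and the
 * provisioning status (0 = mapped, 1 = deallocated) at offset 20.
 */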
#define SDEBUG_RLUN_ARR_SZ 256

static int resp_report_luns(struct scsi_cmnd * scp,
			    struct sdebug_dev_info * devip)
{
	unsigned int alloc_len;
	int lun_cnt, i, upper, num, n, wlun, lun;
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int select_report = (int)cmd[2];
	struct scsi_lun *one_lun;
	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
	unsigned char * max_addr;

	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
	if ((alloc_len < 4) || (select_report > 2)) {
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
	lun_cnt = scsi_debug_max_luns;
	if (1 == select_report)
		lun_cnt = 0;
	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;
	wlun = (select_report > 0) ? 1 : 0;
	num = lun_cnt + wlun;
	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
		      sizeof(struct scsi_lun)), num);
	if (n < num) {
		wlun = 0;
		lun_cnt = n;
	}
	one_lun = (struct scsi_lun *) &arr[8];
	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
	     ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
	     i++, lun++) {
		upper = (lun >> 8) & 0x3f;
		if (upper)
			one_lun[i].scsi_lun[0] =
			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
		one_lun[i].scsi_lun[1] = lun & 0xff;
	}
	if (wlun) {
		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
		i++;
	}
	alloc_len = (unsigned char *)(one_lun + i) - arr;
	return fill_from_dev_buffer(scp, arr,
				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
}
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int i, j, ret = -1;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scatterlist *sg;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	/* better not to use temporary buffer. */
	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf)
		return ret;

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
		kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!kaddr)
			goto out;

		for (j = 0; j < sg->length; j++)
			*(kaddr + sg->offset + j) ^= *(buf + offset + j);

		offset += sg->length;
		kunmap_atomic(kaddr);
	}
	ret = 0;
out:
	kfree(buf);

	return ret;
}
/* When timer goes off this function is called. */
static void timer_intr_handler(unsigned long indx)
{
	struct sdebug_queued_cmd * sqcp;
	unsigned long iflags;

	if (indx >= scsi_debug_max_queue) {
		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
		       "large\n");
		return;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[(int)indx];
	if (! sqcp->in_use) {
		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
		       "interrupt\n");
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return;
	}
	sqcp->in_use = 0;
	if (sqcp->done_funct) {
		sqcp->a_cmnd->result = sqcp->scsi_result;
		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
	}
	sqcp->done_funct = NULL;
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
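
/*
 * timer_intr_handler() is the back half of the delayed-response machinery:
 * schedule_resp() parks a command in queued_arr[] and arms its timer, and
 * when the timer fires the saved scsi_result is stored in the command and
 * the saved done() callback completes it to the midlayer.
 */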
static struct sdebug_dev_info *
sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		devip->sdbg_host = sdbg_host;
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info * open_devip = NULL;
	struct sdebug_dev_info * devip =
		(struct sdebug_dev_info *)sdev->hostdata;

	if (devip)
		return devip;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		printk(KERN_ERR "Host info NULL\n");
		return NULL;
	}
	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	open_devip->reset = 1;
	open_devip->used = 1;
	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
	if (scsi_debug_dsense)
		open_devip->sense_buff[0] = 0x72;
	else {
		open_devip->sense_buff[0] = 0x70;
		open_devip->sense_buff[7] = 0xa;
	}
	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;

	return open_devip;
}
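
/*
 * Sense data initialised above follows the two SPC formats: 0x72 selects
 * descriptor-format sense (option dsense=1), while 0x70 is fixed-format
 * sense with byte 7 (additional sense length) preset to 0xa.
 */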
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
	devip = devInfoReg(sdp);
	if (NULL == devip)
		return 1;	/* no resources, will be marked offline */
	sdp->hostdata = devip;
	if (sdp->host->cmd_per_lun)
		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
					sdp->host->cmd_per_lun);
	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
	if (scsi_debug_no_uld)
		sdp->no_uld_attach = 1;
	return 0;
}
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = 0;
		sdp->hostdata = NULL;
	}
}
/* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < scsi_debug_max_queue; ++k) {
		sqcp = &queued_arr[k];
		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
			del_timer_sync(&sqcp->cmnd_timer);
			sqcp->in_use = 0;
			sqcp->a_cmnd = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	return (k < scsi_debug_max_queue) ? 1 : 0;
}
/* Deletes (stops) timers of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < scsi_debug_max_queue; ++k) {
		sqcp = &queued_arr[k];
		if (sqcp->in_use && sqcp->a_cmnd) {
			del_timer_sync(&sqcp->cmnd_timer);
			sqcp->in_use = 0;
			sqcp->a_cmnd = NULL;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: abort\n");
	++num_aborts;
	stop_queued_cmnd(SCpnt);
	return SUCCESS;
}
static int scsi_debug_biosparam(struct scsi_device *sdev,
		struct block_device * bdev, sector_t capacity, int *info)
{
	int res;
	unsigned char *buf;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: biosparam\n");
	buf = scsi_bios_ptable(bdev);
	if (buf) {
		res = scsi_partsize(buf, capacity,
				    &info[2], &info[0], &info[1]);
		kfree(buf);
		if (0 == res)
			return 0;
	}
	info[0] = sdebug_heads;
	info[1] = sdebug_sectors_per;
	info[2] = sdebug_cylinders_per;
	return 0;
}
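
/*
 * If no usable geometry can be deduced from a partition table on the RAM
 * disk, the biosparam hook above falls back to the synthetic
 * heads/sectors/cylinders values that scsi_debug_init() derives from the
 * store size.
 */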
static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_dev_info * devip;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: device_reset\n");
	++num_dev_resets;
	if (SCpnt) {
		devip = devInfoReg(SCpnt->device);
		if (devip)
			devip->reset = 1;
	}
	return SUCCESS;
}
static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info * dev_info;
	struct scsi_device * sdp;
	struct Scsi_Host * hp;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: bus_reset\n");
	++num_bus_resets;
	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(dev_info,
					    &sdbg_host->dev_info_list,
					    dev_list)
				dev_info->reset = 1;
		}
	}
	return SUCCESS;
}
static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info * dev_info;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: host_reset\n");
	++num_host_resets;
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
				    dev_list)
			dev_info->reset = 1;
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	return SUCCESS;
}
/* Initializes timers in queued array */
static void __init init_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd * sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < scsi_debug_max_queue; ++k) {
		sqcp = &queued_arr[k];
		init_timer(&sqcp->cmnd_timer);
		sqcp->in_use = 0;
		sqcp->a_cmnd = NULL;
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
		return;
	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
		       "partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / scsi_debug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < scsi_debug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[scsi_debug_num_parts] = num_sectors;
	starts[scsi_debug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;

	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = start_sec;
		pp->nr_sects = end_sec - start_sec + 1;
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
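
/*
 * Geometry example for sdebug_build_parts(): with sdebug_heads=32 and
 * sdebug_sectors_per=32, heads_by_sects = 1024, so a partition starting at
 * sector 2048 gets cyl = 2048 / 1024 = 2, head = (2048 - 2048) / 32 = 0 and
 * sector = (2048 % 32) + 1 = 1, i.e. partitions are aligned to cylinder
 * boundaries and track 0 is left for the partition table itself.
 */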
static int schedule_resp(struct scsi_cmnd * cmnd,
			 struct sdebug_dev_info * devip,
			 done_funct_t done, int scsi_result, int delta_jiff)
{
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
		if (scsi_result) {
			struct scsi_device * sdp = cmnd->device;

			printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
			       "non-zero result=0x%x\n", sdp->host->host_no,
			       sdp->channel, sdp->id, sdp->lun, scsi_result);
		}
	}
	if (cmnd && devip) {
		/* simulate autosense by this driver */
		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
			memcpy(cmnd->sense_buffer, devip->sense_buff,
			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
	}
	if (delta_jiff <= 0) {
		if (cmnd)
			cmnd->result = scsi_result;
		if (done)
			done(cmnd);
		return 0;
	} else {
		unsigned long iflags;
		int k;
		struct sdebug_queued_cmd * sqcp = NULL;

		spin_lock_irqsave(&queued_arr_lock, iflags);
		for (k = 0; k < scsi_debug_max_queue; ++k) {
			sqcp = &queued_arr[k];
			if (! sqcp->in_use)
				break;
		}
		if (k >= scsi_debug_max_queue) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
			return 1;	/* report busy to mid level */
		}
		sqcp->in_use = 1;
		sqcp->a_cmnd = cmnd;
		sqcp->scsi_result = scsi_result;
		sqcp->done_funct = done;
		sqcp->cmnd_timer.function = timer_intr_handler;
		sqcp->cmnd_timer.data = k;
		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
		add_timer(&sqcp->cmnd_timer);
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return 0;
	}
}
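
/*
 * schedule_resp() is the single exit point for command emulation: with
 * delay (delta_jiff) <= 0 the result is set and done() is called inline;
 * otherwise the command is stored in a free queued_arr[] slot and completed
 * later by timer_intr_handler(). A return of 1 when no slot is free tells
 * the midlayer the host is busy.
 */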
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, scsi_debug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);

MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
static char sdebug_info[256];

static const char * scsi_debug_info(struct Scsi_Host * shp)
{
	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
		scsi_debug_version_date, scsi_debug_dev_size_mb,
		scsi_debug_opts);
	return sdebug_info;
}
/* scsi_debug_proc_info
 * Used if the driver currently has no own support for /proc/scsi
 */
static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
				int length, int inout)
{
	int len, pos, begin;
	int orig_length;

	orig_length = length;

	if (inout == 1) {
		char arr[16];
		int minLen = length > 15 ? 15 : length;

		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		memcpy(arr, buffer, minLen);
		arr[minLen] = '\0';
		if (1 != sscanf(arr, "%d", &pos))
			return -EINVAL;
		scsi_debug_opts = pos;
		if (scsi_debug_every_nth != 0)
			scsi_debug_cmnd_count = 0;
		return length;
	}

	begin = 0;
	pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
	    "%s [%s]\n"
	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
	    "every_nth=%d(curr:%d)\n"
	    "delay=%d, max_luns=%d, scsi_level=%d\n"
	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
	    scsi_debug_cmnd_count, scsi_debug_delay,
	    scsi_debug_max_luns, scsi_debug_scsi_level,
	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
	    num_host_resets, dix_reads, dix_writes, dif_errors);
	if (pos < offset) {
		len = 0;
		begin = pos;
	}
	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if (len > orig_length)
		len = orig_length;
	return len;
}
static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
}

static ssize_t sdebug_delay_store(struct device_driver * ddp,
				  const char * buf, size_t count)
{
	int delay;
	char work[20];

	if (1 == sscanf(buf, "%10s", work)) {
		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
			scsi_debug_delay = delay;
			return count;
		}
	}
	return -EINVAL;
}
DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
	    sdebug_delay_store);

static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
}

static ssize_t sdebug_opts_store(struct device_driver * ddp,
				 const char * buf, size_t count)
{
	int opts;
	char work[20];

	if (1 == sscanf(buf, "%10s", work)) {
		if (0 == strnicmp(work, "0x", 2)) {
			if (1 == sscanf(&work[2], "%x", &opts))
				goto opts_done;
		} else {
			if (1 == sscanf(work, "%d", &opts))
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	scsi_debug_opts = opts;
	scsi_debug_cmnd_count = 0;
	return count;
}
DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
	    sdebug_opts_store);

static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
}
static ssize_t sdebug_ptype_store(struct device_driver * ddp,
				  const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_ptype = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);

static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
}
static ssize_t sdebug_dsense_store(struct device_driver * ddp,
				   const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_dsense = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
	    sdebug_dsense_store);

static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
}
static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
				    const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
	    sdebug_fake_rw_store);
static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
}
static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
	    sdebug_no_lun_0_store);

static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
}
static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
	    sdebug_num_tgts_store);

static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
}
DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);

static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
}
DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);

static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
}
static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
				      const char * buf, size_t count)
{
	int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		scsi_debug_every_nth = nth;
		scsi_debug_cmnd_count = 0;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
	    sdebug_every_nth_store);

static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
}
static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_max_luns = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
	    sdebug_max_luns_store);

static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
}
static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
				      const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SCSI_DEBUG_CANQUEUE)) {
		scsi_debug_max_queue = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
	    sdebug_max_queue_store);
static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
}
DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);

static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
}
DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);

static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
}
static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
				       const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_virtual_gb = n;

		sdebug_capacity = get_sdebug_capacity();

		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
	    sdebug_virtual_gb_store);

static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
}

static ssize_t sdebug_add_host_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			sdebug_add_adapter();
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_remove_adapter();
		} while (++delta_hosts);
	}
	return count;
}
DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
	    sdebug_add_host_store);

static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
					  char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
}
static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
					   const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
	    sdebug_vpd_use_hostno_store);
static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
}
DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);

static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
}
DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);

static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
}
DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);

static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
}
DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);

static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
}
DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);

static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);

	buf[count++] = '\n';
	buf[count] = 0;

	return count;
}
DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);

static ssize_t sdebug_removable_show(struct device_driver *ddp,
				     char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
}
static ssize_t sdebug_removable_store(struct device_driver *ddp,
				      const char *buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
	    sdebug_removable_store);
/* Note: The following function creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: sdebug_add_host_store() above.
 */
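
/* For example, once the driver is loaded:
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * adds two more simulated hosts via sdebug_add_host_store(), whereas
 * writing the same value to /sys/module/scsi_debug/parameters/add_host
 * would only change the variable without calling sdebug_add_adapter().
 */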
static int do_create_driverfs_files(void)
{
	int ret;

	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
	return ret;
}
static void do_remove_driverfs_files(void)
{
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
}
struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	switch (scsi_debug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
		       scsi_debug_sector_size);
		return -EINVAL;
	}

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE0_PROTECTION:
	case SD_DIF_TYPE1_PROTECTION:
	case SD_DIF_TYPE2_PROTECTION:
	case SD_DIF_TYPE3_PROTECTION:
		break;

	default:
		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (scsi_debug_guard > 1) {
		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (scsi_debug_ato > 1) {
		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (scsi_debug_physblk_exp > 15) {
		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
		       scsi_debug_physblk_exp);
		return -EINVAL;
	}

	if (scsi_debug_lowest_aligned > 0x3fff) {
		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
		       scsi_debug_lowest_aligned);
		return -EINVAL;
	}

	if (scsi_debug_dev_size_mb < 1)
		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / scsi_debug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (scsi_debug_dev_size_mb >= 16)
		sdebug_heads = 32;
	else if (scsi_debug_dev_size_mb >= 256)
		sdebug_heads = 64;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}

	fake_storep = vmalloc(sz);
	if (NULL == fake_storep) {
		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
		return -ENOMEM;
	}
	memset(fake_storep, 0, sz);
	if (scsi_debug_num_parts > 0)
		sdebug_build_parts(fake_storep, sz);

	if (scsi_debug_dif) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
		dif_storep = vmalloc(dif_size);

		printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
		       dif_size, dif_storep);

		if (dif_storep == NULL) {
			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		unsigned int map_bytes;

		scsi_debug_unmap_max_blocks =
			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);

		scsi_debug_unmap_max_desc =
			clamp(scsi_debug_unmap_max_desc, 0U, 256U);

		scsi_debug_unmap_granularity =
			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);

		if (scsi_debug_unmap_alignment &&
		    scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
			printk(KERN_ERR
			       "%s: ERR: unmap_granularity < unmap_alignment\n",
			       __func__);
			return -EINVAL;
		}

		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
		map_bytes = map_size >> 3;
		map_storep = vmalloc(map_bytes);

		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
		       map_size);

		if (map_storep == NULL) {
			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(map_storep, 0x0, map_bytes);

		/* Map first 1KB for partition table */
		if (scsi_debug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
		       ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
		       ret);
		goto bus_unreg;
	}
	ret = do_create_driverfs_files();
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
		       ret);
		goto del_files;
	}

	init_all_queued();

	host_to_add = scsi_debug_add_host;
	scsi_debug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			printk(KERN_ERR "scsi_debug_init: "
			       "sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
		       scsi_debug_add_host);
	}
	return 0;

del_files:
	do_remove_driverfs_files();
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	if (map_storep)
		vfree(map_storep);
	if (dif_storep)
		vfree(dif_storep);
	vfree(fake_storep);

	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = scsi_debug_add_host;

	stop_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	do_remove_driverfs_files();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	if (dif_storep)
		vfree(dif_storep);

	vfree(fake_storep);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (NULL == sdbg_host) {
		printk(KERN_ERR "%s: out of memory at line %d\n",
		       __func__, __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++scsi_debug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info * sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--scsi_debug_add_host;
}
int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
{
	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
	int len, k;
	unsigned int num;
	unsigned long long lba;
	u32 ei_lba;
	int errsts = 0;
	int target = SCpnt->device->id;
	struct sdebug_dev_info *devip = NULL;
	int inj_recovered = 0;
	int inj_transport = 0;
	int inj_dif = 0;
	int inj_dix = 0;
	int delay_override = 0;
	int unmap = 0;

	scsi_set_resid(SCpnt, 0);
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
		printk(KERN_INFO "scsi_debug: cmd ");
		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
			printk("%02x ", (int)cmd[k]);
		printk("\n");
	}

	if (target == SCpnt->device->host->hostt->this_id) {
		printk(KERN_INFO "scsi_debug: initiator's id used as "
		       "target!\n");
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	}

	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	devip = devInfoReg(SCpnt->device);
	if (NULL == devip)
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);

	if ((scsi_debug_every_nth != 0) &&
	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
		scsi_debug_cmnd_count = 0;
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0; /* ignore command causing timeout */
		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
			 scsi_medium_access_command(SCpnt))
			return 0; /* time out reads and writes */
		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
			inj_recovered = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
			inj_transport = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
			inj_dif = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
			inj_dix = 1; /* to reads and writes below */
	}

	if (devip->wlun) {
		switch (*cmd) {
		case INQUIRY:
		case REQUEST_SENSE:
		case TEST_UNIT_READY:
		case REPORT_LUNS:
			break;  /* only allowable wlun commands */
		default:
			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
				       "not supported for wlun\n", *cmd);
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			return schedule_resp(SCpnt, devip, done, errsts,
					     0);
		}
	}

	switch (*cmd) {
	case INQUIRY:     /* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_inquiry(SCpnt, target, devip);
		break;
	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_requests(SCpnt, devip);
		break;
	case REZERO_UNIT:	/* actually this is REWIND for SSC */
	case START_STOP:
		errsts = resp_start_stop(SCpnt, devip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
			       cmd[4] ? "inhibited" : "enabled");
		break;
	case SEND_DIAGNOSTIC:     /* mandatory */
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case TEST_UNIT_READY:     /* mandatory */
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case RESERVE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RESERVE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case READ_CAPACITY:
		errsts = resp_readcap(SCpnt, devip);
		break;
	case SERVICE_ACTION_IN:
		if (cmd[1] == SAI_READ_CAPACITY_16)
			errsts = resp_readcap16(SCpnt, devip);
		else if (cmd[1] == SAI_GET_LBA_STATUS) {

			if (scsi_debug_lbp() == 0) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_COMMAND_OPCODE, 0);
				errsts = check_condition_result;
			} else
				errsts = resp_get_lba_status(SCpnt, devip);
		} else {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
		}
		break;
	case MAINTENANCE_IN:
		if (MI_REPORT_TARGET_PGS != cmd[1]) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}
		errsts = resp_report_tgtpgs(SCpnt, devip);
		break;
	case READ_16:
	case READ_12:
	case READ_10:
		/* READ{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case READ_6:
read:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_transport && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case REPORT_LUNS:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_report_luns(SCpnt, devip);
		break;
	case VERIFY:		/* 10 byte SBC-2 command */
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_16:
	case WRITE_12:
	case WRITE_10:
		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case WRITE_6:
write:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case WRITE_SAME_16:
	case WRITE_SAME:
		if (cmd[1] & 0x8) {
			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_FIELD_IN_CDB, 0);
				errsts = check_condition_result;
			} else
				unmap = 1;
		}
		if (errsts)
			break;
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
		break;
	case UNMAP:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;

		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
		} else
			errsts = resp_unmap(SCpnt, devip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		errsts = resp_mode_sense(SCpnt, target, devip);
		break;
	case MODE_SELECT:
		errsts = resp_mode_select(SCpnt, 1, devip);
		break;
	case MODE_SELECT_10:
		errsts = resp_mode_select(SCpnt, 0, devip);
		break;
	case LOG_SENSE:
		errsts = resp_log_sense(SCpnt, devip);
		break;
	case SYNCHRONIZE_CACHE:
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_BUFFER:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case XDWRITEREAD_10:
		if (!scsi_bidi_cmnd(SCpnt)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			errsts = check_condition_result;
			break;
		}

		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
		break;
	case VARIABLE_LENGTH_CMD:
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {

			if ((cmd[10] & 0xe0) == 0)
				printk(KERN_ERR
				       "Unprotected RD/WR to DIF device\n");

			if (cmd[9] == READ_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto read;
			}

			if (cmd[9] == WRITE_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto write;
			}
		}

		mk_sense_buffer(devip, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		errsts = check_condition_result;
		break;
	default:
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
			       "supported\n", *cmd);
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;	/* Unit attention takes precedence */
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	}
	return schedule_resp(SCpnt, devip, done, errsts,
			     (delay_override ? 0 : scsi_debug_delay));
}
static DEF_SCSI_QCMD(scsi_debug_queuecommand)

static struct scsi_host_template sdebug_driver_template = {
	.proc_info =		scsi_debug_proc_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.bios_param =		scsi_debug_biosparam,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		256,
	.cmd_per_lun =		16,
	.max_sectors =		0xffff,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
};
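
/*
 * DEF_SCSI_QCMD() above wraps scsi_debug_queuecommand_lck() with the usual
 * midlayer locking boilerplate, producing the scsi_debug_queuecommand()
 * entry point referenced in the host template.
 */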
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int host_prot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = scsi_debug_max_queue;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
		error = -ENODEV;
		return error;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */

	host_prot = 0;

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
static int sdebug_driver_remove(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		printk(KERN_ERR "%s: Unable to locate host info\n",
		       __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,