/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};
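/*
 * One queue_sysfs_entry describes one file in the queue's sysfs
 * directory; queue_attr_show() and queue_attr_store() below look the
 * entry up via to_queue() and dispatch to these handlers under
 * q->sysfs_lock.
 */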
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}
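/*
 * The store handlers below follow a common pattern (sketch):
 *
 *      unsigned long val;
 *      ssize_t ret = queue_var_store(&val, page, count);
 *      if (ret < 0)
 *              return ret;
 *      ... validate val and apply it, typically under q->queue_lock ...
 *      return ret;
 */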
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        /* Resize the request pool on the legacy or the blk-mq path. */
        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}
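/*
 * read_ahead_kb is stored internally in pages.  With 4K pages
 * (PAGE_CACHE_SHIFT == 12) the shift is 12 - 10 = 2, so e.g. 128 KB of
 * readahead corresponds to 128 >> 2 = 32 pages.
 */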
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));

        return queue_var_show(PAGE_CACHE_SIZE, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_same_sectors << 9);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
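/*
 * For example, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) generates
 * queue_show_nonrot()/queue_store_nonrot() for the "rotational" file;
 * the neg == 1 argument inverts the value, so "rotational" reads 0 when
 * QUEUE_FLAG_NONROT is set.
 */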
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
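/*
 * Accepted "nomerges" values: 0 re-enables all merging, 1 sets only
 * QUEUE_FLAG_NOXMERGES (skip the more expensive merge lookups), and 2
 * sets QUEUE_FLAG_NOMERGES (no merging at all), matching the encoding
 * returned by queue_nomerges_show() above.
 */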
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}
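/*
 * "rq_affinity" values: 0 clears both flags (no completion affinity),
 * 1 sets QUEUE_FLAG_SAME_COMP (complete on a CPU sharing a cache
 * domain with the submitter), and 2 additionally sets
 * QUEUE_FLAG_SAME_FORCE (complete on the exact submitting CPU).
 */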
static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        NULL,
};
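/*
 * Everything in default_attrs[] appears as a file under
 * /sys/block/<disk>/queue/.  Example usage from user space (with a
 * hypothetical disk named sda):
 *
 *      $ cat /sys/block/sda/queue/rotational
 *      $ echo 2 > /sys/block/sda/queue/rq_affinity
 */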
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;

        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        blkcg_exit_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
        else
                blk_mq_release(q);

        blk_trace_shutdown(q);

        ida_simple_remove(&blk_queue_ida, q->id);

        /* The queue itself is freed after an RCU grace period. */
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static const struct sysfs_ops queue_sysfs_ops = {
        .show = queue_attr_show,
        .store = queue_attr_store,
};
struct kobj_type blk_queue_ktype = {
        .sysfs_ops = &queue_sysfs_ops,
        .default_attrs = default_attrs,
        .release = blk_release_queue,
};
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                blk_queue_bypass_end(q);
                if (q->mq_ops)
                        blk_mq_finish_init(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (q->mq_ops)
                blk_mq_register_disk(disk);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }

        return 0;
}
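/*
 * blk_register_queue() is not normally called by drivers directly;
 * add_disk() invokes it when the gendisk is registered, and
 * blk_unregister_queue() below is its counterpart on teardown.
 */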
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->mq_ops)
                blk_mq_unregister_disk(disk);

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}