#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>

static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
{
        struct block_device *bdevp;
        struct gendisk *disk;
        struct hd_struct *part;
        struct blkpg_ioctl_arg a;
        struct blkpg_partition p;
        struct disk_part_iter piter;
        long long start, length;
        int partno;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
                return -EFAULT;
        if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
                return -EFAULT;
        disk = bdev->bd_disk;
        if (bdev != bdev->bd_contains)
                return -EINVAL;
        partno = p.pno;
        if (partno <= 0)
                return -EINVAL;
        switch (a.op) {
                case BLKPG_ADD_PARTITION:
                        start = p.start >> 9;
                        length = p.length >> 9;
                        /* check for fit in a hd_struct */
                        if (sizeof(sector_t) == sizeof(long) &&
                            sizeof(long long) > sizeof(long)) {
                                long pstart = start, plength = length;
                                if (pstart != start || plength != length
                                    || pstart < 0 || plength < 0)
                                        return -EINVAL;
                        }

                        mutex_lock(&bdev->bd_mutex);

                        /* overlap? */
                        disk_part_iter_init(&piter, disk,
                                            DISK_PITER_INCL_EMPTY);
                        while ((part = disk_part_iter_next(&piter))) {
                                if (!(start + length <= part->start_sect ||
                                      start >= part->start_sect + part->nr_sects)) {
                                        disk_part_iter_exit(&piter);
                                        mutex_unlock(&bdev->bd_mutex);
                                        return -EBUSY;
                                }
                        }
                        disk_part_iter_exit(&piter);

                        /* all seems OK */
                        part = add_partition(disk, partno, start, length,
                                             ADDPART_FLAG_NONE);
                        mutex_unlock(&bdev->bd_mutex);
                        return IS_ERR(part) ? PTR_ERR(part) : 0;
                case BLKPG_DEL_PARTITION:
                        part = disk_get_part(disk, partno);
                        if (!part)
                                return -ENXIO;

                        bdevp = bdget(part_devt(part));
                        disk_put_part(part);
                        if (!bdevp)
                                return -ENOMEM;

                        mutex_lock(&bdevp->bd_mutex);
                        if (bdevp->bd_openers) {
                                mutex_unlock(&bdevp->bd_mutex);
                                bdput(bdevp);
                                return -EBUSY;
                        }
                        /* all seems OK */
                        fsync_bdev(bdevp);
                        invalidate_bdev(bdevp);

                        mutex_lock_nested(&bdev->bd_mutex, 1);
                        delete_partition(disk, partno);
                        mutex_unlock(&bdev->bd_mutex);
                        mutex_unlock(&bdevp->bd_mutex);
                        bdput(bdevp);

                        return 0;
                default:
                        return -EINVAL;
        }
}

static int blkdev_reread_part(struct block_device *bdev)
{
        struct gendisk *disk = bdev->bd_disk;
        int res;

        if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
                return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (!mutex_trylock(&bdev->bd_mutex))
                return -EBUSY;
        res = rescan_partitions(disk, bdev);
        mutex_unlock(&bdev->bd_mutex);
        return res;
}

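/*
 * Illustrative userspace sketch (not part of this file): triggering the
 * partition rescan above via BLKRRPART, roughly what fdisk or partprobe do
 * after rewriting a partition table.  "/dev/sdb" is a placeholder device.
 */
#if 0   /* example only -- build as a standalone userspace program */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKRRPART */

int main(void)
{
        int fd = open("/dev/sdb", O_RDONLY);

        if (fd < 0 || ioctl(fd, BLKRRPART) < 0)
                perror("BLKRRPART");
        if (fd >= 0)
                close(fd);
        return 0;
}
#endif
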
static void blk_ioc_discard_endio(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        complete(bio->bi_private);
}

static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
                             uint64_t len)
{
        struct request_queue *q = bdev_get_queue(bdev);
        int ret = 0;

        /* start and len arrive from userspace in bytes; convert to sectors */
        if (start & 511)
                return -EINVAL;
        if (len & 511)
                return -EINVAL;
        start >>= 9;
        len >>= 9;

        if (start + len > (bdev->bd_inode->i_size >> 9))
                return -EINVAL;

        if (!q->prepare_discard_fn)
                return -EOPNOTSUPP;

        while (len && !ret) {
                DECLARE_COMPLETION_ONSTACK(wait);
                struct bio *bio;

                bio = bio_alloc(GFP_KERNEL, 0);

                bio->bi_end_io = blk_ioc_discard_endio;
                bio->bi_bdev = bdev;
                bio->bi_private = &wait;
                bio->bi_sector = start;

                if (len > queue_max_hw_sectors(q)) {
                        bio->bi_size = queue_max_hw_sectors(q) << 9;
                        len -= queue_max_hw_sectors(q);
                        start += queue_max_hw_sectors(q);
                } else {
                        bio->bi_size = len << 9;
                        len = 0;
                }
                submit_bio(DISCARD_NOBARRIER, bio);

                wait_for_completion(&wait);

                if (bio_flagged(bio, BIO_EOPNOTSUPP))
                        ret = -EOPNOTSUPP;
                else if (!bio_flagged(bio, BIO_UPTODATE))
                        ret = -EIO;
                bio_put(bio);
        }
        return ret;
}

static int put_ushort(unsigned long arg, unsigned short val)
{
        return put_user(val, (unsigned short __user *)arg);
}

static int put_int(unsigned long arg, int val)
{
        return put_user(val, (int __user *)arg);
}

static int put_long(unsigned long arg, long val)
{
        return put_user(val, (long __user *)arg);
}

static int put_ulong(unsigned long arg, unsigned long val)
{
        return put_user(val, (unsigned long __user *)arg);
}

static int put_u64(unsigned long arg, u64 val)
{
        return put_user(val, (u64 __user *)arg);
}

int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned cmd, unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;
        int ret;

        if (disk->fops->ioctl)
                return disk->fops->ioctl(bdev, mode, cmd, arg);

        if (disk->fops->locked_ioctl) {
                lock_kernel();
                ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
                unlock_kernel();
                return ret;
        }

        return -ENOTTY;
}
/*
 * For the record: _GPL here is only because somebody decided to slap it
 * on the previous export.  Sheer idiocy, since it wasn't copyrightable
 * at all and could be open-coded without any exports by anybody who cares.
 */
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);

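/*
 * Illustrative sketch (not part of this file): the two methods that
 * __blkdev_driver_ioctl() above dispatches to.  A driver's ->ioctl is called
 * without the BKL, while ->locked_ioctl is wrapped in lock_kernel().  The
 * names example_blk_ioctl, example_fops and EXAMPLE_CMD are hypothetical.
 */
#if 0   /* example only -- sketch of a driver's block_device_operations */
#define EXAMPLE_CMD _IO(0xee, 0x01)     /* hypothetical driver-private command */

static int example_blk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned cmd, unsigned long arg)
{
        switch (cmd) {
        case EXAMPLE_CMD:
                return 0;
        default:
                return -ENOTTY;         /* let blkdev_ioctl() fall back */
        }
}

static struct block_device_operations example_fops = {
        .owner  = THIS_MODULE,
        .ioctl  = example_blk_ioctl,    /* reached via __blkdev_driver_ioctl() */
};
#endif
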
/*
 * always keep this in sync with compat_blkdev_ioctl() and
 * compat_blkdev_locked_ioctl()
 */
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                        unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;
        struct backing_dev_info *bdi;
        loff_t size;
        int ret, n;

        switch(cmd) {
        case BLKFLSBUF:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;

                ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
                /* -EINVAL to handle old uncorrected drivers */
                if (ret != -EINVAL && ret != -ENOTTY)
                        return ret;

                lock_kernel();
                fsync_bdev(bdev);
                invalidate_bdev(bdev);
                unlock_kernel();
                return 0;

        case BLKROSET:
                ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
                /* -EINVAL to handle old uncorrected drivers */
                if (ret != -EINVAL && ret != -ENOTTY)
                        return ret;
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                if (get_user(n, (int __user *)(arg)))
                        return -EFAULT;
                lock_kernel();
                set_device_ro(bdev, n);
                unlock_kernel();
                return 0;

        case BLKDISCARD: {
                uint64_t range[2];

                if (!(mode & FMODE_WRITE))
                        return -EBADF;

                if (copy_from_user(range, (void __user *)arg, sizeof(range)))
                        return -EFAULT;

                return blk_ioctl_discard(bdev, range[0], range[1]);
        }

        case HDIO_GETGEO: {
                struct hd_geometry geo;

                if (!arg)
                        return -EINVAL;
                if (!disk->fops->getgeo)
                        return -ENOTTY;

                /*
                 * We need to set the startsect first, the driver may
                 * want to override it.
                 */
                geo.start = get_start_sect(bdev);
                ret = disk->fops->getgeo(bdev, &geo);
                if (ret)
                        return ret;
                if (copy_to_user((struct hd_geometry __user *)arg, &geo,
                                        sizeof(geo)))
                        return -EFAULT;
                return 0;
        }
        case BLKRAGET:
        case BLKFRAGET:
                if (!arg)
                        return -EINVAL;
                bdi = blk_get_backing_dev_info(bdev);
                if (bdi == NULL)
                        return -ENOTTY;
                return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
        case BLKROGET:
                return put_int(arg, bdev_read_only(bdev) != 0);
        case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
                return put_int(arg, block_size(bdev));
        case BLKSSZGET: /* get block device hardware sector size */
                return put_int(arg, bdev_logical_block_size(bdev));
        case BLKSECTGET:
                return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
        case BLKRASET:
        case BLKFRASET:
                if(!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                bdi = blk_get_backing_dev_info(bdev);
                if (bdi == NULL)
                        return -ENOTTY;
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
                return 0;
        case BLKBSZSET:
                /* set the logical block size */
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                if (!arg)
                        return -EINVAL;
                if (get_user(n, (int __user *) arg))
                        return -EFAULT;
                if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
                        return -EBUSY;
                ret = set_blocksize(bdev, n);
                if (!(mode & FMODE_EXCL))
                        bd_release(bdev);
                return ret;
        case BLKPG:
                lock_kernel();
                ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
                unlock_kernel();
                break;
        case BLKRRPART:
                lock_kernel();
                ret = blkdev_reread_part(bdev);
                unlock_kernel();
                break;
        case BLKGETSIZE:
                size = bdev->bd_inode->i_size;
                if ((size >> 9) > ~0UL)
                        return -EFBIG;
                return put_ulong(arg, size >> 9);
        case BLKGETSIZE64:
                return put_u64(arg, bdev->bd_inode->i_size);
        case BLKTRACESTART:
        case BLKTRACESTOP:
        case BLKTRACESETUP:
        case BLKTRACETEARDOWN:
                lock_kernel();
                ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
                unlock_kernel();
                break;
        default:
                ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);
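
/*
 * Illustrative userspace sketch (not part of this file): exercising a few of
 * the query ioctls handled above (BLKGETSIZE64, BLKSSZGET, BLKROGET,
 * HDIO_GETGEO).  The device path is a placeholder.
 */
#if 0   /* example only -- build as a standalone userspace program */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKGETSIZE64, BLKSSZGET, BLKROGET */
#include <linux/hdreg.h>        /* struct hd_geometry, HDIO_GETGEO */

int main(void)
{
        uint64_t bytes = 0;
        int ssz = 0, ro = 0;
        struct hd_geometry geo;
        int fd = open("/dev/sdb", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BLKGETSIZE64, &bytes) == 0 &&
            ioctl(fd, BLKSSZGET, &ssz) == 0 &&
            ioctl(fd, BLKROGET, &ro) == 0 &&
            ioctl(fd, HDIO_GETGEO, &geo) == 0)
                printf("%llu bytes, %d-byte sectors, ro=%d, start=%lu\n",
                       (unsigned long long)bytes, ssz, ro, geo.start);
        close(fd);
        return 0;
}
#endif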