[linux-2.6.9-moxart.git] arch/parisc/kernel/ioctl32.c
/* $Id: ioctl32.c,v 1.5 2002/10/18 00:21:43 varenet Exp $
 * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
 *
 * Copyright (C) 1997-2000  Jakub Jelinek  (jakub@redhat.com)
 * Copyright (C) 1998  Eddie C. Dost  (ecd@skynet.be)
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * ioctls.
 */
#include <linux/syscalls.h>

#define INCLUDES
#include "compat_ioctl.c"

#include <asm/perf.h>
#include <asm/ioctls.h>

#define CODE
#include "compat_ioctl.c"
/* Use this to get at 32-bit user passed pointers.
   See sys_sparc32.c for description about these. */
#define A(__x) ((unsigned long)(__x))
/* The same for use with copy_from_user() and copy_to_user(). */
#define B(__x) ((void *)(unsigned long)(__x))
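/*
 * Every translation handler below follows the same basic pattern: fetch the
 * 32-bit layout field by field with get_user(), widen any u32 "pointer"
 * members with A(), then issue the native ioctl on a kernel-space copy under
 * KERNEL_DS and write the results back out.  A minimal sketch of that
 * pattern (illustrative only, not a handler defined in this file):
 *
 *	if (get_user(karg.len, &uarg32->len) || get_user(tmp, &uarg32->ptr))
 *		return -EFAULT;
 *	uptr = (char *) A(tmp);
 *
 *	old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	ret = sys_ioctl(fd, NATIVE_CMD, (unsigned long) &karg);
 *	set_fs(old_fs);
 *
 *	if (!ret && copy_to_user(uptr, karg.buf, karg.len))
 *		ret = -EFAULT;
 */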
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* This really belongs in include/linux/drm.h -DaveM */
#include "../../../drivers/char/drm/drm.h"
typedef struct drm32_version {
	int	version_major;		/* Major version		*/
	int	version_minor;		/* Minor version		*/
	int	version_patchlevel;	/* Patch level			*/
	int	name_len;		/* Length of name buffer	*/
	u32	name;			/* Name of driver		*/
	int	date_len;		/* Length of date buffer	*/
	u32	date;			/* User-space buffer to hold date */
	int	desc_len;		/* Length of desc buffer	*/
	u32	desc;			/* User-space buffer to hold desc */
} drm32_version_t;
#define DRM32_IOCTL_VERSION	DRM_IOWR(0x00, drm32_version_t)
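/*
 * Note: in the native drm_version_t the name/date/desc members are (char *)
 * pointers, while here they are u32 handles.  That size difference is why
 * drm32_version() below widens them with A() and shuttles the strings
 * through kernel bounce buffers.
 */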
static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_version_t *uversion = (drm32_version_t *)arg;
	char *name_ptr, *date_ptr, *desc_ptr;
	u32 tmp1, tmp2, tmp3;
	drm_version_t kversion;
	mm_segment_t old_fs;
	int ret;

	memset(&kversion, 0, sizeof(kversion));
	if (get_user(kversion.name_len, &uversion->name_len) ||
	    get_user(kversion.date_len, &uversion->date_len) ||
	    get_user(kversion.desc_len, &uversion->desc_len) ||
	    get_user(tmp1, &uversion->name) ||
	    get_user(tmp2, &uversion->date) ||
	    get_user(tmp3, &uversion->desc))
		return -EFAULT;

	name_ptr = (char *) A(tmp1);
	date_ptr = (char *) A(tmp2);
	desc_ptr = (char *) A(tmp3);

	ret = -ENOMEM;
	if (kversion.name_len && name_ptr) {
		kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
		if (!kversion.name)
			goto out;
	}
	if (kversion.date_len && date_ptr) {
		kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
		if (!kversion.date)
			goto out;
	}
	if (kversion.desc_len && desc_ptr) {
		kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
		if (!kversion.desc)
			goto out;
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
	set_fs(old_fs);

	if (!ret) {
		if ((kversion.name &&
		     copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
		    (kversion.date &&
		     copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
		    (kversion.desc &&
		     copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
			ret = -EFAULT;
		if (put_user(kversion.version_major, &uversion->version_major) ||
		    put_user(kversion.version_minor, &uversion->version_minor) ||
		    put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
		    put_user(kversion.name_len, &uversion->name_len) ||
		    put_user(kversion.date_len, &uversion->date_len) ||
		    put_user(kversion.desc_len, &uversion->desc_len))
			ret = -EFAULT;
	}

out:
	if (kversion.name)
		kfree(kversion.name);
	if (kversion.date)
		kfree(kversion.date);
	if (kversion.desc)
		kfree(kversion.desc);
	return ret;
}
typedef struct drm32_unique {
	int	unique_len;	/* Length of unique			*/
	u32	unique;		/* Unique name for driver instantiation	*/
} drm32_unique_t;
#define DRM32_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, drm32_unique_t)
#define DRM32_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, drm32_unique_t)
static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_unique_t *uarg = (drm32_unique_t *)arg;
	drm_unique_t karg;
	mm_segment_t old_fs;
	char *uptr;
	u32 tmp;
	int ret;

	if (get_user(karg.unique_len, &uarg->unique_len))
		return -EFAULT;
	karg.unique = NULL;

	if (get_user(tmp, &uarg->unique))
		return -EFAULT;

	uptr = (char *) A(tmp);

	if (uptr) {
		karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
		if (!karg.unique)
			return -ENOMEM;
		if (cmd == DRM32_IOCTL_SET_UNIQUE &&
		    copy_from_user(karg.unique, uptr, karg.unique_len)) {
			kfree(karg.unique);
			return -EFAULT;
		}
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	if (cmd == DRM32_IOCTL_GET_UNIQUE)
		ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
	else
		ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
	set_fs(old_fs);

	if (!ret) {
		if (cmd == DRM32_IOCTL_GET_UNIQUE &&
		    uptr != NULL &&
		    copy_to_user(uptr, karg.unique, karg.unique_len))
			ret = -EFAULT;
		if (put_user(karg.unique_len, &uarg->unique_len))
			ret = -EFAULT;
	}

	if (karg.unique != NULL)
		kfree(karg.unique);

	return ret;
}
typedef struct drm32_map {
	u32		offset;	/* Requested physical address (0 for SAREA) */
	u32		size;	/* Requested physical size (bytes)	*/
	drm_map_type_t	type;	/* Type of memory to map		*/
	drm_map_flags_t	flags;	/* Flags				*/
	u32		handle;	/* User-space: "Handle" to pass to mmap	*/
				/* Kernel-space: kernel-virtual address	*/
	int		mtrr;	/* MTRR slot used			*/
				/* Private data				*/
} drm32_map_t;
#define DRM32_IOCTL_ADD_MAP	DRM_IOWR(0x15, drm32_map_t)
static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_map_t *uarg = (drm32_map_t *) arg;
	drm_map_t karg;
	mm_segment_t old_fs;
	u32 tmp;
	int ret;

	ret  = get_user(karg.offset, &uarg->offset);
	ret |= get_user(karg.size, &uarg->size);
	ret |= get_user(karg.type, &uarg->type);
	ret |= get_user(karg.flags, &uarg->flags);
	ret |= get_user(tmp, &uarg->handle);
	ret |= get_user(karg.mtrr, &uarg->mtrr);
	if (ret)
		return -EFAULT;

	karg.handle = (void *) A(tmp);

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
	set_fs(old_fs);

	if (!ret) {
		ret  = put_user(karg.offset, &uarg->offset);
		ret |= put_user(karg.size, &uarg->size);
		ret |= put_user(karg.type, &uarg->type);
		ret |= put_user(karg.flags, &uarg->flags);
		tmp = (u32) (long)karg.handle;
		ret |= put_user(tmp, &uarg->handle);
		ret |= put_user(karg.mtrr, &uarg->mtrr);
		if (ret)
			ret = -EFAULT;
	}

	return ret;
}
typedef struct drm32_buf_info {
	int	count;		/* Entries in list			*/
	u32	list;		/* (drm_buf_desc_t *)			*/
} drm32_buf_info_t;
#define DRM32_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm32_buf_info_t)
static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
	drm_buf_desc_t *ulist;
	drm_buf_info_t karg;
	mm_segment_t old_fs;
	int orig_count, ret;
	u32 tmp;

	if (get_user(karg.count, &uarg->count) ||
	    get_user(tmp, &uarg->list))
		return -EFAULT;

	ulist = (drm_buf_desc_t *) A(tmp);

	orig_count = karg.count;

	karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
	if (!karg.list)
		return -EFAULT;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
	set_fs(old_fs);

	if (!ret) {
		if (karg.count <= orig_count &&
		    (copy_to_user(ulist, karg.list,
				  karg.count * sizeof(drm_buf_desc_t))))
			ret = -EFAULT;
		if (put_user(karg.count, &uarg->count))
			ret = -EFAULT;
	}

	kfree(karg.list);

	return ret;
}
typedef struct drm32_buf_free {
	int	count;
	u32	list;		/* (int *)				*/
} drm32_buf_free_t;
#define DRM32_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm32_buf_free_t)
static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
	drm_buf_free_t karg;
	mm_segment_t old_fs;
	int *ulist;
	int ret;
	u32 tmp;

	if (get_user(karg.count, &uarg->count) ||
	    get_user(tmp, &uarg->list))
		return -EFAULT;

	ulist = (int *) A(tmp);

	karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
	if (!karg.list)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
		goto out;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
	set_fs(old_fs);

out:
	kfree(karg.list);

	return ret;
}
typedef struct drm32_buf_pub {
	int	idx;		/* Index into master buflist		*/
	int	total;		/* Buffer size				*/
	int	used;		/* Amount of buffer in use (for DMA)	*/
	u32	address;	/* Address of buffer (void *)		*/
} drm32_buf_pub_t;

typedef struct drm32_buf_map {
	int	count;		/* Length of buflist			*/
	u32	virtual;	/* Mmaped area in user-virtual (void *)	*/
	u32	list;		/* Buffer information (drm_buf_pub_t *)	*/
} drm32_buf_map_t;
#define DRM32_IOCTL_MAP_BUFS	DRM_IOWR(0x19, drm32_buf_map_t)
static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
	drm32_buf_pub_t *ulist;
	drm_buf_map_t karg;
	mm_segment_t old_fs;
	int orig_count, ret, i;
	u32 tmp1, tmp2;

	if (get_user(karg.count, &uarg->count) ||
	    get_user(tmp1, &uarg->virtual) ||
	    get_user(tmp2, &uarg->list))
		return -EFAULT;

	karg.virtual = (void *) A(tmp1);
	ulist = (drm32_buf_pub_t *) A(tmp2);

	orig_count = karg.count;

	karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
	if (!karg.list)
		return -ENOMEM;

	ret = -EFAULT;
	for (i = 0; i < karg.count; i++) {
		if (get_user(karg.list[i].idx, &ulist[i].idx) ||
		    get_user(karg.list[i].total, &ulist[i].total) ||
		    get_user(karg.list[i].used, &ulist[i].used) ||
		    get_user(tmp1, &ulist[i].address))
			goto out;

		karg.list[i].address = (void *) A(tmp1);
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
	set_fs(old_fs);

	if (!ret) {
		for (i = 0; i < orig_count; i++) {
			tmp1 = (u32) (long) karg.list[i].address;
			if (put_user(karg.list[i].idx, &ulist[i].idx) ||
			    put_user(karg.list[i].total, &ulist[i].total) ||
			    put_user(karg.list[i].used, &ulist[i].used) ||
			    put_user(tmp1, &ulist[i].address)) {
				ret = -EFAULT;
				goto out;
			}
		}
		if (put_user(karg.count, &uarg->count))
			ret = -EFAULT;
	}

out:
	kfree(karg.list);
	return ret;
}
typedef struct drm32_dma {
	/* Indices here refer to the offset into
	   buflist in drm_buf_get_t. */
	int		context;	  /* Context handle		*/
	int		send_count;	  /* Number of buffers to send	*/
	u32		send_indices;	  /* List of handles to buffers (int *) */
	u32		send_sizes;	  /* Lengths of data to send (int *) */
	drm_dma_flags_t	flags;		  /* Flags			*/
	int		request_count;	  /* Number of buffers requested */
	int		request_size;	  /* Desired size for buffers	*/
	u32		request_indices;  /* Buffer information (int *)	*/
	u32		request_sizes;	  /* (int *)			*/
	int		granted_count;	  /* Number of buffers granted	*/
} drm32_dma_t;
#define DRM32_IOCTL_DMA		DRM_IOWR(0x29, drm32_dma_t)
/* RED PEN	The DRM layer blindly dereferences the send/request
 *		indice/size arrays even though they are userland
 *		pointers.  -DaveM
 */
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_dma_t *uarg = (drm32_dma_t *) arg;
	int *u_si, *u_ss, *u_ri, *u_rs;
	drm_dma_t karg;
	mm_segment_t old_fs;
	int ret;
	u32 tmp1, tmp2, tmp3, tmp4;

	karg.send_indices = karg.send_sizes = NULL;
	karg.request_indices = karg.request_sizes = NULL;

	if (get_user(karg.context, &uarg->context) ||
	    get_user(karg.send_count, &uarg->send_count) ||
	    get_user(tmp1, &uarg->send_indices) ||
	    get_user(tmp2, &uarg->send_sizes) ||
	    get_user(karg.flags, &uarg->flags) ||
	    get_user(karg.request_count, &uarg->request_count) ||
	    get_user(karg.request_size, &uarg->request_size) ||
	    get_user(tmp3, &uarg->request_indices) ||
	    get_user(tmp4, &uarg->request_sizes) ||
	    get_user(karg.granted_count, &uarg->granted_count))
		return -EFAULT;

	u_si = (int *) A(tmp1);
	u_ss = (int *) A(tmp2);
	u_ri = (int *) A(tmp3);
	u_rs = (int *) A(tmp4);

	if (karg.send_count) {
		karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
		karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);

		ret = -ENOMEM;
		if (!karg.send_indices || !karg.send_sizes)
			goto out;

		ret = -EFAULT;
		if (copy_from_user(karg.send_indices, u_si,
				   (karg.send_count * sizeof(int))) ||
		    copy_from_user(karg.send_sizes, u_ss,
				   (karg.send_count * sizeof(int))))
			goto out;
	}

	if (karg.request_count) {
		karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
		karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);

		ret = -ENOMEM;
		if (!karg.request_indices || !karg.request_sizes)
			goto out;

		ret = -EFAULT;
		if (copy_from_user(karg.request_indices, u_ri,
				   (karg.request_count * sizeof(int))) ||
		    copy_from_user(karg.request_sizes, u_rs,
				   (karg.request_count * sizeof(int))))
			goto out;
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
	set_fs(old_fs);

	if (!ret) {
		if (put_user(karg.context, &uarg->context) ||
		    put_user(karg.send_count, &uarg->send_count) ||
		    put_user(karg.flags, &uarg->flags) ||
		    put_user(karg.request_count, &uarg->request_count) ||
		    put_user(karg.request_size, &uarg->request_size) ||
		    put_user(karg.granted_count, &uarg->granted_count))
			ret = -EFAULT;

		if (karg.send_count) {
			if (copy_to_user(u_si, karg.send_indices,
					 (karg.send_count * sizeof(int))) ||
			    copy_to_user(u_ss, karg.send_sizes,
					 (karg.send_count * sizeof(int))))
				ret = -EFAULT;
		}
		if (karg.request_count) {
			if (copy_to_user(u_ri, karg.request_indices,
					 (karg.request_count * sizeof(int))) ||
			    copy_to_user(u_rs, karg.request_sizes,
					 (karg.request_count * sizeof(int))))
				ret = -EFAULT;
		}
	}

out:
	if (karg.send_indices)
		kfree(karg.send_indices);
	if (karg.send_sizes)
		kfree(karg.send_sizes);
	if (karg.request_indices)
		kfree(karg.request_indices);
	if (karg.request_sizes)
		kfree(karg.request_sizes);

	return ret;
}
typedef struct drm32_ctx_res {
	int	count;
	u32	contexts;	/* (drm_ctx_t *)			*/
} drm32_ctx_res_t;
#define DRM32_IOCTL_RES_CTX	DRM_IOWR(0x26, drm32_ctx_res_t)
static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
	drm_ctx_t *ulist;
	drm_ctx_res_t karg;
	mm_segment_t old_fs;
	int orig_count, ret;
	u32 tmp;

	karg.contexts = NULL;
	if (get_user(karg.count, &uarg->count) ||
	    get_user(tmp, &uarg->contexts))
		return -EFAULT;

	ulist = (drm_ctx_t *) A(tmp);

	orig_count = karg.count;
	if (karg.count && ulist) {
		karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
		if (!karg.contexts)
			return -ENOMEM;
		if (copy_from_user(karg.contexts, ulist,
				   (karg.count * sizeof(drm_ctx_t)))) {
			kfree(karg.contexts);
			return -EFAULT;
		}
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
	set_fs(old_fs);

	if (!ret) {
		if (orig_count) {
			if (copy_to_user(ulist, karg.contexts,
					 (orig_count * sizeof(drm_ctx_t))))
				ret = -EFAULT;
		}
		if (put_user(karg.count, &uarg->count))
			ret = -EFAULT;
	}

	if (karg.contexts)
		kfree(karg.contexts);

	return ret;
}
#endif

#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, 0 },
#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)

#define IOCTL_TABLE_START	struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END		};
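/*
 * Illustrative expansion of the table macros above (the entry layout simply
 * follows the HANDLE_IOCTL() definition; nothing here is new code):
 *
 *	COMPATIBLE_IOCTL(TIOCSTART)
 * expands to
 *	{ TIOCSTART, (ioctl_trans_handler_t)sys_ioctl, 0 },
 *
 * i.e. an ioctl_start[] entry that passes the command straight through to
 * sys_ioctl(), whereas HANDLE_IOCTL(cmd, handler) entries route the command
 * to one of the translation handlers defined in this file.
 */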
IOCTL_TABLE_START
#include <linux/compat_ioctl.h>

#define DECLARES
#include "compat_ioctl.c"
/* Might be moved to compat_ioctl.h with some ifdefs... */
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
COMPATIBLE_IOCTL(TIOCSLTC)

/* PA-specific ioctls */
COMPATIBLE_IOCTL(PA_PERF_ON)
COMPATIBLE_IOCTL(PA_PERF_OFF)
COMPATIBLE_IOCTL(PA_PERF_VERSION)

/* And these ioctls need translation */
HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
HANDLE_IOCTL(SIOCGPPPVER, dev_ifsioc)

#if defined(CONFIG_GEN_RTC)
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
COMPATIBLE_IOCTL(RTC_UIE_ON)
COMPATIBLE_IOCTL(RTC_UIE_OFF)
COMPATIBLE_IOCTL(RTC_PIE_ON)
COMPATIBLE_IOCTL(RTC_PIE_OFF)
COMPATIBLE_IOCTL(RTC_WIE_ON)
COMPATIBLE_IOCTL(RTC_WIE_OFF)
COMPATIBLE_IOCTL(RTC_ALM_SET)	/* struct rtc_time only has ints */
COMPATIBLE_IOCTL(RTC_ALM_READ)	/* struct rtc_time only has ints */
COMPATIBLE_IOCTL(RTC_RD_TIME)	/* struct rtc_time only has ints */
COMPATIBLE_IOCTL(RTC_SET_TIME)	/* struct rtc_time only has ints */
HANDLE_IOCTL(RTC_IRQP_READ, w_long)
COMPATIBLE_IOCTL(RTC_IRQP_SET)
HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
COMPATIBLE_IOCTL(RTC_EPOCH_SET)
#endif
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
#endif /* DRM */
IOCTL_TABLE_END

int ioctl_table_size = ARRAY_SIZE(ioctl_start);
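/*
 * ioctl_start[] and ioctl_table_size are consumed outside this file; the
 * exact consumer is an assumption here, but in this kernel era it is the
 * generic 32-bit compat ioctl setup, which walks the table and dispatches
 * each 32-bit ioctl either straight to sys_ioctl() (COMPATIBLE_IOCTL
 * entries) or to one of the translation handlers above (HANDLE_IOCTL
 * entries).
 */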