/*
 *	memrar_handler 1.0: An Intel restricted access region handler device
 *
 *	Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of version 2 of the GNU General
 *	Public License as published by the Free Software Foundation.
 *
 *	This program is distributed in the hope that it will be
 *	useful, but WITHOUT ANY WARRANTY; without even the implied
 *	warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 *	PURPOSE.  See the GNU General Public License for more details.
 *	You should have received a copy of the GNU General Public
 *	License along with this program; if not, write to the Free
 *	Software Foundation, Inc., 59 Temple Place - Suite 330,
 *	Boston, MA 02111-1307, USA.
 *	The full GNU General Public License is included in this
 *	distribution in the file called COPYING.
 *
 * -------------------------------------------------------------------
 *
 *	Moorestown restricted access regions (RAR) provide isolated
 *	areas of main memory that are only accessible by authorized
 *	devices.
 *
 *	The Intel Moorestown RAR handler module exposes a kernel space
 *	RAR memory management mechanism.  It is essentially a
 *	RAR-specific allocator.
 *
 *	Besides providing RAR buffer management, the RAR handler also
 *	behaves in many ways like an OS virtual memory manager.  For
 *	example, the RAR "handles" created by the RAR handler are
 *	analogous to user space virtual addresses.
 *
 *	RAR memory itself is never accessed directly by the RAR
 *	handler.
 */
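/*
 * For illustration only: a minimal sketch of how a kernel space
 * client might drive the rar_reserve()/rar_release() API exported at
 * the bottom of this file.  The RAR type and size below are
 * arbitrary example values, not requirements.
 *
 *	struct RAR_buffer buf;
 *
 *	buf.info.type = RAR_TYPE_AUDIO;
 *	buf.info.size = 4096;
 *
 *	if (rar_reserve(&buf, 1) == 1) {
 *		... use buf.info.handle and buf.bus_address ...
 *		rar_release(&buf, 1);
 *	}
 *
 * rar_reserve() returns the number of buffers successfully reserved,
 * so a return value of 1 here means the single request succeeded.
 */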
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for the MODULE_* macros below */
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/io.h>

#include "../rar_register/rar_register.h"

#include "memrar.h"
#include "memrar_allocator.h"
#define MEMRAR_VER "1.0"

/*
 * Moorestown supports three restricted access regions.
 *
 * We only care about the first two, video and audio.  The third,
 * reserved for Chaabi and the P-unit, will be handled by their
 * respective drivers.
 */
#define MRST_NUM_RAR 2
/* ---------------- -------------------- ------------------- */

/**
 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
 * @list:	Linked list of memrar_buffer_info objects.
 * @buffer:	Core RAR buffer information.
 * @refcount:	Reference count.
 * @owner:	File handle corresponding to process that reserved the
 *		block of memory in RAR.  This will be zero for buffers
 *		allocated by other drivers instead of by a user space
 *		process.
 *
 * This structure encapsulates a linked list of RAR buffers, as well as
 * other characteristics specific to a given list node, such as the
 * reference count on the corresponding RAR buffer.
 */
struct memrar_buffer_info {
	struct list_head list;
	struct RAR_buffer buffer;
	struct kref refcount;
	struct file *owner;
};
/**
 * struct memrar_rar_info - characteristics of a given RAR
 * @base:	Base bus address of the RAR.
 * @length:	Length of the RAR.
 * @iobase:	Virtual address of RAR mapped into kernel.
 * @allocator:	Allocator associated with the RAR.  Note the allocator
 *		"capacity" may be smaller than the RAR length if the
 *		length is not a multiple of the configured allocator
 *		block size.
 * @buffers:	Table that keeps track of all reserved RAR buffers.
 * @lock:	Lock used to synchronize access to RAR-specific data
 *		structures.
 *
 * Each RAR has an associated memrar_rar_info structure that describes
 * where in memory the RAR is located, how large it is, and a list of
 * reserved RAR buffers inside that RAR.  Each RAR also has a mutex
 * associated with it to reduce lock contention when operations on
 * multiple RARs are performed in parallel.
 */
struct memrar_rar_info {
	dma_addr_t base;
	unsigned long length;
	void __iomem *iobase;
	struct memrar_allocator *allocator;
	struct memrar_buffer_info buffers;
	struct mutex lock;
};
/*
 * Array of RAR characteristics.
 */
static struct memrar_rar_info memrars[MRST_NUM_RAR];

/* ---------------- -------------------- ------------------- */
/* Validate RAR type. */
static inline int memrar_is_valid_rar_type(u32 type)
{
	return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
}
/* Check if an address/handle falls within the given RAR memory range. */
static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
					 u32 vaddr)
{
	unsigned long const iobase = (unsigned long) (rar->iobase);

	return (vaddr >= iobase && vaddr < iobase + rar->length);
}
/* Retrieve RAR information associated with the given handle. */
static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
{
	int i;

	for (i = 0; i < MRST_NUM_RAR; ++i) {
		struct memrar_rar_info * const rar = &memrars[i];

		if (memrar_handle_in_range(rar, vaddr))
			return rar;
	}

	return NULL;
}
/*
 * Retrieve bus address from given handle.
 *
 * Returns address corresponding to given handle.  Zero if handle is
 * invalid.
 */
static dma_addr_t memrar_get_bus_address(
	struct memrar_rar_info *rar,
	u32 vaddr)
{
	unsigned long const iobase = (unsigned long) (rar->iobase);

	if (!memrar_handle_in_range(rar, vaddr))
		return 0;

	/*
	 * An assumption is made that the virtual address offset is
	 * the same as the bus address offset, at least based on the
	 * way this driver is implemented.  For example, vaddr + 2 ==
	 * baddr + 2.
	 *
	 * @todo Is that a valid assumption?
	 */
	return rar->base + (vaddr - iobase);
}
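/*
 * Worked example of the translation above, using hypothetical
 * addresses: if rar->base is 0x08000000 and rar->iobase maps to
 * kernel virtual address 0xf8000000, then handle 0xf8001000 yields
 * bus address 0x08000000 + (0xf8001000 - 0xf8000000) = 0x08001000.
 */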
/*
 * Retrieve physical address from given handle.
 *
 * Returns address corresponding to given handle.  Zero if handle is
 * invalid.
 */
static dma_addr_t memrar_get_physical_address(
	struct memrar_rar_info *rar,
	u32 vaddr)
{
	/*
	 * @todo This assumes that the bus address and physical
	 *       address are the same.  That is true for Moorestown
	 *       but not necessarily on other platforms.  This
	 *       deficiency should be addressed at some point.
	 */
	return memrar_get_bus_address(rar, vaddr);
}
/*
 * Core block release code.
 *
 * Note: This code removes the node from a list.  Make sure any list
 *       iteration is performed using list_for_each_safe().
 */
static void memrar_release_block_i(struct kref *ref)
{
	/*
	 * Last reference is being released.  Remove from the table,
	 * and reclaim resources.
	 */

	struct memrar_buffer_info * const node =
		container_of(ref, struct memrar_buffer_info, refcount);

	struct RAR_block_info * const user_info =
		&node->buffer.info;

	struct memrar_allocator * const allocator =
		memrars[user_info->type].allocator;

	list_del(&node->list);

	memrar_allocator_free(allocator, user_info->handle);

	kfree(node);
}
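/*
 * For illustration: because memrar_release_block_i() calls
 * list_del() on the node being freed, any loop that may drop the
 * last reference must use the _safe list iterator, as done elsewhere
 * in this file:
 *
 *	list_for_each_entry_safe(pos, tmp, &rar->buffers.list, list)
 *		kref_put(&pos->refcount, memrar_release_block_i);
 *
 * A plain list_for_each_entry() would touch freed memory whenever
 * kref_put() released the node.
 */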
/*
 * Initialize RAR parameters, such as bus addresses, etc.
 */
static int memrar_init_rar_resources(char const *devname)
{
	/* ---- Sanity Checks ----
	 * 1. RAR bus addresses in both Lincroft and Langwell RAR
	 *    registers should be the same.
	 *    a. There's no way we can do this through IA.
	 *
	 * 2. Secure device ID in Langwell RAR registers should be set
	 *    appropriately, e.g. only LPE DMA for the audio RAR, and
	 *    security for the other Langwell based RAR registers.
	 *    a. There's no way we can do this through IA.
	 *
	 * 3. Audio and video RAR registers and RAR access should be
	 *    locked down.  If not, enable RAR access control.  Except
	 *    for debugging purposes, there is no reason for them to
	 *    be unlocked.
	 *    a. We can only do this for the Lincroft (IA) side.
	 *
	 * @todo Should the RAR handler driver even be aware of audio
	 *       and video RAR settings?
	 */

	/*
	 * RAR buffer block size.
	 *
	 * We choose it to be the size of a page to simplify the
	 * /dev/memrar mmap() implementation and usage.  Otherwise
	 * paging is not involved once a RAR is locked down.
	 */
	static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;

	int z;
	int found_rar = 0;

	BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));

	for (z = 0; z != MRST_NUM_RAR; ++z) {
		dma_addr_t low, high;
		struct memrar_rar_info * const rar = &memrars[z];

		BUG_ON(!memrar_is_valid_rar_type(z));

		mutex_init(&rar->lock);

		/*
		 * Initialize the buffer list before we reach any
		 * code that exits on failure since the finalization
		 * code requires an initialized list.
		 */
		INIT_LIST_HEAD(&rar->buffers.list);
		if (rar_get_address(z, &low, &high) != 0) {
			/* No RAR is available. */
			break;
		} else if (low == 0 || high == 0) {
			/*
			 * We don't immediately break out of the loop
			 * since the next type of RAR may be enabled.
			 */
			rar->base = 0;
			rar->length = 0;
			rar->iobase = NULL;
			rar->allocator = NULL;
			continue;
		}

		/*
		 * @todo Verify that LNC and LNW RAR register contents
		 *       (addresses, security, etc.) are compatible and
		 *       consistent.
		 */

		rar->length = high - low + 1;

		/* Claim RAR memory as our own. */
		if (request_mem_region(low, rar->length, devname) == NULL) {
			rar->length = 0;

			pr_err("%s: Unable to claim RAR[%d] memory.\n",
			       devname,
			       z);
			pr_err("%s: RAR[%d] disabled.\n", devname, z);

			/*
			 * Rather than break out of the loop by
			 * returning -EBUSY, for example, we may be
			 * able to claim memory of the next RAR region
			 * as our own.
			 */
			continue;
		}
		rar->base = low;

		/*
		 * Now map it into the kernel address space.
		 *
		 * Note that the RAR memory may only be accessed by IA
		 * when debugging.  Otherwise attempts to access the
		 * RAR memory when it is locked down will result in
		 * behavior similar to writing to /dev/null and
		 * reading from /dev/zero.  This behavior is enforced
		 * by the hardware.  Even if we don't access the
		 * memory, mapping it into the kernel provides us with
		 * a convenient RAR handle to bus address mapping.
		 */
		rar->iobase = ioremap_nocache(rar->base, rar->length);
		if (rar->iobase == NULL) {
			pr_err("%s: Unable to map RAR memory.\n",
			       devname);
			return -ENOMEM;
		}

		/* Initialize corresponding memory allocator. */
		rar->allocator = memrar_create_allocator(
			(unsigned long) rar->iobase,
			rar->length,
			RAR_BLOCK_SIZE);
		if (rar->allocator == NULL)
			return -ENOMEM;
		/*
		 * -------------------------------------------------
		 * Make sure all RARs handled by us are locked down.
		 * -------------------------------------------------
		 */

		/* Enable RAR protection on the Lincroft side. */
		if (0) {
			/*
			 * This is mostly a sanity check since the
			 * vendor should have locked down RAR in the
			 * SMIP header RAR configuration.
			 */
			rar_lock(z);
		} else {
			pr_warning("%s: LNC RAR[%d] no lock sanity check.\n",
				   devname,
				   z);
		}

		/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
		/* |||||||||||||||||||||||||||||||||||||||||||||||||| */

		/*
		 * It would be nice if we could verify that RAR
		 * protection on the Langwell side is enabled, but
		 * there is no way to do that from here.  The
		 * necessary Langwell RAR registers are not accessible
		 * from the Lincroft (IA) side.
		 *
		 * Hopefully the ODM did the right thing and enabled
		 * Langwell side RAR protection in the integrated
		 * firmware SMIP header.
		 */

		pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
			devname,
			z,
			(unsigned long) low,
			(unsigned long) high);

		pr_info("%s: BRAR[%d] size = %zu KiB\n",
			devname,
			z,
			rar->allocator->capacity / 1024);
		found_rar = 1;
	}

	if (!found_rar) {
		/*
		 * No RAR support.  Don't bother continuing.
		 *
		 * Note that this is not a failure.
		 */
		pr_info("%s: No Moorestown RAR support available.\n",
			devname);
		return -ENODEV;
	}

	return 0;
}
/*
 * Finalize RAR resources.
 */
static void memrar_fini_rar_resources(void)
{
	int z;
	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;

	/*
	 * @todo Do we need to hold a lock at this point in time?
	 *       (module initialization failure or exit?)
	 */

	for (z = MRST_NUM_RAR; z-- != 0; ) {
		struct memrar_rar_info * const rar = &memrars[z];

		/* Clean up remaining resources. */

		list_for_each_entry_safe(pos,
					 tmp,
					 &rar->buffers.list,
					 list) {
			kref_put(&pos->refcount, memrar_release_block_i);
		}

		memrar_destroy_allocator(rar->allocator);
		rar->allocator = NULL;

		iounmap(rar->iobase);
		rar->iobase = NULL;

		release_mem_region(rar->base, rar->length);
		rar->base = 0;

		rar->length = 0;
	}
}
static long memrar_reserve_block(struct RAR_buffer *request,
				 struct file *filp)
{
	struct RAR_block_info * const rinfo = &request->info;
	struct RAR_buffer *buffer;
	struct memrar_buffer_info *buffer_info;
	u32 handle;
	struct memrar_rar_info *rar = NULL;

	/* Prevent array overflow. */
	if (!memrar_is_valid_rar_type(rinfo->type))
		return -EINVAL;

	rar = &memrars[rinfo->type];

	/* Reserve memory in RAR. */
	handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
	if (handle == 0)
		return -ENOMEM;

	buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);

	if (buffer_info == NULL) {
		memrar_allocator_free(rar->allocator, handle);
		return -ENOMEM;
	}

	buffer = &buffer_info->buffer;
	buffer->info.type = rinfo->type;
	buffer->info.size = rinfo->size;

	/* Memory handle corresponding to the bus address. */
	buffer->info.handle = handle;
	buffer->bus_address = memrar_get_bus_address(rar, handle);

	/*
	 * Keep track of the owner so that we can later clean up if
	 * necessary.
	 */
	buffer_info->owner = filp;

	kref_init(&buffer_info->refcount);

	mutex_lock(&rar->lock);
	list_add(&buffer_info->list, &rar->buffers.list);
	mutex_unlock(&rar->lock);

	rinfo->handle = buffer->info.handle;
	request->bus_address = buffer->bus_address;

	return 0;
}
static long memrar_release_block(u32 addr)
{
	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;
	struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
	long result = -EINVAL;

	if (rar == NULL)
		return -EFAULT;

	mutex_lock(&rar->lock);

	/*
	 * Iterate through the buffer list to find the corresponding
	 * buffer to be released.
	 */
	list_for_each_entry_safe(pos,
				 tmp,
				 &rar->buffers.list,
				 list) {
		struct RAR_block_info * const info =
			&pos->buffer.info;

		/*
		 * Take into account handle offsets that may have been
		 * added to the base handle, such as in the following
		 * scenario:
		 *
		 *	u32 handle = base + offset;
		 *	rar_handle_to_bus(handle);
		 *	rar_release(handle);
		 */
		if (addr >= info->handle
		    && addr < (info->handle + info->size)
		    && memrar_is_valid_rar_type(info->type)) {
			kref_put(&pos->refcount, memrar_release_block_i);
			result = 0;
			break;
		}
	}

	mutex_unlock(&rar->lock);

	return result;
}
static long memrar_get_stat(struct RAR_stat *r)
{
	long result = -EINVAL;

	if (likely(r != NULL) && memrar_is_valid_rar_type(r->type)) {
		struct memrar_allocator * const allocator =
			memrars[r->type].allocator;

		BUG_ON(allocator == NULL);

		/*
		 * Allocator capacity doesn't change over time.  No
		 * need to synchronize.
		 */
		r->capacity = allocator->capacity;

		mutex_lock(&allocator->lock);

		r->largest_block_size = allocator->largest_free_area;

		mutex_unlock(&allocator->lock);

		result = 0;
	}

	return result;
}
static long memrar_ioctl(struct file *filp,
			 unsigned int cmd,
			 unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long result = 0;

	struct RAR_buffer buffer;
	struct RAR_block_info * const request = &buffer.info;
	struct RAR_stat rar_info;
	u32 rar_handle;

	switch (cmd) {
	case RAR_HANDLER_RESERVE:
		if (copy_from_user(request,
				   argp,
				   sizeof(*request)))
			return -EFAULT;

		result = memrar_reserve_block(&buffer, filp);
		if (result != 0)
			return result;

		/*
		 * Match the other cases: report a partial copy back
		 * to user space as -EFAULT rather than returning the
		 * raw copy_to_user() byte count.
		 */
		return copy_to_user(argp,
				    request,
				    sizeof(*request)) ? -EFAULT : 0;
	case RAR_HANDLER_RELEASE:
		if (copy_from_user(&rar_handle,
				   argp,
				   sizeof(rar_handle)))
			return -EFAULT;

		return memrar_release_block(rar_handle);

	case RAR_HANDLER_STAT:
		if (copy_from_user(&rar_info,
				   argp,
				   sizeof(rar_info)))
			return -EFAULT;

		/*
		 * Populate the RAR_stat structure based on the RAR
		 * type given by the user.
		 */
		if (memrar_get_stat(&rar_info) != 0)
			return -EINVAL;

		/*
		 * @todo Do we need to verify destination pointer
		 *       "argp" is non-zero?  Is that already done by
		 *       copy_to_user()?
		 */
		return copy_to_user(argp,
				    &rar_info,
				    sizeof(rar_info)) ? -EFAULT : 0;

	default:
		return -ENOTTY;
	}

	return 0;
}
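/*
 * For illustration only: a hypothetical user space caller of the
 * ioctl interface above (error handling elided; the request layout
 * comes from the RAR_block_info definition in memrar.h):
 *
 *	struct RAR_block_info req = { .type = RAR_TYPE_AUDIO,
 *				      .size = 4096 };
 *	int fd = open("/dev/memrar", O_RDWR);
 *
 *	if (ioctl(fd, RAR_HANDLER_RESERVE, &req) == 0)
 *		... req.handle now holds the RAR handle ...
 */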
static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/*
	 * This mmap() implementation is predominantly useful for
	 * debugging since the CPU will be prevented from accessing
	 * RAR memory by the hardware when RAR is properly locked
	 * down.
	 *
	 * In order for this implementation to be useful RAR memory
	 * must not be locked down.  However, we only want to do
	 * that when debugging.  DO NOT leave RAR memory unlocked in a
	 * deployed device that utilizes RAR.
	 */

	size_t const size = vma->vm_end - vma->vm_start;

	/* Users pass the RAR handle as the mmap() offset parameter. */
	unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;

	struct memrar_rar_info * const rar = memrar_get_rar_info(handle);

	unsigned long pfn;
	/*
	 * Invalid RAR handle or size passed to mmap().  The requested
	 * mapping must not extend past the end of the RAR.
	 */
	if (rar == NULL
	    || handle == 0
	    || size > rar->length - (handle - (unsigned long) rar->iobase))
		return -EINVAL;
	/*
	 * Retrieve physical address corresponding to the RAR handle,
	 * and convert it to a page frame.
	 */
	pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;

	pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
		 handle,
		 handle + size);

	/*
	 * Map RAR memory into user space.  This is really only useful
	 * for debugging purposes since the memory won't be
	 * accessible, i.e. reads return zero and writes are ignored,
	 * when RAR access control is enabled.
	 */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    pfn,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;

	/* vma->vm_ops = &memrar_mem_ops; */

	return 0;
}
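/*
 * For illustration only: a hypothetical user space mapping of a
 * previously reserved block, passing the RAR handle as the mmap()
 * offset as described above (the handle is page aligned since the
 * allocator block size is PAGE_SIZE, and mmap() offsets are
 * expressed in pages):
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, handle);
 *
 * Remember that reads and writes through this mapping only behave
 * normally when RAR access control is disabled for debugging.
 */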
static int memrar_open(struct inode *inode, struct file *filp)
{
	/* Nothing to do yet. */

	return 0;
}

static int memrar_release(struct inode *inode, struct file *filp)
{
	/* Free all regions associated with the given file handle. */

	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;
	int z;

	for (z = 0; z != MRST_NUM_RAR; ++z) {
		struct memrar_rar_info * const rar = &memrars[z];

		mutex_lock(&rar->lock);

		list_for_each_entry_safe(pos,
					 tmp,
					 &rar->buffers.list,
					 list) {
			if (filp == pos->owner)
				kref_put(&pos->refcount,
					 memrar_release_block_i);
		}

		mutex_unlock(&rar->lock);
	}

	return 0;
}
/*
 * This function is part of the kernel space memrar driver API.
 */
size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
{
	struct RAR_buffer * const end =
		(buffers == NULL ? buffers : buffers + count);
	struct RAR_buffer *i;

	size_t reserve_count = 0;

	for (i = buffers; i != end; ++i) {
		if (memrar_reserve_block(i, NULL) == 0)
			++reserve_count;
		else
			i->bus_address = 0;
	}

	return reserve_count;
}
EXPORT_SYMBOL(rar_reserve);
/*
 * This function is part of the kernel space memrar driver API.
 */
size_t rar_release(struct RAR_buffer *buffers, size_t count)
{
	struct RAR_buffer * const end =
		(buffers == NULL ? buffers : buffers + count);
	struct RAR_buffer *i;

	size_t release_count = 0;

	for (i = buffers; i != end; ++i) {
		u32 * const handle = &i->info.handle;

		if (memrar_release_block(*handle) == 0) {
			/*
			 * @todo We assume we should do this each time
			 *       the ref count is decremented.  Should
			 *       we instead only do this when the ref
			 *       count has dropped to zero, and the
			 *       buffer has been completely
			 *       released/unmapped?
			 */
			*handle = 0;
			++release_count;
		}
	}

	return release_count;
}
EXPORT_SYMBOL(rar_release);
/*
 * This function is part of the kernel space driver API.
 */
size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
{
	struct RAR_buffer * const end =
		(buffers == NULL ? buffers : buffers + count);
	struct RAR_buffer *i;
	struct memrar_buffer_info *pos;

	size_t conversion_count = 0;

	/*
	 * Find all bus addresses corresponding to the given handles.
	 *
	 * @todo Not liking this nested loop.  Optimize.
	 */
	for (i = buffers; i != end; ++i) {
		struct memrar_rar_info * const rar =
			memrar_get_rar_info(i->info.handle);

		/*
		 * Check if we have a bogus handle, and then continue
		 * with remaining buffers.
		 */
		if (rar == NULL) {
			i->bus_address = 0;
			continue;
		}

		mutex_lock(&rar->lock);

		list_for_each_entry(pos, &rar->buffers.list, list) {
			struct RAR_block_info * const user_info =
				&pos->buffer.info;

			/*
			 * Take into account handle offsets that may
			 * have been added to the base handle, such as
			 * in the following scenario:
			 *
			 *	u32 handle = base + offset;
			 *	rar_handle_to_bus(handle);
			 */
			if (i->info.handle >= user_info->handle
			    && i->info.handle < (user_info->handle
						 + user_info->size)) {
				u32 const offset =
					i->info.handle - user_info->handle;

				i->info.type = user_info->type;
				i->info.size = user_info->size - offset;
				i->bus_address =
					pos->buffer.bus_address
					+ offset;

				/* Increment the reference count. */
				kref_get(&pos->refcount);

				++conversion_count;
				break;
			} else {
				i->bus_address = 0;
			}
		}

		mutex_unlock(&rar->lock);
	}

	return conversion_count;
}
EXPORT_SYMBOL(rar_handle_to_bus);
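/*
 * For illustration: looking up the bus address of an offset into a
 * reserved block, per the scenario handled above (the offset value
 * is hypothetical):
 *
 *	struct RAR_buffer buf;
 *
 *	buf.info.handle = base_handle + 512;
 *	if (rar_handle_to_bus(&buf, 1) == 1) {
 *		... buf.bus_address points 512 bytes into the
 *		    underlying block; a reference was taken, so
 *		    balance it later with rar_release() ...
 *	}
 */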
static const struct file_operations memrar_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = memrar_ioctl,
	.mmap = memrar_mmap,
	.open = memrar_open,
	.release = memrar_release,
};

static struct miscdevice memrar_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,	/* dynamic allocation */
	.name = "memrar",		/* /dev/memrar */
	.fops = &memrar_fops
};

static char const banner[] __initdata =
	KERN_INFO
	"Intel RAR Handler: " MEMRAR_VER " initialized.\n";
static int memrar_registration_callback(void *ctx)
{
	/*
	 * We initialize the RAR parameters early on so that we can
	 * discontinue memrar device initialization and registration
	 * if suitably configured RARs are not available.
	 */
	int result = memrar_init_rar_resources(memrar_miscdev.name);

	if (result != 0)
		return result;

	result = misc_register(&memrar_miscdev);

	if (result != 0) {
		pr_err("%s: misc_register() failed.\n",
		       memrar_miscdev.name);

		/* Clean up resources previously reserved. */
		memrar_fini_rar_resources();
	}

	return result;
}
static int __init memrar_init(void)
{
	printk(banner);

	return register_rar(&memrar_registration_callback, 0);
}

static void __exit memrar_exit(void)
{
	memrar_fini_rar_resources();

	misc_deregister(&memrar_miscdev);
}

module_init(memrar_init);
module_exit(memrar_exit);

MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
MODULE_VERSION(MEMRAR_VER);
/*
  Local Variables:
    c-file-style: "linux"
  End:
*/