Blackfin: use proper wrapper functions for modifying irq status
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / staging / memrar / memrar_handler.c
blobcfcaa8e5b8e619a8a1bfb4baa131c1a0fad49a7e
1 /*
2 * memrar_handler 1.0: An Intel restricted access region handler device
4 * Copyright (C) 2010 Intel Corporation. All rights reserved.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General
8 * Public License as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be
11 * useful, but WITHOUT ANY WARRANTY; without even the implied
12 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
13 * PURPOSE. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the Free
16 * Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
18 * The full GNU General Public License is included in this
19 * distribution in the file called COPYING.
21 * -------------------------------------------------------------------
23 * Moorestown restricted access regions (RAR) provide isolated
24 * areas of main memory that are only acceessible by authorized
25 * devices.
27 * The Intel Moorestown RAR handler module exposes a kernel space
28 * RAR memory management mechanism. It is essentially a
29 * RAR-specific allocator.
31 * Besides providing RAR buffer management, the RAR handler also
32 * behaves in many ways like an OS virtual memory manager. For
33 * example, the RAR "handles" created by the RAR handler are
34 * analogous to user space virtual addresses.
36 * RAR memory itself is never accessed directly by the RAR
37 * handler.
40 #include <linux/miscdevice.h>
41 #include <linux/fs.h>
42 #include <linux/slab.h>
43 #include <linux/kref.h>
44 #include <linux/mutex.h>
45 #include <linux/kernel.h>
46 #include <linux/uaccess.h>
47 #include <linux/mm.h>
48 #include <linux/ioport.h>
49 #include <linux/io.h>
50 #include <linux/rar_register.h>
52 #include "memrar.h"
53 #include "memrar_allocator.h"
/* Driver version reported via MODULE_VERSION and the init banner. */
#define MEMRAR_VER "1.0"

/*
 * Moorestown supports three restricted access regions.
 *
 * We only care about the first two, video and audio.  The third,
 * reserved for Chaabi and the P-unit, will be handled by their
 * respective drivers.
 */
#define MRST_NUM_RAR 2
67 /* ---------------- -------------------- ------------------- */
/**
 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
 * @list:	Linked list of memrar_buffer_info objects.
 * @buffer:	Core RAR buffer information.
 * @refcount:	Reference count.
 * @owner:	File handle corresponding to process that reserved the
 *		block of memory in RAR.  This will be zero for buffers
 *		allocated by other drivers instead of by a user space
 *		process.
 *
 * This structure encapsulates a linked list of RAR buffers, as well as
 * other characteristics specific to a given list node, such as the
 * reference count on the corresponding RAR buffer.
 */
struct memrar_buffer_info {
	struct list_head list;
	struct RAR_buffer buffer;
	struct kref refcount;
	struct file *owner;
};
/**
 * struct memrar_rar_info - characteristics of a given RAR
 * @base:	Base bus address of the RAR.
 * @length:	Length of the RAR.
 * @iobase:	Virtual address of RAR mapped into kernel.
 * @allocator:	Allocator associated with the RAR.  Note the allocator
 *		"capacity" may be smaller than the RAR length if the
 *		length is not a multiple of the configured allocator
 *		block size.
 * @buffers:	Table that keeps track of all reserved RAR buffers.
 * @lock:	Lock used to synchronize access to RAR-specific data
 *		structures.
 * @allocated:	Non-zero once this driver has claimed and mapped the RAR.
 *
 * Each RAR has an associated memrar_rar_info structure that describes
 * where in memory the RAR is located, how large it is, and a list of
 * reserved RAR buffers inside that RAR.  Each RAR also has a mutex
 * associated with it to reduce lock contention when operations on
 * multiple RARs are performed in parallel.
 */
struct memrar_rar_info {
	dma_addr_t base;
	unsigned long length;
	void __iomem *iobase;
	struct memrar_allocator *allocator;
	struct memrar_buffer_info buffers;
	struct mutex lock;
	int allocated;	/* True if we own this RAR */
};

/*
 * Array of RAR characteristics, indexed by RAR type (video/audio).
 */
static struct memrar_rar_info memrars[MRST_NUM_RAR];
124 /* ---------------- -------------------- ------------------- */
126 /* Validate RAR type. */
127 static inline int memrar_is_valid_rar_type(u32 type)
129 return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
132 /* Check if an address/handle falls with the given RAR memory range. */
133 static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
134 u32 vaddr)
136 unsigned long const iobase = (unsigned long) (rar->iobase);
137 return (vaddr >= iobase && vaddr < iobase + rar->length);
140 /* Retrieve RAR information associated with the given handle. */
141 static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
143 int i;
144 for (i = 0; i < MRST_NUM_RAR; ++i) {
145 struct memrar_rar_info * const rar = &memrars[i];
146 if (memrar_handle_in_range(rar, vaddr))
147 return rar;
150 return NULL;
154 * memrar_get_bus address - handle to bus address
156 * Retrieve bus address from given handle.
158 * Returns address corresponding to given handle. Zero if handle is
159 * invalid.
161 static dma_addr_t memrar_get_bus_address(
162 struct memrar_rar_info *rar,
163 u32 vaddr)
165 unsigned long const iobase = (unsigned long) (rar->iobase);
167 if (!memrar_handle_in_range(rar, vaddr))
168 return 0;
171 * An assumption is made that the virtual address offset is
172 * the same as the bus address offset, at least based on the
173 * way this driver is implemented. For example, vaddr + 2 ==
174 * baddr + 2.
176 * @todo Is that a valid assumption?
178 return rar->base + (vaddr - iobase);
182 * memrar_get_physical_address - handle to physical address
184 * Retrieve physical address from given handle.
186 * Returns address corresponding to given handle. Zero if handle is
187 * invalid.
189 static dma_addr_t memrar_get_physical_address(
190 struct memrar_rar_info *rar,
191 u32 vaddr)
194 * @todo This assumes that the bus address and physical
195 * address are the same. That is true for Moorestown
196 * but not necessarily on other platforms. This
197 * deficiency should be addressed at some point.
199 return memrar_get_bus_address(rar, vaddr);
/**
 * memrar_release_block_i - release a block to the pool
 * @ref:	kref embedded in the block's memrar_buffer_info
 *
 * Core block release code.  A node has hit zero references so can
 * be released and the lists must be updated.
 *
 * Note: This code removes the node from a list.  Make sure any list
 * iteration is performed using list_for_each_safe().
 */
static void memrar_release_block_i(struct kref *ref)
{
	/*
	 * Last reference is being released.  Remove from the table,
	 * and reclaim resources.
	 */

	struct memrar_buffer_info * const node =
		container_of(ref, struct memrar_buffer_info, refcount);

	struct RAR_block_info * const user_info =
		&node->buffer.info;

	/* The buffer records its RAR type; look up that RAR's allocator. */
	struct memrar_allocator * const allocator =
		memrars[user_info->type].allocator;

	/* Unlink from the RAR's buffer list. */
	list_del(&node->list);

	/* Return the reserved region to the allocator, then free the node. */
	memrar_allocator_free(allocator, user_info->handle);

	kfree(node);
}
/**
 * memrar_init_rar_resources - configure a RAR
 * @rarnum:	rar that has been allocated
 * @devname:	name of our device
 *
 * Initialize RAR parameters, such as bus addresses, etc and make
 * the resource accessible.  On success the RAR is claimed, mapped,
 * and has a block allocator attached; on failure all partially
 * acquired resources are released and a negative errno is returned.
 */
static int memrar_init_rar_resources(int rarnum, char const *devname)
{
	/* ---- Sanity Checks ----
	 * 1. RAR bus addresses in both Lincroft and Langwell RAR
	 *    registers should be the same.
	 *    a. There's no way we can do this through IA.
	 *
	 * 2. Secure device ID in Langwell RAR registers should be set
	 *    appropriately, e.g. only LPE DMA for the audio RAR, and
	 *    security for the other Langwell based RAR registers.
	 *    a. There's no way we can do this through IA.
	 *
	 * 3. Audio and video RAR registers and RAR access should be
	 *    locked down.  If not, enable RAR access control.  Except
	 *    for debugging purposes, there is no reason for them to
	 *    be unlocked.
	 *    a. We can only do this for the Lincroft (IA) side.
	 *
	 * @todo Should the RAR handler driver even be aware of audio
	 *       and video RAR settings?
	 */

	/*
	 * RAR buffer block size.
	 *
	 * We choose it to be the size of a page to simplify the
	 * /dev/memrar mmap() implementation and usage.  Otherwise
	 * paging is not involved once an RAR is locked down.
	 */
	static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;

	dma_addr_t low, high;
	struct memrar_rar_info * const rar = &memrars[rarnum];

	/* The RAR table is indexed by RAR type; the two must stay in sync. */
	BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
	BUG_ON(!memrar_is_valid_rar_type(rarnum));
	BUG_ON(rar->allocated);

	if (rar_get_address(rarnum, &low, &high) != 0)
		/* No RAR is available. */
		return -ENODEV;

	if (low == 0 || high == 0) {
		/* Registered but unusable RAR: record it as empty. */
		rar->base      = 0;
		rar->length    = 0;
		rar->iobase    = NULL;
		rar->allocator = NULL;
		return -ENOSPC;
	}

	/*
	 * @todo Verify that LNC and LNW RAR register contents
	 *       addresses, security, etc are compatible and
	 *       consistent).
	 */

	rar->length = high - low + 1;

	/* Claim RAR memory as our own. */
	if (request_mem_region(low, rar->length, devname) == NULL) {
		rar->length = 0;

		pr_err("%s: Unable to claim RAR[%d] memory.\n",
		       devname, rarnum);
		pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);

		return -EBUSY;
	}

	rar->base = low;

	/*
	 * Now map it into the kernel address space.
	 *
	 * Note that the RAR memory may only be accessed by IA
	 * when debugging.  Otherwise attempts to access the
	 * RAR memory when it is locked down will result in
	 * behavior similar to writing to /dev/null and
	 * reading from /dev/zero.  This behavior is enforced
	 * by the hardware.  Even if we don't access the
	 * memory, mapping it into the kernel provides us with
	 * a convenient RAR handle to bus address mapping.
	 */
	rar->iobase = ioremap_nocache(rar->base, rar->length);
	if (rar->iobase == NULL) {
		pr_err("%s: Unable to map RAR memory.\n", devname);
		release_mem_region(low, rar->length);
		return -ENOMEM;
	}

	/* Initialize corresponding memory allocator. */
	rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
						 rar->length, RAR_BLOCK_SIZE);
	if (rar->allocator == NULL) {
		iounmap(rar->iobase);
		release_mem_region(low, rar->length);
		return -ENOMEM;
	}

	pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
		devname, rarnum, (unsigned long) low, (unsigned long) high);

	pr_info("%s: BRAR[%d] size = %zu KiB\n",
		devname, rarnum, rar->allocator->capacity / 1024);

	rar->allocated = 1;

	return 0;
}
/**
 * memrar_fini_rar_resources - free up RAR resources
 *
 * Finalize RAR resources.  Free up the resource tables, hand the memory
 * back to the kernel, unmap the device and release the address space.
 * Called from both the module-exit path and the partial-failure path
 * of memrar_init().
 */
static void memrar_fini_rar_resources(void)
{
	int z;
	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;

	/*
	 * @todo Do we need to hold a lock at this point in time?
	 *       (module initialization failure or exit?)
	 */

	/* Walk the table backwards, skipping RARs we never claimed. */
	for (z = MRST_NUM_RAR; z-- != 0; ) {
		struct memrar_rar_info * const rar = &memrars[z];

		if (!rar->allocated)
			continue;

		/* Clean up remaining resources. */

		/* Drop every outstanding buffer still on this RAR's list. */
		list_for_each_entry_safe(pos,
					 tmp,
					 &rar->buffers.list,
					 list) {
			kref_put(&pos->refcount, memrar_release_block_i);
		}

		memrar_destroy_allocator(rar->allocator);
		rar->allocator = NULL;

		iounmap(rar->iobase);
		release_mem_region(rar->base, rar->length);

		rar->iobase = NULL;
		rar->base = 0;
		rar->length = 0;

		/* Hand the RAR back to the rar_register driver. */
		unregister_rar(z);
	}
}
/**
 * memrar_reserve_block - handle an allocation request
 * @request:	block being requested; info.type and info.size are read,
 *		info.handle and bus_address are filled in on success
 * @filp:	owner it is tied to; NULL for kernel-internal callers
 *		(see rar_reserve())
 *
 * Allocate a block of the requested RAR.  If successful return the
 * request object filled in and zero, if not report an error code.
 */
static long memrar_reserve_block(struct RAR_buffer *request,
				 struct file *filp)
{
	struct RAR_block_info * const rinfo = &request->info;
	struct RAR_buffer *buffer;
	struct memrar_buffer_info *buffer_info;
	u32 handle;
	struct memrar_rar_info *rar = NULL;

	/* Prevent array overflow. */
	if (!memrar_is_valid_rar_type(rinfo->type))
		return -EINVAL;

	rar = &memrars[rinfo->type];
	if (!rar->allocated)
		return -ENODEV;

	/* Reserve memory in RAR. */
	handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
	if (handle == 0)
		return -ENOMEM;

	buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);

	if (buffer_info == NULL) {
		/* Roll back the RAR reservation before bailing out. */
		memrar_allocator_free(rar->allocator, handle);
		return -ENOMEM;
	}

	buffer = &buffer_info->buffer;
	buffer->info.type = rinfo->type;
	buffer->info.size = rinfo->size;

	/* Memory handle corresponding to the bus address. */
	buffer->info.handle = handle;
	buffer->bus_address = memrar_get_bus_address(rar, handle);

	/*
	 * Keep track of owner so that we can later cleanup if
	 * necessary.
	 */
	buffer_info->owner = filp;

	kref_init(&buffer_info->refcount);

	mutex_lock(&rar->lock);
	list_add(&buffer_info->list, &rar->buffers.list);
	mutex_unlock(&rar->lock);

	/* Report the handle and bus address back to the caller. */
	rinfo->handle = buffer->info.handle;
	request->bus_address = buffer->bus_address;

	return 0;
}
/**
 * memrar_release_block - release a RAR block
 * @addr:	address (handle) in RAR space
 *
 * Release a previously allocated block.  Releases act on complete
 * blocks, partially freeing a block is not supported.
 *
 * Returns 0 on success, -ENOENT if the address is in no managed RAR,
 * or -EINVAL if no reserved block contains it.
 */
static long memrar_release_block(u32 addr)
{
	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;
	struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
	long result = -EINVAL;

	if (rar == NULL)
		return -ENOENT;

	mutex_lock(&rar->lock);

	/*
	 * Iterate through the buffer list to find the corresponding
	 * buffer to be released.
	 */
	list_for_each_entry_safe(pos,
				 tmp,
				 &rar->buffers.list,
				 list) {
		struct RAR_block_info * const info =
			&pos->buffer.info;

		/*
		 * Take into account handle offsets that may have been
		 * added to the base handle, such as in the following
		 * scenario:
		 *
		 *	u32 handle = base + offset;
		 *	rar_handle_to_bus(handle);
		 *	rar_release(handle);
		 */
		if (addr >= info->handle
		    && addr < (info->handle + info->size)
		    && memrar_is_valid_rar_type(info->type)) {
			/* Drop a reference; the block is freed at zero. */
			kref_put(&pos->refcount, memrar_release_block_i);
			result = 0;
			break;
		}
	}

	mutex_unlock(&rar->lock);

	return result;
}
515 * memrar_get_stats - read statistics for a RAR
516 * @r: statistics to be filled in
518 * Returns the statistics data for the RAR, or an error code if
519 * the request cannot be completed
521 static long memrar_get_stat(struct RAR_stat *r)
523 struct memrar_allocator *allocator;
525 if (!memrar_is_valid_rar_type(r->type))
526 return -EINVAL;
528 if (!memrars[r->type].allocated)
529 return -ENODEV;
531 allocator = memrars[r->type].allocator;
533 BUG_ON(allocator == NULL);
536 * Allocator capacity doesn't change over time. No
537 * need to synchronize.
539 r->capacity = allocator->capacity;
541 mutex_lock(&allocator->lock);
542 r->largest_block_size = allocator->largest_free_area;
543 mutex_unlock(&allocator->lock);
544 return 0;
548 * memrar_ioctl - ioctl callback
549 * @filp: file issuing the request
550 * @cmd: command
551 * @arg: pointer to control information
553 * Perform one of the ioctls supported by the memrar device
556 static long memrar_ioctl(struct file *filp,
557 unsigned int cmd,
558 unsigned long arg)
560 void __user *argp = (void __user *)arg;
561 long result = 0;
563 struct RAR_buffer buffer;
564 struct RAR_block_info * const request = &buffer.info;
565 struct RAR_stat rar_info;
566 u32 rar_handle;
568 switch (cmd) {
569 case RAR_HANDLER_RESERVE:
570 if (copy_from_user(request,
571 argp,
572 sizeof(*request)))
573 return -EFAULT;
575 result = memrar_reserve_block(&buffer, filp);
576 if (result != 0)
577 return result;
579 return copy_to_user(argp, request, sizeof(*request));
581 case RAR_HANDLER_RELEASE:
582 if (copy_from_user(&rar_handle,
583 argp,
584 sizeof(rar_handle)))
585 return -EFAULT;
587 return memrar_release_block(rar_handle);
589 case RAR_HANDLER_STAT:
590 if (copy_from_user(&rar_info,
591 argp,
592 sizeof(rar_info)))
593 return -EFAULT;
596 * Populate the RAR_stat structure based on the RAR
597 * type given by the user
599 if (memrar_get_stat(&rar_info) != 0)
600 return -EINVAL;
603 * @todo Do we need to verify destination pointer
604 * "argp" is non-zero? Is that already done by
605 * copy_to_user()?
607 return copy_to_user(argp,
608 &rar_info,
609 sizeof(rar_info)) ? -EFAULT : 0;
611 default:
612 return -ENOTTY;
615 return 0;
/**
 * memrar_mmap - mmap helper for debugging
 * @filp:	handle doing the mapping
 * @vma:	memory area
 *
 * Support the mmap operation on the RAR space for debugging systems
 * when the memory is not locked down.  Requires CAP_SYS_RAWIO.
 */
static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/*
	 * This mmap() implementation is predominantly useful for
	 * debugging since the CPU will be prevented from accessing
	 * RAR memory by the hardware when RAR is properly locked
	 * down.
	 *
	 * In order for this implementation to be useful RAR memory
	 * must be not be locked down.  However, we only want to do
	 * that when debugging.  DO NOT leave RAR memory unlocked in a
	 * deployed device that utilizes RAR.
	 */

	size_t const size = vma->vm_end - vma->vm_start;

	/* Users pass the RAR handle as the mmap() offset parameter. */
	unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;

	struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
	unsigned long pfn;

	/* Only allow privileged apps to go poking around this way */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* Invalid RAR handle or size passed to mmap(). */
	/*
	 * NOTE(review): the third condition bounds @size by the
	 * handle's offset from the start of the kernel mapping
	 * (handle - iobase), not by the space remaining in the RAR.
	 * That looks inverted for an end-of-region check — confirm the
	 * intended bound (likely size > rar->length - (handle - iobase)).
	 */
	if (rar == NULL
	    || handle == 0
	    || size > (handle - (unsigned long) rar->iobase))
		return -EINVAL;

	/*
	 * Retrieve physical address corresponding to the RAR handle,
	 * and convert it to a page frame.
	 */
	pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;


	pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
		 handle,
		 handle + size);

	/*
	 * Map RAR memory into user space.  This is really only useful
	 * for debugging purposes since the memory won't be
	 * accessible, i.e. reads return zero and writes are ignored,
	 * when RAR access control is enabled.
	 */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    pfn,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;

	/* vma->vm_ops = &memrar_mem_ops; */

	return 0;
}
/**
 * memrar_open - device open method
 * @inode:	inode to open
 * @filp:	file handle
 *
 * As we support multiple arbitrary opens there is no work to be done
 * really.  The device is marked non-seekable.
 */
static int memrar_open(struct inode *inode, struct file *filp)
{
	/*
	 * Fix: propagate the result of nonseekable_open() instead of
	 * discarding it, so any status reported by the VFS helper
	 * reaches the caller.
	 */
	return nonseekable_open(inode, filp);
}
/**
 * memrar_release - close method for miscdev
 * @inode:	inode of device
 * @filp:	handle that is going away
 *
 * Free up all the regions that belong to this file handle.  We use
 * the handle as a natural Linux style 'lifetime' indicator and to
 * ensure resources are not leaked when their owner explodes in an
 * unplanned fashion.
 */
static int memrar_release(struct inode *inode, struct file *filp)
{
	/* Free all regions associated with the given file handle. */

	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;
	int z;

	for (z = 0; z != MRST_NUM_RAR; ++z) {
		struct memrar_rar_info * const rar = &memrars[z];

		mutex_lock(&rar->lock);

		list_for_each_entry_safe(pos,
					 tmp,
					 &rar->buffers.list,
					 list) {
			/* Only drop buffers this file handle reserved. */
			if (filp == pos->owner)
				kref_put(&pos->refcount,
					 memrar_release_block_i);
		}

		mutex_unlock(&rar->lock);
	}

	return 0;
}
743 * rar_reserve - reserve RAR memory
744 * @buffers: buffers to reserve
745 * @count: number wanted
747 * Reserve a series of buffers in the RAR space. Returns the number of
748 * buffers successfully allocated
751 size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
753 struct RAR_buffer * const end =
754 (buffers == NULL ? buffers : buffers + count);
755 struct RAR_buffer *i;
757 size_t reserve_count = 0;
759 for (i = buffers; i != end; ++i) {
760 if (memrar_reserve_block(i, NULL) == 0)
761 ++reserve_count;
762 else
763 i->bus_address = 0;
766 return reserve_count;
768 EXPORT_SYMBOL(rar_reserve);
771 * rar_release - return RAR buffers
772 * @buffers: buffers to release
773 * @size: size of released block
775 * Return a set of buffers to the RAR pool
778 size_t rar_release(struct RAR_buffer *buffers, size_t count)
780 struct RAR_buffer * const end =
781 (buffers == NULL ? buffers : buffers + count);
782 struct RAR_buffer *i;
784 size_t release_count = 0;
786 for (i = buffers; i != end; ++i) {
787 u32 * const handle = &i->info.handle;
788 if (memrar_release_block(*handle) == 0) {
790 * @todo We assume we should do this each time
791 * the ref count is decremented. Should
792 * we instead only do this when the ref
793 * count has dropped to zero, and the
794 * buffer has been completely
795 * released/unmapped?
797 *handle = 0;
798 ++release_count;
802 return release_count;
804 EXPORT_SYMBOL(rar_release);
/**
 * rar_handle_to_bus - RAR to bus address
 * @buffers:	RAR buffer structure
 * @count:	number of buffers to convert
 *
 * Turn a list of RAR handle mappings into actual bus addresses.  Note
 * that when the device is locked down the bus addresses in question
 * are not CPU accessible.
 *
 * Each successfully converted entry takes an extra reference on the
 * underlying block (kref_get); the caller is expected to balance it
 * via rar_release().  Unmatched entries get bus_address = 0.
 */
size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
{
	struct RAR_buffer * const end =
		(buffers == NULL ? buffers : buffers + count);
	struct RAR_buffer *i;
	struct memrar_buffer_info *pos;

	size_t conversion_count = 0;

	/*
	 * Find all bus addresses corresponding to the given handles.
	 *
	 * @todo Not liking this nested loop.  Optimize.
	 */
	for (i = buffers; i != end; ++i) {
		struct memrar_rar_info * const rar =
			memrar_get_rar_info(i->info.handle);

		/*
		 * Check if we have a bogus handle, and then continue
		 * with remaining buffers.
		 */
		if (rar == NULL) {
			i->bus_address = 0;
			continue;
		}

		mutex_lock(&rar->lock);

		list_for_each_entry(pos, &rar->buffers.list, list) {
			struct RAR_block_info * const user_info =
				&pos->buffer.info;

			/*
			 * Take into account handle offsets that may
			 * have been added to the base handle, such as
			 * in the following scenario:
			 *
			 *	u32 handle = base + offset;
			 *	rar_handle_to_bus(handle);
			 */
			if (i->info.handle >= user_info->handle
			    && i->info.handle < (user_info->handle
						 + user_info->size)) {
				u32 const offset =
					i->info.handle - user_info->handle;

				/* Mirror the block's type/size, adjusted
				 * for the offset into the block. */
				i->info.type = user_info->type;
				i->info.size = user_info->size - offset;
				i->bus_address =
					pos->buffer.bus_address
					+ offset;

				/* Increment the reference count. */
				kref_get(&pos->refcount);

				++conversion_count;
				break;
			} else {
				i->bus_address = 0;
			}
		}

		mutex_unlock(&rar->lock);
	}

	return conversion_count;
}
EXPORT_SYMBOL(rar_handle_to_bus);
/* File operations for the /dev/memrar character device. */
static const struct file_operations memrar_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = memrar_ioctl,
	.mmap           = memrar_mmap,
	.open           = memrar_open,
	.release        = memrar_release,
	.llseek = no_llseek,
};

/* Misc character device: dynamically assigned minor, named "memrar". */
static struct miscdevice memrar_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,    /* dynamic allocation */
	.name = "memrar",               /* /dev/memrar */
	.fops = &memrar_fops
};

/* Load-time banner, printed once from memrar_init(). */
static char const banner[] __initdata =
	KERN_INFO
	"Intel RAR Handler: " MEMRAR_VER " initialized.\n";
907 * memrar_registration_callback - RAR obtained
908 * @rar: RAR number
910 * We have been granted ownership of the RAR. Add it to our memory
911 * management tables
914 static int memrar_registration_callback(unsigned long rar)
917 * We initialize the RAR parameters early on so that we can
918 * discontinue memrar device initialization and registration
919 * if suitably configured RARs are not available.
921 return memrar_init_rar_resources(rar, memrar_miscdev.name);
/**
 * memrar_init - initialise RAR support
 *
 * Initialise support for RAR handlers.  This may get loaded before
 * the RAR support is activated, but the callbacks on the registration
 * will handle that situation for us anyway.
 */
static int __init memrar_init(void)
{
	int err;
	int i;

	printk(banner);

	/*
	 * Some delayed initialization is performed in this driver.
	 * Make sure resources that are used during driver clean-up
	 * (e.g. during driver's release() function) are fully
	 * initialized before first use.  This is particularly
	 * important for the case when the delayed initialization
	 * isn't completed, leaving behind a partially initialized
	 * driver.
	 *
	 * Such a scenario can occur when RAR is not available on the
	 * platform, and the driver is release()d.
	 */
	for (i = 0; i != ARRAY_SIZE(memrars); ++i) {
		struct memrar_rar_info * const rar = &memrars[i];
		mutex_init(&rar->lock);
		INIT_LIST_HEAD(&rar->buffers.list);
	}

	err = misc_register(&memrar_miscdev);
	if (err)
		return err;

	/* Now claim the two RARs we want */
	err = register_rar(0, memrar_registration_callback, 0);
	if (err)
		goto fail;

	err = register_rar(1, memrar_registration_callback, 1);
	if (err == 0)
		return 0;

	/* It is possible rar 0 registered and allocated resources then rar 1
	   failed so do a full resource free */
	memrar_fini_rar_resources();
fail:
	misc_deregister(&memrar_miscdev);
	return err;
}
/**
 * memrar_exit - unregister and unload
 *
 * Unregister the device and then unload any mappings and release
 * the RAR resources.
 */
static void __exit memrar_exit(void)
{
	misc_deregister(&memrar_miscdev);
	memrar_fini_rar_resources();
}


module_init(memrar_init);
module_exit(memrar_exit);

MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEMRAR_VER);
/*
Local Variables:
c-file-style: "linux"
End:
*/