/*
 * drivers/xen/xencomm.c
 * (source extracted from the linux-2.6/mini2440.git tree,
 *  blob 797cb4e31f070bbf00d42058aa7814153fe7b4ef; the unrelated gitweb
 *  page title "coredump: make mm->core_state visible to ->core_dump()"
 *  was scrape residue and is preserved here only as a note)
 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corp. 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <xen/xencomm.h>
#include <xen/interface/xen.h>
#ifdef __ia64__
#include <asm/xen/xencomm.h>	/* for is_kern_addr() */
#endif

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif
34 static int xencomm_init(struct xencomm_desc *desc,
35 void *buffer, unsigned long bytes)
37 unsigned long recorded = 0;
38 int i = 0;
40 while ((recorded < bytes) && (i < desc->nr_addrs)) {
41 unsigned long vaddr = (unsigned long)buffer + recorded;
42 unsigned long paddr;
43 int offset;
44 int chunksz;
46 offset = vaddr % PAGE_SIZE; /* handle partial pages */
47 chunksz = min(PAGE_SIZE - offset, bytes - recorded);
49 paddr = xencomm_vtop(vaddr);
50 if (paddr == ~0UL) {
51 printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
52 __func__, vaddr);
53 return -EINVAL;
56 desc->address[i++] = paddr;
57 recorded += chunksz;
60 if (recorded < bytes) {
61 printk(KERN_DEBUG
62 "%s: could only translate %ld of %ld bytes\n",
63 __func__, recorded, bytes);
64 return -ENOSPC;
67 /* mark remaining addresses invalid (just for safety) */
68 while (i < desc->nr_addrs)
69 desc->address[i++] = XENCOMM_INVALID;
71 desc->magic = XENCOMM_MAGIC;
73 return 0;
76 static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
77 void *buffer, unsigned long bytes)
79 struct xencomm_desc *desc;
80 unsigned long buffer_ulong = (unsigned long)buffer;
81 unsigned long start = buffer_ulong & PAGE_MASK;
82 unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
83 unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
84 unsigned long size = sizeof(*desc) +
85 sizeof(desc->address[0]) * nr_addrs;
88 * slab allocator returns at least sizeof(void*) aligned pointer.
89 * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
90 * cross page boundary.
92 if (sizeof(*desc) > sizeof(void *)) {
93 unsigned long order = get_order(size);
94 desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
95 order);
96 if (desc == NULL)
97 return NULL;
99 desc->nr_addrs =
100 ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
101 sizeof(*desc->address);
102 } else {
103 desc = kmalloc(size, gfp_mask);
104 if (desc == NULL)
105 return NULL;
107 desc->nr_addrs = nr_addrs;
109 return desc;
112 void xencomm_free(struct xencomm_handle *desc)
114 if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
115 struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
116 if (sizeof(*desc__) > sizeof(void *)) {
117 unsigned long size = sizeof(*desc__) +
118 sizeof(desc__->address[0]) * desc__->nr_addrs;
119 unsigned long order = get_order(size);
120 free_pages((unsigned long)__va(desc), order);
121 } else
122 kfree(__va(desc));
126 static int xencomm_create(void *buffer, unsigned long bytes,
127 struct xencomm_desc **ret, gfp_t gfp_mask)
129 struct xencomm_desc *desc;
130 int rc;
132 pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
134 if (bytes == 0) {
135 /* don't create a descriptor; Xen recognizes NULL. */
136 BUG_ON(buffer != NULL);
137 *ret = NULL;
138 return 0;
141 BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
143 desc = xencomm_alloc(gfp_mask, buffer, bytes);
144 if (!desc) {
145 printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
146 return -ENOMEM;
149 rc = xencomm_init(desc, buffer, bytes);
150 if (rc) {
151 printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
152 xencomm_free((struct xencomm_handle *)__pa(desc));
153 return rc;
156 *ret = desc;
157 return 0;
160 /* check if memory address is within VMALLOC region */
161 static int is_phys_contiguous(unsigned long addr)
163 if (!is_kernel_addr(addr))
164 return 0;
166 return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
169 static struct xencomm_handle *xencomm_create_inline(void *ptr)
171 unsigned long paddr;
173 BUG_ON(!is_phys_contiguous((unsigned long)ptr));
175 paddr = (unsigned long)xencomm_pa(ptr);
176 BUG_ON(paddr & XENCOMM_INLINE_FLAG);
177 return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
180 /* "mini" routine, for stack-based communications: */
181 static int xencomm_create_mini(void *buffer,
182 unsigned long bytes, struct xencomm_mini *xc_desc,
183 struct xencomm_desc **ret)
185 int rc = 0;
186 struct xencomm_desc *desc;
187 BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
189 desc = (void *)xc_desc;
191 desc->nr_addrs = XENCOMM_MINI_ADDRS;
193 rc = xencomm_init(desc, buffer, bytes);
194 if (!rc)
195 *ret = desc;
197 return rc;
200 struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
202 int rc;
203 struct xencomm_desc *desc;
205 if (is_phys_contiguous((unsigned long)ptr))
206 return xencomm_create_inline(ptr);
208 rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
210 if (rc || desc == NULL)
211 return NULL;
213 return xencomm_pa(desc);
216 struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
217 struct xencomm_mini *xc_desc)
219 int rc;
220 struct xencomm_desc *desc = NULL;
222 if (is_phys_contiguous((unsigned long)ptr))
223 return xencomm_create_inline(ptr);
225 rc = xencomm_create_mini(ptr, bytes, xc_desc,
226 &desc);
228 if (rc)
229 return NULL;
231 return xencomm_pa(desc);