/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
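
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * commonly watches its peer's directory in the store.  The callback name
 * below is hypothetical; dev->otherend and dev->otherend_watch are assumed
 * to be the usual struct xenbus_device fields.
 *
 *	static void otherend_changed(struct xenbus_watch *watch,
 *				     const char **vec, unsigned int len);
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &dev->otherend_watch,
 *				otherend_changed);
 *	On failure, watch->node is left NULL and the device is already
 *	switching to Closing, so the caller has nothing to undo.
 */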
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path constructed from the given @pathfmt, using the
 * given xenbus_watch structure for storage, and the given @callback function
 * as the callback.  Return 0 on success, or -errno on error.  On success, the
 * generated path will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to %XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
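
/*
 * Usage sketch (illustrative): same as above, but letting the helper build
 * and own the path string, e.g. watching "<otherend>/state".  The callback
 * name is hypothetical.
 *
 *	err = xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
 *				   "%s/%s", dev->otherend, "state");
 *	On success, watch->node must eventually be kfree()d by the caller
 *	after the watch is unregistered.
 */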
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}
/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new_state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
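
/*
 * Usage sketch (illustrative): a frontend that has finished publishing its
 * ring reference and event channel would advertise readiness with
 *
 *	xenbus_switch_state(dev, XenbusStateConnected);
 *
 * and later begin an orderly teardown by switching to XenbusStateClosing
 * and finally XenbusStateClosed.
 */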
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}
/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
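
/*
 * Usage sketch (illustrative): report a non-fatal problem, e.g.
 *
 *	xenbus_dev_error(dev, err, "reading %s/ring-ref", dev->nodename);
 *
 * The message lands under error/<nodename> in the store and in the kernel
 * log; the device state is left unchanged.  (The "ring-ref" key here is
 * only an example.)
 */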
/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */

void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
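
/*
 * Usage sketch (illustrative): as xenbus_dev_error(), but for errors the
 * driver cannot recover from, e.g.
 *
 *	sring = (void *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 *	if (!sring) {
 *		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 *		return -ENOMEM;
 *	}
 *
 * Besides recording the error, this schedules the move to XenbusStateClosing.
 * (The shared-ring allocation is just an assumed context.)
 */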
/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return the grant reference (a non-negative value) on success, or -errno on
 * error.  On error, the device will switch to XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
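
/*
 * Usage sketch (illustrative): grant the peer access to a freshly allocated
 * shared ring page.  virt_to_mfn() is assumed to be provided by the Xen
 * headers on this architecture.
 *
 *	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *	if (err < 0)
 *		goto fail;
 *	ring_ref = err;		the non-negative return is the grant reference
 */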
/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
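
/*
 * Usage sketch (illustrative): allocate an unbound channel and publish the
 * port so the peer can bind to it.  Error handling is omitted, and the
 * "event-channel" key is only the conventional name.
 *
 *	int port;
 *
 *	err = xenbus_alloc_evtchn(dev, &port);
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%u", port);
 */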
/**
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
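
/*
 * Usage sketch (illustrative): a backend reads the port its frontend
 * advertised and binds to it.  The "event-channel" key is conventional,
 * not something this function requires.
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%d",
 *			   &remote_port);
 *	if (err == 1)
 *		err = xenbus_bind_evtchn(dev, remote_port, &local_port);
 */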
/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
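
/*
 * Usage sketch (illustrative): release the channel during disconnect, e.g.
 *
 *	if (info->evtchn)
 *		xenbus_free_evtchn(dev, info->evtchn);
 *
 * where info->evtchn is an assumed per-device field holding the port
 * obtained from xenbus_alloc_evtchn() or xenbus_bind_evtchn().
 */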
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
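
/*
 * Usage sketch (illustrative): a backend maps the ring page the frontend
 * granted, using a grant reference assumed to have been read from the store
 * with xenbus_scanf() or xenbus_gather().
 *
 *	void *ring_area;
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &ring_area);
 *	if (err)
 *		return err;	already reported via xenbus_dev_fatal()
 *	sring = ring_area;
 */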
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, or a GNTST_* status on error
 * (see xen/include/interface/grant_table.h). If an error is returned, device
 * will switch to XenbusStateClosing and the error message will be saved in
 * XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
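
/*
 * Usage sketch (illustrative): as above, but with the caller supplying the
 * virtual address and keeping the grant handle for the later unmap.
 *
 *	grant_handle_t handle;
 *
 *	err = xenbus_map_ring(dev, ring_ref, &handle, vaddr);
 *
 * vaddr must already be backed by a page set aside for this mapping.
 */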
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
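
/*
 * Usage sketch (illustrative): the counterpart of xenbus_map_ring_valloc()
 * during disconnect; only the virtual address is needed because the grant
 * handle was stashed in the vm_struct above.
 *
 *	err = xenbus_unmap_ring_vfree(dev, ring_area);
 */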
/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
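
/*
 * Usage sketch (illustrative): the counterpart of xenbus_map_ring(); the
 * caller passes back the handle it saved at map time.
 *
 *	err = xenbus_unmap_ring(dev, handle, vaddr);
 */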
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
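
/*
 * Usage sketch (illustrative): poll the peer's published state, e.g. while
 * waiting for a backend to finish closing.
 *
 *	enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
 *
 *	if (state == XenbusStateClosed)
 *		proceed with teardown;
 */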