/*
 * Source: Linux kernel (btrfs-unstable tree), drivers/misc/vmw_vmci/vmci_event.c
 * Merge branch 'bpf-Allow-selecting-numa-node-during-map-creation'
 * blob 84258a48029d41a2bc8ed9751deef566f3aa4a43
 */
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
16 #include <linux/vmw_vmci_defs.h>
17 #include <linux/vmw_vmci_api.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/rculist.h>
24 #include "vmci_driver.h"
25 #include "vmci_event.h"
27 #define EVENT_MAGIC 0xEABE0000
28 #define VMCI_EVENT_MAX_ATTEMPTS 10
30 struct vmci_subscription {
31 u32 id;
32 u32 event;
33 vmci_event_cb callback;
34 void *callback_data;
35 struct list_head node; /* on one of subscriber lists */
38 static struct list_head subscriber_array[VMCI_EVENT_MAX];
39 static DEFINE_MUTEX(subscriber_mutex);
41 int __init vmci_event_init(void)
43 int i;
45 for (i = 0; i < VMCI_EVENT_MAX; i++)
46 INIT_LIST_HEAD(&subscriber_array[i]);
48 return VMCI_SUCCESS;
51 void vmci_event_exit(void)
53 int e;
55 /* We free all memory at exit. */
56 for (e = 0; e < VMCI_EVENT_MAX; e++) {
57 struct vmci_subscription *cur, *p2;
58 list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
61 * We should never get here because all events
62 * should have been unregistered before we try
63 * to unload the driver module.
65 pr_warn("Unexpected free events occurring\n");
66 list_del(&cur->node);
67 kfree(cur);
73 * Find entry. Assumes subscriber_mutex is held.
75 static struct vmci_subscription *event_find(u32 sub_id)
77 int e;
79 for (e = 0; e < VMCI_EVENT_MAX; e++) {
80 struct vmci_subscription *cur;
81 list_for_each_entry(cur, &subscriber_array[e], node) {
82 if (cur->id == sub_id)
83 return cur;
86 return NULL;
90 * Actually delivers the events to the subscribers.
91 * The callback function for each subscriber is invoked.
93 static void event_deliver(struct vmci_event_msg *event_msg)
95 struct vmci_subscription *cur;
96 struct list_head *subscriber_list;
98 rcu_read_lock();
99 subscriber_list = &subscriber_array[event_msg->event_data.event];
100 list_for_each_entry_rcu(cur, subscriber_list, node) {
101 cur->callback(cur->id, &event_msg->event_data,
102 cur->callback_data);
104 rcu_read_unlock();
108 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
109 * subscribers for given event.
111 int vmci_event_dispatch(struct vmci_datagram *msg)
113 struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
115 if (msg->payload_size < sizeof(u32) ||
116 msg->payload_size > sizeof(struct vmci_event_data_max))
117 return VMCI_ERROR_INVALID_ARGS;
119 if (!VMCI_EVENT_VALID(event_msg->event_data.event))
120 return VMCI_ERROR_EVENT_UNKNOWN;
122 event_deliver(event_msg);
123 return VMCI_SUCCESS;
127 * vmci_event_subscribe() - Subscribe to a given event.
128 * @event: The event to subscribe to.
129 * @callback: The callback to invoke upon the event.
130 * @callback_data: Data to pass to the callback.
131 * @subscription_id: ID used to track subscription. Used with
132 * vmci_event_unsubscribe()
134 * Subscribes to the provided event. The callback specified will be
135 * fired from RCU critical section and therefore must not sleep.
137 int vmci_event_subscribe(u32 event,
138 vmci_event_cb callback,
139 void *callback_data,
140 u32 *new_subscription_id)
142 struct vmci_subscription *sub;
143 int attempts;
144 int retval;
145 bool have_new_id = false;
147 if (!new_subscription_id) {
148 pr_devel("%s: Invalid subscription (NULL)\n", __func__);
149 return VMCI_ERROR_INVALID_ARGS;
152 if (!VMCI_EVENT_VALID(event) || !callback) {
153 pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
154 __func__, event, callback, callback_data);
155 return VMCI_ERROR_INVALID_ARGS;
158 sub = kzalloc(sizeof(*sub), GFP_KERNEL);
159 if (!sub)
160 return VMCI_ERROR_NO_MEM;
162 sub->id = VMCI_EVENT_MAX;
163 sub->event = event;
164 sub->callback = callback;
165 sub->callback_data = callback_data;
166 INIT_LIST_HEAD(&sub->node);
168 mutex_lock(&subscriber_mutex);
170 /* Creation of a new event is always allowed. */
171 for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
172 static u32 subscription_id;
174 * We try to get an id a couple of time before
175 * claiming we are out of resources.
178 /* Test for duplicate id. */
179 if (!event_find(++subscription_id)) {
180 sub->id = subscription_id;
181 have_new_id = true;
182 break;
186 if (have_new_id) {
187 list_add_rcu(&sub->node, &subscriber_array[event]);
188 retval = VMCI_SUCCESS;
189 } else {
190 retval = VMCI_ERROR_NO_RESOURCES;
193 mutex_unlock(&subscriber_mutex);
195 *new_subscription_id = sub->id;
196 return retval;
198 EXPORT_SYMBOL_GPL(vmci_event_subscribe);
201 * vmci_event_unsubscribe() - unsubscribe from an event.
202 * @sub_id: A subscription ID as provided by vmci_event_subscribe()
204 * Unsubscribe from given event. Removes it from list and frees it.
205 * Will return callback_data if requested by caller.
207 int vmci_event_unsubscribe(u32 sub_id)
209 struct vmci_subscription *s;
211 mutex_lock(&subscriber_mutex);
212 s = event_find(sub_id);
213 if (s)
214 list_del_rcu(&s->node);
215 mutex_unlock(&subscriber_mutex);
217 if (!s)
218 return VMCI_ERROR_NOT_FOUND;
220 synchronize_rcu();
221 kfree(s);
223 return VMCI_SUCCESS;
225 EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);