staging: hv: move netvsc_initialize() to clean up forward declaration
[linux-2.6/kvm.git] / drivers / staging / hv / netvsc.c
blob6eaecc11b0f42bb8a3127b948fe40f86bb7beb63
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
26 #include <linux/mm.h>
27 #include <linux/delay.h>
28 #include <linux/io.h>
29 #include <linux/slab.h>
30 #include "hv_api.h"
31 #include "logging.h"
32 #include "netvsc.h"
33 #include "rndis_filter.h"
34 #include "channel.h"
37 /* Globals */
38 static const char *driver_name = "netvsc";
40 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
41 static const struct hv_guid netvsc_device_type = {
42 .data = {
43 0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
44 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
48 static void netvsc_channel_cb(void *context);
50 static int netvsc_init_send_buf(struct hv_device *device);
52 static int netvsc_init_recv_buf(struct hv_device *device);
54 static int netvsc_destroy_send_buf(struct netvsc_device *net_device);
56 static int netvsc_destroy_recv_buf(struct netvsc_device *net_device);
58 static int netvsc_connect_vsp(struct hv_device *device);
60 static void netvsc_send_completion(struct hv_device *device,
61 struct vmpacket_descriptor *packet);
63 static void netvsc_receive(struct hv_device *device,
64 struct vmpacket_descriptor *packet);
66 static void netvsc_receive_completion(void *context);
68 static void netvsc_send_recv_completion(struct hv_device *device,
69 u64 transaction_id);
72 static struct netvsc_device *alloc_net_device(struct hv_device *device)
74 struct netvsc_device *net_device;
76 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
77 if (!net_device)
78 return NULL;
80 /* Set to 2 to allow both inbound and outbound traffic */
81 atomic_cmpxchg(&net_device->refcnt, 0, 2);
83 net_device->dev = device;
84 device->ext = net_device;
86 return net_device;
89 static void free_net_device(struct netvsc_device *device)
91 WARN_ON(atomic_read(&device->refcnt) != 0);
92 device->dev->ext = NULL;
93 kfree(device);
97 /* Get the net device object iff exists and its refcount > 1 */
98 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
100 struct netvsc_device *net_device;
102 net_device = device->ext;
103 if (net_device && atomic_read(&net_device->refcnt) > 1)
104 atomic_inc(&net_device->refcnt);
105 else
106 net_device = NULL;
108 return net_device;
111 /* Get the net device object iff exists and its refcount > 0 */
112 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
114 struct netvsc_device *net_device;
116 net_device = device->ext;
117 if (net_device && atomic_read(&net_device->refcnt))
118 atomic_inc(&net_device->refcnt);
119 else
120 net_device = NULL;
122 return net_device;
125 static void put_net_device(struct hv_device *device)
127 struct netvsc_device *net_device;
129 net_device = device->ext;
131 atomic_dec(&net_device->refcnt);
134 static struct netvsc_device *release_outbound_net_device(
135 struct hv_device *device)
137 struct netvsc_device *net_device;
139 net_device = device->ext;
140 if (net_device == NULL)
141 return NULL;
143 /* Busy wait until the ref drop to 2, then set it to 1 */
144 while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
145 udelay(100);
147 return net_device;
150 static struct netvsc_device *release_inbound_net_device(
151 struct hv_device *device)
153 struct netvsc_device *net_device;
155 net_device = device->ext;
156 if (net_device == NULL)
157 return NULL;
159 /* Busy wait until the ref drop to 1, then set it to 0 */
160 while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
161 udelay(100);
163 device->ext = NULL;
164 return net_device;
167 static int netvsc_init_recv_buf(struct hv_device *device)
169 int ret = 0;
170 struct netvsc_device *net_device;
171 struct nvsp_message *init_packet;
173 net_device = get_outbound_net_device(device);
174 if (!net_device) {
175 dev_err(&device->device, "unable to get net device..."
176 "device being destroyed?");
177 return -1;
180 net_device->recv_buf =
181 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
182 get_order(net_device->recv_buf_size));
183 if (!net_device->recv_buf) {
184 dev_err(&device->device, "unable to allocate receive "
185 "buffer of size %d", net_device->recv_buf_size);
186 ret = -1;
187 goto cleanup;
191 * Establish the gpadl handle for this buffer on this
192 * channel. Note: This call uses the vmbus connection rather
193 * than the channel to establish the gpadl handle.
195 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
196 net_device->recv_buf_size,
197 &net_device->recv_buf_gpadl_handle);
198 if (ret != 0) {
199 dev_err(&device->device,
200 "unable to establish receive buffer's gpadl");
201 goto cleanup;
205 /* Notify the NetVsp of the gpadl handle */
206 init_packet = &net_device->channel_init_pkt;
208 memset(init_packet, 0, sizeof(struct nvsp_message));
210 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
211 init_packet->msg.v1_msg.send_recv_buf.
212 gpadl_handle = net_device->recv_buf_gpadl_handle;
213 init_packet->msg.v1_msg.
214 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
216 /* Send the gpadl notification request */
217 net_device->wait_condition = 0;
218 ret = vmbus_sendpacket(device->channel, init_packet,
219 sizeof(struct nvsp_message),
220 (unsigned long)init_packet,
221 VM_PKT_DATA_INBAND,
222 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
223 if (ret != 0) {
224 dev_err(&device->device,
225 "unable to send receive buffer's gpadl to netvsp");
226 goto cleanup;
229 wait_event_timeout(net_device->channel_init_wait,
230 net_device->wait_condition,
231 msecs_to_jiffies(1000));
232 BUG_ON(net_device->wait_condition == 0);
235 /* Check the response */
236 if (init_packet->msg.v1_msg.
237 send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
238 dev_err(&device->device, "Unable to complete receive buffer "
239 "initialzation with NetVsp - status %d",
240 init_packet->msg.v1_msg.
241 send_recv_buf_complete.status);
242 ret = -1;
243 goto cleanup;
246 /* Parse the response */
248 net_device->recv_section_cnt = init_packet->msg.
249 v1_msg.send_recv_buf_complete.num_sections;
251 net_device->recv_section = kmalloc(net_device->recv_section_cnt
252 * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
253 if (net_device->recv_section == NULL) {
254 ret = -1;
255 goto cleanup;
258 memcpy(net_device->recv_section,
259 init_packet->msg.v1_msg.
260 send_recv_buf_complete.sections,
261 net_device->recv_section_cnt *
262 sizeof(struct nvsp_1_receive_buffer_section));
265 * For 1st release, there should only be 1 section that represents the
266 * entire receive buffer
268 if (net_device->recv_section_cnt != 1 ||
269 net_device->recv_section->offset != 0) {
270 ret = -1;
271 goto cleanup;
274 goto exit;
276 cleanup:
277 netvsc_destroy_recv_buf(net_device);
279 exit:
280 put_net_device(device);
281 return ret;
284 static int netvsc_init_send_buf(struct hv_device *device)
286 int ret = 0;
287 struct netvsc_device *net_device;
288 struct nvsp_message *init_packet;
290 net_device = get_outbound_net_device(device);
291 if (!net_device) {
292 dev_err(&device->device, "unable to get net device..."
293 "device being destroyed?");
294 return -1;
296 if (net_device->send_buf_size <= 0) {
297 ret = -EINVAL;
298 goto cleanup;
301 net_device->send_buf =
302 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
303 get_order(net_device->send_buf_size));
304 if (!net_device->send_buf) {
305 dev_err(&device->device, "unable to allocate send "
306 "buffer of size %d", net_device->send_buf_size);
307 ret = -1;
308 goto cleanup;
312 * Establish the gpadl handle for this buffer on this
313 * channel. Note: This call uses the vmbus connection rather
314 * than the channel to establish the gpadl handle.
316 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
317 net_device->send_buf_size,
318 &net_device->send_buf_gpadl_handle);
319 if (ret != 0) {
320 dev_err(&device->device, "unable to establish send buffer's gpadl");
321 goto cleanup;
324 /* Notify the NetVsp of the gpadl handle */
325 init_packet = &net_device->channel_init_pkt;
327 memset(init_packet, 0, sizeof(struct nvsp_message));
329 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
330 init_packet->msg.v1_msg.send_recv_buf.
331 gpadl_handle = net_device->send_buf_gpadl_handle;
332 init_packet->msg.v1_msg.send_recv_buf.id =
333 NETVSC_SEND_BUFFER_ID;
335 /* Send the gpadl notification request */
336 net_device->wait_condition = 0;
337 ret = vmbus_sendpacket(device->channel, init_packet,
338 sizeof(struct nvsp_message),
339 (unsigned long)init_packet,
340 VM_PKT_DATA_INBAND,
341 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
342 if (ret != 0) {
343 dev_err(&device->device,
344 "unable to send receive buffer's gpadl to netvsp");
345 goto cleanup;
348 wait_event_timeout(net_device->channel_init_wait,
349 net_device->wait_condition,
350 msecs_to_jiffies(1000));
351 BUG_ON(net_device->wait_condition == 0);
353 /* Check the response */
354 if (init_packet->msg.v1_msg.
355 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
356 dev_err(&device->device, "Unable to complete send buffer "
357 "initialzation with NetVsp - status %d",
358 init_packet->msg.v1_msg.
359 send_send_buf_complete.status);
360 ret = -1;
361 goto cleanup;
364 net_device->send_section_size = init_packet->
365 msg.v1_msg.send_send_buf_complete.section_size;
367 goto exit;
369 cleanup:
370 netvsc_destroy_send_buf(net_device);
372 exit:
373 put_net_device(device);
374 return ret;
377 static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
379 struct nvsp_message *revoke_packet;
380 int ret = 0;
383 * If we got a section count, it means we received a
384 * SendReceiveBufferComplete msg (ie sent
385 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
386 * to send a revoke msg here
388 if (net_device->recv_section_cnt) {
389 /* Send the revoke receive buffer */
390 revoke_packet = &net_device->revoke_packet;
391 memset(revoke_packet, 0, sizeof(struct nvsp_message));
393 revoke_packet->hdr.msg_type =
394 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
395 revoke_packet->msg.v1_msg.
396 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
398 ret = vmbus_sendpacket(net_device->dev->channel,
399 revoke_packet,
400 sizeof(struct nvsp_message),
401 (unsigned long)revoke_packet,
402 VM_PKT_DATA_INBAND, 0);
404 * If we failed here, we might as well return and
405 * have a leak rather than continue and a bugchk
407 if (ret != 0) {
408 dev_err(&net_device->dev->device, "unable to send "
409 "revoke receive buffer to netvsp");
410 return -1;
414 /* Teardown the gpadl on the vsp end */
415 if (net_device->recv_buf_gpadl_handle) {
416 ret = vmbus_teardown_gpadl(net_device->dev->channel,
417 net_device->recv_buf_gpadl_handle);
419 /* If we failed here, we might as well return and have a leak rather than continue and a bugchk */
420 if (ret != 0) {
421 dev_err(&net_device->dev->device,
422 "unable to teardown receive buffer's gpadl");
423 return -1;
425 net_device->recv_buf_gpadl_handle = 0;
428 if (net_device->recv_buf) {
429 /* Free up the receive buffer */
430 free_pages((unsigned long)net_device->recv_buf,
431 get_order(net_device->recv_buf_size));
432 net_device->recv_buf = NULL;
435 if (net_device->recv_section) {
436 net_device->recv_section_cnt = 0;
437 kfree(net_device->recv_section);
438 net_device->recv_section = NULL;
441 return ret;
444 static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
446 struct nvsp_message *revoke_packet;
447 int ret = 0;
450 * If we got a section count, it means we received a
451 * SendReceiveBufferComplete msg (ie sent
452 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
453 * to send a revoke msg here
455 if (net_device->send_section_size) {
456 /* Send the revoke send buffer */
457 revoke_packet = &net_device->revoke_packet;
458 memset(revoke_packet, 0, sizeof(struct nvsp_message));
460 revoke_packet->hdr.msg_type =
461 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
462 revoke_packet->msg.v1_msg.
463 revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;
465 ret = vmbus_sendpacket(net_device->dev->channel,
466 revoke_packet,
467 sizeof(struct nvsp_message),
468 (unsigned long)revoke_packet,
469 VM_PKT_DATA_INBAND, 0);
471 * If we failed here, we might as well return and have a leak
472 * rather than continue and a bugchk
474 if (ret != 0) {
475 dev_err(&net_device->dev->device, "unable to send "
476 "revoke send buffer to netvsp");
477 return -1;
481 /* Teardown the gpadl on the vsp end */
482 if (net_device->send_buf_gpadl_handle) {
483 ret = vmbus_teardown_gpadl(net_device->dev->channel,
484 net_device->send_buf_gpadl_handle);
487 * If we failed here, we might as well return and have a leak
488 * rather than continue and a bugchk
490 if (ret != 0) {
491 dev_err(&net_device->dev->device,
492 "unable to teardown send buffer's gpadl");
493 return -1;
495 net_device->send_buf_gpadl_handle = 0;
498 if (net_device->send_buf) {
499 /* Free up the receive buffer */
500 free_pages((unsigned long)net_device->send_buf,
501 get_order(net_device->send_buf_size));
502 net_device->send_buf = NULL;
505 return ret;
509 static int netvsc_connect_vsp(struct hv_device *device)
511 int ret;
512 struct netvsc_device *net_device;
513 struct nvsp_message *init_packet;
514 int ndis_version;
516 net_device = get_outbound_net_device(device);
517 if (!net_device) {
518 dev_err(&device->device, "unable to get net device..."
519 "device being destroyed?");
520 return -1;
523 init_packet = &net_device->channel_init_pkt;
525 memset(init_packet, 0, sizeof(struct nvsp_message));
526 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
527 init_packet->msg.init_msg.init.min_protocol_ver =
528 NVSP_MIN_PROTOCOL_VERSION;
529 init_packet->msg.init_msg.init.max_protocol_ver =
530 NVSP_MAX_PROTOCOL_VERSION;
532 /* Send the init request */
533 net_device->wait_condition = 0;
534 ret = vmbus_sendpacket(device->channel, init_packet,
535 sizeof(struct nvsp_message),
536 (unsigned long)init_packet,
537 VM_PKT_DATA_INBAND,
538 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
540 if (ret != 0)
541 goto cleanup;
543 wait_event_timeout(net_device->channel_init_wait,
544 net_device->wait_condition,
545 msecs_to_jiffies(1000));
546 if (net_device->wait_condition == 0) {
547 ret = -ETIMEDOUT;
548 goto cleanup;
551 if (init_packet->msg.init_msg.init_complete.status !=
552 NVSP_STAT_SUCCESS) {
553 ret = -1;
554 goto cleanup;
557 if (init_packet->msg.init_msg.init_complete.
558 negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
559 ret = -1;
560 goto cleanup;
562 /* Send the ndis version */
563 memset(init_packet, 0, sizeof(struct nvsp_message));
565 ndis_version = 0x00050000;
567 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
568 init_packet->msg.v1_msg.
569 send_ndis_ver.ndis_major_ver =
570 (ndis_version & 0xFFFF0000) >> 16;
571 init_packet->msg.v1_msg.
572 send_ndis_ver.ndis_minor_ver =
573 ndis_version & 0xFFFF;
575 /* Send the init request */
576 ret = vmbus_sendpacket(device->channel, init_packet,
577 sizeof(struct nvsp_message),
578 (unsigned long)init_packet,
579 VM_PKT_DATA_INBAND, 0);
580 if (ret != 0) {
581 ret = -1;
582 goto cleanup;
585 /* Post the big receive buffer to NetVSP */
586 ret = netvsc_init_recv_buf(device);
587 if (ret == 0)
588 ret = netvsc_init_send_buf(device);
590 cleanup:
591 put_net_device(device);
592 return ret;
/*
 * NetVscDisconnectFromVsp - Tear down both host-shared buffers.
 * NOTE(review): CamelCase name is inconsistent with the rest of the file;
 * renaming would need its caller updated in the same patch.
 */
static void NetVscDisconnectFromVsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
	netvsc_destroy_send_buf(net_device);
}
602 * netvsc_device_add - Callback when the device belonging to this
603 * driver is added
605 static int netvsc_device_add(struct hv_device *device, void *additional_info)
607 int ret = 0;
608 int i;
609 struct netvsc_device *net_device;
610 struct hv_netvsc_packet *packet, *pos;
611 struct netvsc_driver *net_driver =
612 (struct netvsc_driver *)device->drv;
614 net_device = alloc_net_device(device);
615 if (!net_device) {
616 ret = -1;
617 goto cleanup;
620 /* Initialize the NetVSC channel extension */
621 net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
622 spin_lock_init(&net_device->recv_pkt_list_lock);
624 net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
626 INIT_LIST_HEAD(&net_device->recv_pkt_list);
628 for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
629 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
630 (NETVSC_RECEIVE_SG_COUNT *
631 sizeof(struct hv_page_buffer)), GFP_KERNEL);
632 if (!packet)
633 break;
635 list_add_tail(&packet->list_ent,
636 &net_device->recv_pkt_list);
638 init_waitqueue_head(&net_device->channel_init_wait);
640 /* Open the channel */
641 ret = vmbus_open(device->channel, net_driver->ring_buf_size,
642 net_driver->ring_buf_size, NULL, 0,
643 netvsc_channel_cb, device);
645 if (ret != 0) {
646 dev_err(&device->device, "unable to open channel: %d", ret);
647 ret = -1;
648 goto cleanup;
651 /* Channel is opened */
652 pr_info("hv_netvsc channel opened successfully");
654 /* Connect with the NetVsp */
655 ret = netvsc_connect_vsp(device);
656 if (ret != 0) {
657 dev_err(&device->device,
658 "unable to connect to NetVSP - %d", ret);
659 ret = -1;
660 goto close;
663 return ret;
665 close:
666 /* Now, we can close the channel safely */
667 vmbus_close(device->channel);
669 cleanup:
671 if (net_device) {
672 list_for_each_entry_safe(packet, pos,
673 &net_device->recv_pkt_list,
674 list_ent) {
675 list_del(&packet->list_ent);
676 kfree(packet);
679 release_outbound_net_device(device);
680 release_inbound_net_device(device);
682 free_net_device(net_device);
685 return ret;
689 * netvsc_device_remove - Callback when the root bus device is removed
691 static int netvsc_device_remove(struct hv_device *device)
693 struct netvsc_device *net_device;
694 struct hv_netvsc_packet *netvsc_packet, *pos;
696 /* Stop outbound traffic ie sends and receives completions */
697 net_device = release_outbound_net_device(device);
698 if (!net_device) {
699 dev_err(&device->device, "No net device present!!");
700 return -1;
703 /* Wait for all send completions */
704 while (atomic_read(&net_device->num_outstanding_sends)) {
705 dev_err(&device->device,
706 "waiting for %d requests to complete...",
707 atomic_read(&net_device->num_outstanding_sends));
708 udelay(100);
711 NetVscDisconnectFromVsp(net_device);
713 /* Stop inbound traffic ie receives and sends completions */
714 net_device = release_inbound_net_device(device);
716 /* At this point, no one should be accessing netDevice except in here */
717 dev_notice(&device->device, "net device safe to remove");
719 /* Now, we can close the channel safely */
720 vmbus_close(device->channel);
722 /* Release all resources */
723 list_for_each_entry_safe(netvsc_packet, pos,
724 &net_device->recv_pkt_list, list_ent) {
725 list_del(&netvsc_packet->list_ent);
726 kfree(netvsc_packet);
729 free_net_device(net_device);
730 return 0;
/*
 * netvsc_cleanup - Perform any cleanup when the driver is removed.
 * Intentionally empty: all per-device teardown happens in
 * netvsc_device_remove().
 */
static void netvsc_cleanup(struct hv_driver *drv)
{
}
740 static void netvsc_send_completion(struct hv_device *device,
741 struct vmpacket_descriptor *packet)
743 struct netvsc_device *net_device;
744 struct nvsp_message *nvsp_packet;
745 struct hv_netvsc_packet *nvsc_packet;
747 net_device = get_inbound_net_device(device);
748 if (!net_device) {
749 dev_err(&device->device, "unable to get net device..."
750 "device being destroyed?");
751 return;
754 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
755 (packet->offset8 << 3));
757 if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
758 (nvsp_packet->hdr.msg_type ==
759 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
760 (nvsp_packet->hdr.msg_type ==
761 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
762 /* Copy the response back */
763 memcpy(&net_device->channel_init_pkt, nvsp_packet,
764 sizeof(struct nvsp_message));
765 net_device->wait_condition = 1;
766 wake_up(&net_device->channel_init_wait);
767 } else if (nvsp_packet->hdr.msg_type ==
768 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
769 /* Get the send context */
770 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
771 packet->trans_id;
773 /* Notify the layer above us */
774 nvsc_packet->completion.send.send_completion(
775 nvsc_packet->completion.send.send_completion_ctx);
777 atomic_dec(&net_device->num_outstanding_sends);
778 } else {
779 dev_err(&device->device, "Unknown send completion packet type- "
780 "%d received!!", nvsp_packet->hdr.msg_type);
783 put_net_device(device);
786 static int netvsc_send(struct hv_device *device,
787 struct hv_netvsc_packet *packet)
789 struct netvsc_device *net_device;
790 int ret = 0;
792 struct nvsp_message sendMessage;
794 net_device = get_outbound_net_device(device);
795 if (!net_device) {
796 dev_err(&device->device, "net device (%p) shutting down..."
797 "ignoring outbound packets", net_device);
798 return -2;
801 sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
802 if (packet->is_data_pkt) {
803 /* 0 is RMC_DATA; */
804 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
805 } else {
806 /* 1 is RMC_CONTROL; */
807 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
810 /* Not using send buffer section */
811 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
812 0xFFFFFFFF;
813 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
815 if (packet->page_buf_cnt) {
816 ret = vmbus_sendpacket_pagebuffer(device->channel,
817 packet->page_buf,
818 packet->page_buf_cnt,
819 &sendMessage,
820 sizeof(struct nvsp_message),
821 (unsigned long)packet);
822 } else {
823 ret = vmbus_sendpacket(device->channel, &sendMessage,
824 sizeof(struct nvsp_message),
825 (unsigned long)packet,
826 VM_PKT_DATA_INBAND,
827 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
831 if (ret != 0)
832 dev_err(&device->device, "Unable to send packet %p ret %d",
833 packet, ret);
835 atomic_inc(&net_device->num_outstanding_sends);
836 put_net_device(device);
837 return ret;
840 static void netvsc_receive(struct hv_device *device,
841 struct vmpacket_descriptor *packet)
843 struct netvsc_device *net_device;
844 struct vmtransfer_page_packet_header *vmxferpage_packet;
845 struct nvsp_message *nvsp_packet;
846 struct hv_netvsc_packet *netvsc_packet = NULL;
847 unsigned long start;
848 unsigned long end, end_virtual;
849 /* struct netvsc_driver *netvscDriver; */
850 struct xferpage_packet *xferpage_packet = NULL;
851 int i, j;
852 int count = 0, bytes_remain = 0;
853 unsigned long flags;
854 LIST_HEAD(listHead);
856 net_device = get_inbound_net_device(device);
857 if (!net_device) {
858 dev_err(&device->device, "unable to get net device..."
859 "device being destroyed?");
860 return;
864 * All inbound packets other than send completion should be xfer page
865 * packet
867 if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
868 dev_err(&device->device, "Unknown packet type received - %d",
869 packet->type);
870 put_net_device(device);
871 return;
874 nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
875 (packet->offset8 << 3));
877 /* Make sure this is a valid nvsp packet */
878 if (nvsp_packet->hdr.msg_type !=
879 NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
880 dev_err(&device->device, "Unknown nvsp packet type received-"
881 " %d", nvsp_packet->hdr.msg_type);
882 put_net_device(device);
883 return;
886 vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
888 if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
889 dev_err(&device->device, "Invalid xfer page set id - "
890 "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
891 vmxferpage_packet->xfer_pageset_id);
892 put_net_device(device);
893 return;
897 * Grab free packets (range count + 1) to represent this xfer
898 * page packet. +1 to represent the xfer page packet itself.
899 * We grab it here so that we know exactly how many we can
900 * fulfil
902 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
903 while (!list_empty(&net_device->recv_pkt_list)) {
904 list_move_tail(net_device->recv_pkt_list.next, &listHead);
905 if (++count == vmxferpage_packet->range_cnt + 1)
906 break;
908 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
911 * We need at least 2 netvsc pkts (1 to represent the xfer
912 * page and at least 1 for the range) i.e. we can handled
913 * some of the xfer page packet ranges...
915 if (count < 2) {
916 dev_err(&device->device, "Got only %d netvsc pkt...needed "
917 "%d pkts. Dropping this xfer page packet completely!",
918 count, vmxferpage_packet->range_cnt + 1);
920 /* Return it to the freelist */
921 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
922 for (i = count; i != 0; i--) {
923 list_move_tail(listHead.next,
924 &net_device->recv_pkt_list);
926 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
927 flags);
929 netvsc_send_recv_completion(device,
930 vmxferpage_packet->d.trans_id);
932 put_net_device(device);
933 return;
936 /* Remove the 1st packet to represent the xfer page packet itself */
937 xferpage_packet = (struct xferpage_packet *)listHead.next;
938 list_del(&xferpage_packet->list_ent);
940 /* This is how much we can satisfy */
941 xferpage_packet->count = count - 1;
943 if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
944 dev_err(&device->device, "Needed %d netvsc pkts to satisy "
945 "this xfer page...got %d",
946 vmxferpage_packet->range_cnt, xferpage_packet->count);
949 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
950 for (i = 0; i < (count - 1); i++) {
951 netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
952 list_del(&netvsc_packet->list_ent);
954 /* Initialize the netvsc packet */
955 netvsc_packet->xfer_page_pkt = xferpage_packet;
956 netvsc_packet->completion.recv.recv_completion =
957 netvsc_receive_completion;
958 netvsc_packet->completion.recv.recv_completion_ctx =
959 netvsc_packet;
960 netvsc_packet->device = device;
961 /* Save this so that we can send it back */
962 netvsc_packet->completion.recv.recv_completion_tid =
963 vmxferpage_packet->d.trans_id;
965 netvsc_packet->total_data_buflen =
966 vmxferpage_packet->ranges[i].byte_count;
967 netvsc_packet->page_buf_cnt = 1;
969 netvsc_packet->page_buf[0].len =
970 vmxferpage_packet->ranges[i].byte_count;
972 start = virt_to_phys((void *)((unsigned long)net_device->
973 recv_buf + vmxferpage_packet->ranges[i].byte_offset));
975 netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
976 end_virtual = (unsigned long)net_device->recv_buf
977 + vmxferpage_packet->ranges[i].byte_offset
978 + vmxferpage_packet->ranges[i].byte_count - 1;
979 end = virt_to_phys((void *)end_virtual);
981 /* Calculate the page relative offset */
982 netvsc_packet->page_buf[0].offset =
983 vmxferpage_packet->ranges[i].byte_offset &
984 (PAGE_SIZE - 1);
985 if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
986 /* Handle frame across multiple pages: */
987 netvsc_packet->page_buf[0].len =
988 (netvsc_packet->page_buf[0].pfn <<
989 PAGE_SHIFT)
990 + PAGE_SIZE - start;
991 bytes_remain = netvsc_packet->total_data_buflen -
992 netvsc_packet->page_buf[0].len;
993 for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
994 netvsc_packet->page_buf[j].offset = 0;
995 if (bytes_remain <= PAGE_SIZE) {
996 netvsc_packet->page_buf[j].len =
997 bytes_remain;
998 bytes_remain = 0;
999 } else {
1000 netvsc_packet->page_buf[j].len =
1001 PAGE_SIZE;
1002 bytes_remain -= PAGE_SIZE;
1004 netvsc_packet->page_buf[j].pfn =
1005 virt_to_phys((void *)(end_virtual -
1006 bytes_remain)) >> PAGE_SHIFT;
1007 netvsc_packet->page_buf_cnt++;
1008 if (bytes_remain == 0)
1009 break;
1013 /* Pass it to the upper layer */
1014 ((struct netvsc_driver *)device->drv)->
1015 recv_cb(device, netvsc_packet);
1017 netvsc_receive_completion(netvsc_packet->
1018 completion.recv.recv_completion_ctx);
1021 put_net_device(device);
1024 static void netvsc_send_recv_completion(struct hv_device *device,
1025 u64 transaction_id)
1027 struct nvsp_message recvcompMessage;
1028 int retries = 0;
1029 int ret;
1031 recvcompMessage.hdr.msg_type =
1032 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
1034 /* FIXME: Pass in the status */
1035 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
1036 NVSP_STAT_SUCCESS;
1038 retry_send_cmplt:
1039 /* Send the completion */
1040 ret = vmbus_sendpacket(device->channel, &recvcompMessage,
1041 sizeof(struct nvsp_message), transaction_id,
1042 VM_PKT_COMP, 0);
1043 if (ret == 0) {
1044 /* success */
1045 /* no-op */
1046 } else if (ret == -1) {
1047 /* no more room...wait a bit and attempt to retry 3 times */
1048 retries++;
1049 dev_err(&device->device, "unable to send receive completion pkt"
1050 " (tid %llx)...retrying %d", transaction_id, retries);
1052 if (retries < 4) {
1053 udelay(100);
1054 goto retry_send_cmplt;
1055 } else {
1056 dev_err(&device->device, "unable to send receive "
1057 "completion pkt (tid %llx)...give up retrying",
1058 transaction_id);
1060 } else {
1061 dev_err(&device->device, "unable to send receive "
1062 "completion pkt - %llx", transaction_id);
1066 /* Send a receive completion packet to RNDIS device (ie NetVsp) */
1067 static void netvsc_receive_completion(void *context)
1069 struct hv_netvsc_packet *packet = context;
1070 struct hv_device *device = (struct hv_device *)packet->device;
1071 struct netvsc_device *net_device;
1072 u64 transaction_id = 0;
1073 bool fsend_receive_comp = false;
1074 unsigned long flags;
1077 * Even though it seems logical to do a GetOutboundNetDevice() here to
1078 * send out receive completion, we are using GetInboundNetDevice()
1079 * since we may have disable outbound traffic already.
1081 net_device = get_inbound_net_device(device);
1082 if (!net_device) {
1083 dev_err(&device->device, "unable to get net device..."
1084 "device being destroyed?");
1085 return;
1088 /* Overloading use of the lock. */
1089 spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
1091 packet->xfer_page_pkt->count--;
1094 * Last one in the line that represent 1 xfer page packet.
1095 * Return the xfer page packet itself to the freelist
1097 if (packet->xfer_page_pkt->count == 0) {
1098 fsend_receive_comp = true;
1099 transaction_id = packet->completion.recv.recv_completion_tid;
1100 list_add_tail(&packet->xfer_page_pkt->list_ent,
1101 &net_device->recv_pkt_list);
1105 /* Put the packet back */
1106 list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
1107 spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
1109 /* Send a receive completion for the xfer page packet */
1110 if (fsend_receive_comp)
1111 netvsc_send_recv_completion(device, transaction_id);
1113 put_net_device(device);
1116 static void netvsc_channel_cb(void *context)
1118 int ret;
1119 struct hv_device *device = context;
1120 struct netvsc_device *net_device;
1121 u32 bytes_recvd;
1122 u64 request_id;
1123 unsigned char *packet;
1124 struct vmpacket_descriptor *desc;
1125 unsigned char *buffer;
1126 int bufferlen = NETVSC_PACKET_SIZE;
1128 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
1129 GFP_ATOMIC);
1130 if (!packet)
1131 return;
1132 buffer = packet;
1134 net_device = get_inbound_net_device(device);
1135 if (!net_device) {
1136 dev_err(&device->device, "net device (%p) shutting down..."
1137 "ignoring inbound packets", net_device);
1138 goto out;
1141 do {
1142 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
1143 &bytes_recvd, &request_id);
1144 if (ret == 0) {
1145 if (bytes_recvd > 0) {
1146 desc = (struct vmpacket_descriptor *)buffer;
1147 switch (desc->type) {
1148 case VM_PKT_COMP:
1149 netvsc_send_completion(device, desc);
1150 break;
1152 case VM_PKT_DATA_USING_XFER_PAGES:
1153 netvsc_receive(device, desc);
1154 break;
1156 default:
1157 dev_err(&device->device,
1158 "unhandled packet type %d, "
1159 "tid %llx len %d\n",
1160 desc->type, request_id,
1161 bytes_recvd);
1162 break;
1165 /* reset */
1166 if (bufferlen > NETVSC_PACKET_SIZE) {
1167 kfree(buffer);
1168 buffer = packet;
1169 bufferlen = NETVSC_PACKET_SIZE;
1171 } else {
1172 /* reset */
1173 if (bufferlen > NETVSC_PACKET_SIZE) {
1174 kfree(buffer);
1175 buffer = packet;
1176 bufferlen = NETVSC_PACKET_SIZE;
1179 break;
1181 } else if (ret == -2) {
1182 /* Handle large packet */
1183 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
1184 if (buffer == NULL) {
1185 /* Try again next time around */
1186 dev_err(&device->device,
1187 "unable to allocate buffer of size "
1188 "(%d)!!", bytes_recvd);
1189 break;
1192 bufferlen = bytes_recvd;
1194 } while (1);
1196 put_net_device(device);
1197 out:
1198 kfree(buffer);
1199 return;
1203 * netvsc_initialize - Main entry point
1205 int netvsc_initialize(struct hv_driver *drv)
1207 struct netvsc_driver *driver = (struct netvsc_driver *)drv;
1209 drv->name = driver_name;
1210 memcpy(&drv->dev_type, &netvsc_device_type, sizeof(struct hv_guid));
1212 /* Setup the dispatch table */
1213 driver->base.dev_add = netvsc_device_add;
1214 driver->base.dev_rm = netvsc_device_remove;
1215 driver->base.cleanup = netvsc_cleanup;
1217 driver->send = netvsc_send;
1219 rndis_filter_init(driver);
1220 return 0;