/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
27 #include <linux/delay.h>
29 #include <linux/slab.h>
32 #include "hyperv_net.h"
36 static const char *driver_name
= "netvsc";
38 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
39 static const struct hv_guid netvsc_device_type
= {
41 0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
42 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
47 static struct netvsc_device
*alloc_net_device(struct hv_device
*device
)
49 struct netvsc_device
*net_device
;
51 net_device
= kzalloc(sizeof(struct netvsc_device
), GFP_KERNEL
);
55 /* Set to 2 to allow both inbound and outbound traffic */
56 atomic_cmpxchg(&net_device
->refcnt
, 0, 2);
58 net_device
->dev
= device
;
59 device
->ext
= net_device
;
64 static void free_net_device(struct netvsc_device
*device
)
66 WARN_ON(atomic_read(&device
->refcnt
) != 0);
67 device
->dev
->ext
= NULL
;
72 /* Get the net device object iff exists and its refcount > 1 */
73 static struct netvsc_device
*get_outbound_net_device(struct hv_device
*device
)
75 struct netvsc_device
*net_device
;
77 net_device
= device
->ext
;
78 if (net_device
&& atomic_read(&net_device
->refcnt
) > 1)
79 atomic_inc(&net_device
->refcnt
);
86 /* Get the net device object iff exists and its refcount > 0 */
87 static struct netvsc_device
*get_inbound_net_device(struct hv_device
*device
)
89 struct netvsc_device
*net_device
;
91 net_device
= device
->ext
;
92 if (net_device
&& atomic_read(&net_device
->refcnt
))
93 atomic_inc(&net_device
->refcnt
);
100 static void put_net_device(struct hv_device
*device
)
102 struct netvsc_device
*net_device
;
104 net_device
= device
->ext
;
106 atomic_dec(&net_device
->refcnt
);
109 static struct netvsc_device
*release_outbound_net_device(
110 struct hv_device
*device
)
112 struct netvsc_device
*net_device
;
114 net_device
= device
->ext
;
115 if (net_device
== NULL
)
118 /* Busy wait until the ref drop to 2, then set it to 1 */
119 while (atomic_cmpxchg(&net_device
->refcnt
, 2, 1) != 2)
125 static struct netvsc_device
*release_inbound_net_device(
126 struct hv_device
*device
)
128 struct netvsc_device
*net_device
;
130 net_device
= device
->ext
;
131 if (net_device
== NULL
)
134 /* Busy wait until the ref drop to 1, then set it to 0 */
135 while (atomic_cmpxchg(&net_device
->refcnt
, 1, 0) != 1)
142 static int netvsc_destroy_recv_buf(struct netvsc_device
*net_device
)
144 struct nvsp_message
*revoke_packet
;
148 * If we got a section count, it means we received a
149 * SendReceiveBufferComplete msg (ie sent
150 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
151 * to send a revoke msg here
153 if (net_device
->recv_section_cnt
) {
154 /* Send the revoke receive buffer */
155 revoke_packet
= &net_device
->revoke_packet
;
156 memset(revoke_packet
, 0, sizeof(struct nvsp_message
));
158 revoke_packet
->hdr
.msg_type
=
159 NVSP_MSG1_TYPE_REVOKE_RECV_BUF
;
160 revoke_packet
->msg
.v1_msg
.
161 revoke_recv_buf
.id
= NETVSC_RECEIVE_BUFFER_ID
;
163 ret
= vmbus_sendpacket(net_device
->dev
->channel
,
165 sizeof(struct nvsp_message
),
166 (unsigned long)revoke_packet
,
167 VM_PKT_DATA_INBAND
, 0);
169 * If we failed here, we might as well return and
170 * have a leak rather than continue and a bugchk
173 dev_err(&net_device
->dev
->device
, "unable to send "
174 "revoke receive buffer to netvsp");
179 /* Teardown the gpadl on the vsp end */
180 if (net_device
->recv_buf_gpadl_handle
) {
181 ret
= vmbus_teardown_gpadl(net_device
->dev
->channel
,
182 net_device
->recv_buf_gpadl_handle
);
184 /* If we failed here, we might as well return and have a leak
185 * rather than continue and a bugchk
188 dev_err(&net_device
->dev
->device
,
189 "unable to teardown receive buffer's gpadl");
192 net_device
->recv_buf_gpadl_handle
= 0;
195 if (net_device
->recv_buf
) {
196 /* Free up the receive buffer */
197 free_pages((unsigned long)net_device
->recv_buf
,
198 get_order(net_device
->recv_buf_size
));
199 net_device
->recv_buf
= NULL
;
202 if (net_device
->recv_section
) {
203 net_device
->recv_section_cnt
= 0;
204 kfree(net_device
->recv_section
);
205 net_device
->recv_section
= NULL
;
211 static int netvsc_init_recv_buf(struct hv_device
*device
)
215 struct netvsc_device
*net_device
;
216 struct nvsp_message
*init_packet
;
218 net_device
= get_outbound_net_device(device
);
220 dev_err(&device
->device
, "unable to get net device..."
221 "device being destroyed?");
225 net_device
->recv_buf
=
226 (void *)__get_free_pages(GFP_KERNEL
|__GFP_ZERO
,
227 get_order(net_device
->recv_buf_size
));
228 if (!net_device
->recv_buf
) {
229 dev_err(&device
->device
, "unable to allocate receive "
230 "buffer of size %d", net_device
->recv_buf_size
);
236 * Establish the gpadl handle for this buffer on this
237 * channel. Note: This call uses the vmbus connection rather
238 * than the channel to establish the gpadl handle.
240 ret
= vmbus_establish_gpadl(device
->channel
, net_device
->recv_buf
,
241 net_device
->recv_buf_size
,
242 &net_device
->recv_buf_gpadl_handle
);
244 dev_err(&device
->device
,
245 "unable to establish receive buffer's gpadl");
250 /* Notify the NetVsp of the gpadl handle */
251 init_packet
= &net_device
->channel_init_pkt
;
253 memset(init_packet
, 0, sizeof(struct nvsp_message
));
255 init_packet
->hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_RECV_BUF
;
256 init_packet
->msg
.v1_msg
.send_recv_buf
.
257 gpadl_handle
= net_device
->recv_buf_gpadl_handle
;
258 init_packet
->msg
.v1_msg
.
259 send_recv_buf
.id
= NETVSC_RECEIVE_BUFFER_ID
;
261 /* Send the gpadl notification request */
262 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
263 sizeof(struct nvsp_message
),
264 (unsigned long)init_packet
,
266 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
268 dev_err(&device
->device
,
269 "unable to send receive buffer's gpadl to netvsp");
273 t
= wait_for_completion_timeout(&net_device
->channel_init_wait
, HZ
);
277 /* Check the response */
278 if (init_packet
->msg
.v1_msg
.
279 send_recv_buf_complete
.status
!= NVSP_STAT_SUCCESS
) {
280 dev_err(&device
->device
, "Unable to complete receive buffer "
281 "initialzation with NetVsp - status %d",
282 init_packet
->msg
.v1_msg
.
283 send_recv_buf_complete
.status
);
288 /* Parse the response */
290 net_device
->recv_section_cnt
= init_packet
->msg
.
291 v1_msg
.send_recv_buf_complete
.num_sections
;
293 net_device
->recv_section
= kmalloc(net_device
->recv_section_cnt
294 * sizeof(struct nvsp_1_receive_buffer_section
), GFP_KERNEL
);
295 if (net_device
->recv_section
== NULL
) {
300 memcpy(net_device
->recv_section
,
301 init_packet
->msg
.v1_msg
.
302 send_recv_buf_complete
.sections
,
303 net_device
->recv_section_cnt
*
304 sizeof(struct nvsp_1_receive_buffer_section
));
307 * For 1st release, there should only be 1 section that represents the
308 * entire receive buffer
310 if (net_device
->recv_section_cnt
!= 1 ||
311 net_device
->recv_section
->offset
!= 0) {
319 netvsc_destroy_recv_buf(net_device
);
322 put_net_device(device
);
326 static int netvsc_destroy_send_buf(struct netvsc_device
*net_device
)
328 struct nvsp_message
*revoke_packet
;
332 * If we got a section count, it means we received a
333 * SendReceiveBufferComplete msg (ie sent
334 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
335 * to send a revoke msg here
337 if (net_device
->send_section_size
) {
338 /* Send the revoke send buffer */
339 revoke_packet
= &net_device
->revoke_packet
;
340 memset(revoke_packet
, 0, sizeof(struct nvsp_message
));
342 revoke_packet
->hdr
.msg_type
=
343 NVSP_MSG1_TYPE_REVOKE_SEND_BUF
;
344 revoke_packet
->msg
.v1_msg
.
345 revoke_send_buf
.id
= NETVSC_SEND_BUFFER_ID
;
347 ret
= vmbus_sendpacket(net_device
->dev
->channel
,
349 sizeof(struct nvsp_message
),
350 (unsigned long)revoke_packet
,
351 VM_PKT_DATA_INBAND
, 0);
353 * If we failed here, we might as well return and have a leak
354 * rather than continue and a bugchk
357 dev_err(&net_device
->dev
->device
, "unable to send "
358 "revoke send buffer to netvsp");
363 /* Teardown the gpadl on the vsp end */
364 if (net_device
->send_buf_gpadl_handle
) {
365 ret
= vmbus_teardown_gpadl(net_device
->dev
->channel
,
366 net_device
->send_buf_gpadl_handle
);
369 * If we failed here, we might as well return and have a leak
370 * rather than continue and a bugchk
373 dev_err(&net_device
->dev
->device
,
374 "unable to teardown send buffer's gpadl");
377 net_device
->send_buf_gpadl_handle
= 0;
380 if (net_device
->send_buf
) {
381 /* Free up the receive buffer */
382 free_pages((unsigned long)net_device
->send_buf
,
383 get_order(net_device
->send_buf_size
));
384 net_device
->send_buf
= NULL
;
390 static int netvsc_init_send_buf(struct hv_device
*device
)
394 struct netvsc_device
*net_device
;
395 struct nvsp_message
*init_packet
;
397 net_device
= get_outbound_net_device(device
);
399 dev_err(&device
->device
, "unable to get net device..."
400 "device being destroyed?");
403 if (net_device
->send_buf_size
<= 0) {
408 net_device
->send_buf
=
409 (void *)__get_free_pages(GFP_KERNEL
|__GFP_ZERO
,
410 get_order(net_device
->send_buf_size
));
411 if (!net_device
->send_buf
) {
412 dev_err(&device
->device
, "unable to allocate send "
413 "buffer of size %d", net_device
->send_buf_size
);
419 * Establish the gpadl handle for this buffer on this
420 * channel. Note: This call uses the vmbus connection rather
421 * than the channel to establish the gpadl handle.
423 ret
= vmbus_establish_gpadl(device
->channel
, net_device
->send_buf
,
424 net_device
->send_buf_size
,
425 &net_device
->send_buf_gpadl_handle
);
427 dev_err(&device
->device
, "unable to establish send buffer's gpadl");
431 /* Notify the NetVsp of the gpadl handle */
432 init_packet
= &net_device
->channel_init_pkt
;
434 memset(init_packet
, 0, sizeof(struct nvsp_message
));
436 init_packet
->hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_SEND_BUF
;
437 init_packet
->msg
.v1_msg
.send_recv_buf
.
438 gpadl_handle
= net_device
->send_buf_gpadl_handle
;
439 init_packet
->msg
.v1_msg
.send_recv_buf
.id
=
440 NETVSC_SEND_BUFFER_ID
;
442 /* Send the gpadl notification request */
443 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
444 sizeof(struct nvsp_message
),
445 (unsigned long)init_packet
,
447 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
449 dev_err(&device
->device
,
450 "unable to send receive buffer's gpadl to netvsp");
454 t
= wait_for_completion_timeout(&net_device
->channel_init_wait
, HZ
);
458 /* Check the response */
459 if (init_packet
->msg
.v1_msg
.
460 send_send_buf_complete
.status
!= NVSP_STAT_SUCCESS
) {
461 dev_err(&device
->device
, "Unable to complete send buffer "
462 "initialzation with NetVsp - status %d",
463 init_packet
->msg
.v1_msg
.
464 send_send_buf_complete
.status
);
469 net_device
->send_section_size
= init_packet
->
470 msg
.v1_msg
.send_send_buf_complete
.section_size
;
475 netvsc_destroy_send_buf(net_device
);
478 put_net_device(device
);
483 static int netvsc_connect_vsp(struct hv_device
*device
)
486 struct netvsc_device
*net_device
;
487 struct nvsp_message
*init_packet
;
490 net_device
= get_outbound_net_device(device
);
492 dev_err(&device
->device
, "unable to get net device..."
493 "device being destroyed?");
497 init_packet
= &net_device
->channel_init_pkt
;
499 memset(init_packet
, 0, sizeof(struct nvsp_message
));
500 init_packet
->hdr
.msg_type
= NVSP_MSG_TYPE_INIT
;
501 init_packet
->msg
.init_msg
.init
.min_protocol_ver
=
502 NVSP_MIN_PROTOCOL_VERSION
;
503 init_packet
->msg
.init_msg
.init
.max_protocol_ver
=
504 NVSP_MAX_PROTOCOL_VERSION
;
506 /* Send the init request */
507 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
508 sizeof(struct nvsp_message
),
509 (unsigned long)init_packet
,
511 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
516 t
= wait_for_completion_timeout(&net_device
->channel_init_wait
, HZ
);
523 if (init_packet
->msg
.init_msg
.init_complete
.status
!=
529 if (init_packet
->msg
.init_msg
.init_complete
.
530 negotiated_protocol_ver
!= NVSP_PROTOCOL_VERSION_1
) {
534 /* Send the ndis version */
535 memset(init_packet
, 0, sizeof(struct nvsp_message
));
537 ndis_version
= 0x00050000;
539 init_packet
->hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_NDIS_VER
;
540 init_packet
->msg
.v1_msg
.
541 send_ndis_ver
.ndis_major_ver
=
542 (ndis_version
& 0xFFFF0000) >> 16;
543 init_packet
->msg
.v1_msg
.
544 send_ndis_ver
.ndis_minor_ver
=
545 ndis_version
& 0xFFFF;
547 /* Send the init request */
548 ret
= vmbus_sendpacket(device
->channel
, init_packet
,
549 sizeof(struct nvsp_message
),
550 (unsigned long)init_packet
,
551 VM_PKT_DATA_INBAND
, 0);
557 /* Post the big receive buffer to NetVSP */
558 ret
= netvsc_init_recv_buf(device
);
560 ret
= netvsc_init_send_buf(device
);
563 put_net_device(device
);
/* Release both host-visible buffers set up by netvsc_connect_vsp(). */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
	netvsc_destroy_send_buf(net_device);
}
574 * netvsc_device_remove - Callback when the root bus device is removed
576 int netvsc_device_remove(struct hv_device
*device
)
578 struct netvsc_device
*net_device
;
579 struct hv_netvsc_packet
*netvsc_packet
, *pos
;
581 /* Stop outbound traffic ie sends and receives completions */
582 net_device
= release_outbound_net_device(device
);
584 dev_err(&device
->device
, "No net device present!!");
588 /* Wait for all send completions */
589 while (atomic_read(&net_device
->num_outstanding_sends
)) {
590 dev_err(&device
->device
,
591 "waiting for %d requests to complete...",
592 atomic_read(&net_device
->num_outstanding_sends
));
596 netvsc_disconnect_vsp(net_device
);
598 /* Stop inbound traffic ie receives and sends completions */
599 net_device
= release_inbound_net_device(device
);
601 /* At this point, no one should be accessing netDevice except in here */
602 dev_notice(&device
->device
, "net device safe to remove");
604 /* Now, we can close the channel safely */
605 vmbus_close(device
->channel
);
607 /* Release all resources */
608 list_for_each_entry_safe(netvsc_packet
, pos
,
609 &net_device
->recv_pkt_list
, list_ent
) {
610 list_del(&netvsc_packet
->list_ent
);
611 kfree(netvsc_packet
);
614 free_net_device(net_device
);
618 static void netvsc_send_completion(struct hv_device
*device
,
619 struct vmpacket_descriptor
*packet
)
621 struct netvsc_device
*net_device
;
622 struct nvsp_message
*nvsp_packet
;
623 struct hv_netvsc_packet
*nvsc_packet
;
625 net_device
= get_inbound_net_device(device
);
627 dev_err(&device
->device
, "unable to get net device..."
628 "device being destroyed?");
632 nvsp_packet
= (struct nvsp_message
*)((unsigned long)packet
+
633 (packet
->offset8
<< 3));
635 if ((nvsp_packet
->hdr
.msg_type
== NVSP_MSG_TYPE_INIT_COMPLETE
) ||
636 (nvsp_packet
->hdr
.msg_type
==
637 NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE
) ||
638 (nvsp_packet
->hdr
.msg_type
==
639 NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE
)) {
640 /* Copy the response back */
641 memcpy(&net_device
->channel_init_pkt
, nvsp_packet
,
642 sizeof(struct nvsp_message
));
643 complete(&net_device
->channel_init_wait
);
644 } else if (nvsp_packet
->hdr
.msg_type
==
645 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE
) {
646 /* Get the send context */
647 nvsc_packet
= (struct hv_netvsc_packet
*)(unsigned long)
650 /* Notify the layer above us */
651 nvsc_packet
->completion
.send
.send_completion(
652 nvsc_packet
->completion
.send
.send_completion_ctx
);
654 atomic_dec(&net_device
->num_outstanding_sends
);
656 dev_err(&device
->device
, "Unknown send completion packet type- "
657 "%d received!!", nvsp_packet
->hdr
.msg_type
);
660 put_net_device(device
);
663 int netvsc_send(struct hv_device
*device
,
664 struct hv_netvsc_packet
*packet
)
666 struct netvsc_device
*net_device
;
669 struct nvsp_message sendMessage
;
671 net_device
= get_outbound_net_device(device
);
673 dev_err(&device
->device
, "net device (%p) shutting down..."
674 "ignoring outbound packets", net_device
);
678 sendMessage
.hdr
.msg_type
= NVSP_MSG1_TYPE_SEND_RNDIS_PKT
;
679 if (packet
->is_data_pkt
) {
681 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.channel_type
= 0;
683 /* 1 is RMC_CONTROL; */
684 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.channel_type
= 1;
687 /* Not using send buffer section */
688 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.send_buf_section_index
=
690 sendMessage
.msg
.v1_msg
.send_rndis_pkt
.send_buf_section_size
= 0;
692 if (packet
->page_buf_cnt
) {
693 ret
= vmbus_sendpacket_pagebuffer(device
->channel
,
695 packet
->page_buf_cnt
,
697 sizeof(struct nvsp_message
),
698 (unsigned long)packet
);
700 ret
= vmbus_sendpacket(device
->channel
, &sendMessage
,
701 sizeof(struct nvsp_message
),
702 (unsigned long)packet
,
704 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
709 dev_err(&device
->device
, "Unable to send packet %p ret %d",
712 atomic_inc(&net_device
->num_outstanding_sends
);
713 put_net_device(device
);
717 static void netvsc_send_recv_completion(struct hv_device
*device
,
720 struct nvsp_message recvcompMessage
;
724 recvcompMessage
.hdr
.msg_type
=
725 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE
;
727 /* FIXME: Pass in the status */
728 recvcompMessage
.msg
.v1_msg
.send_rndis_pkt_complete
.status
=
732 /* Send the completion */
733 ret
= vmbus_sendpacket(device
->channel
, &recvcompMessage
,
734 sizeof(struct nvsp_message
), transaction_id
,
739 } else if (ret
== -1) {
740 /* no more room...wait a bit and attempt to retry 3 times */
742 dev_err(&device
->device
, "unable to send receive completion pkt"
743 " (tid %llx)...retrying %d", transaction_id
, retries
);
747 goto retry_send_cmplt
;
749 dev_err(&device
->device
, "unable to send receive "
750 "completion pkt (tid %llx)...give up retrying",
754 dev_err(&device
->device
, "unable to send receive "
755 "completion pkt - %llx", transaction_id
);
759 /* Send a receive completion packet to RNDIS device (ie NetVsp) */
760 static void netvsc_receive_completion(void *context
)
762 struct hv_netvsc_packet
*packet
= context
;
763 struct hv_device
*device
= (struct hv_device
*)packet
->device
;
764 struct netvsc_device
*net_device
;
765 u64 transaction_id
= 0;
766 bool fsend_receive_comp
= false;
770 * Even though it seems logical to do a GetOutboundNetDevice() here to
771 * send out receive completion, we are using GetInboundNetDevice()
772 * since we may have disable outbound traffic already.
774 net_device
= get_inbound_net_device(device
);
776 dev_err(&device
->device
, "unable to get net device..."
777 "device being destroyed?");
781 /* Overloading use of the lock. */
782 spin_lock_irqsave(&net_device
->recv_pkt_list_lock
, flags
);
784 packet
->xfer_page_pkt
->count
--;
787 * Last one in the line that represent 1 xfer page packet.
788 * Return the xfer page packet itself to the freelist
790 if (packet
->xfer_page_pkt
->count
== 0) {
791 fsend_receive_comp
= true;
792 transaction_id
= packet
->completion
.recv
.recv_completion_tid
;
793 list_add_tail(&packet
->xfer_page_pkt
->list_ent
,
794 &net_device
->recv_pkt_list
);
798 /* Put the packet back */
799 list_add_tail(&packet
->list_ent
, &net_device
->recv_pkt_list
);
800 spin_unlock_irqrestore(&net_device
->recv_pkt_list_lock
, flags
);
802 /* Send a receive completion for the xfer page packet */
803 if (fsend_receive_comp
)
804 netvsc_send_recv_completion(device
, transaction_id
);
806 put_net_device(device
);
809 static void netvsc_receive(struct hv_device
*device
,
810 struct vmpacket_descriptor
*packet
)
812 struct netvsc_device
*net_device
;
813 struct vmtransfer_page_packet_header
*vmxferpage_packet
;
814 struct nvsp_message
*nvsp_packet
;
815 struct hv_netvsc_packet
*netvsc_packet
= NULL
;
817 unsigned long end
, end_virtual
;
818 /* struct netvsc_driver *netvscDriver; */
819 struct xferpage_packet
*xferpage_packet
= NULL
;
821 int count
= 0, bytes_remain
= 0;
826 net_device
= get_inbound_net_device(device
);
828 dev_err(&device
->device
, "unable to get net device..."
829 "device being destroyed?");
834 * All inbound packets other than send completion should be xfer page
837 if (packet
->type
!= VM_PKT_DATA_USING_XFER_PAGES
) {
838 dev_err(&device
->device
, "Unknown packet type received - %d",
840 put_net_device(device
);
844 nvsp_packet
= (struct nvsp_message
*)((unsigned long)packet
+
845 (packet
->offset8
<< 3));
847 /* Make sure this is a valid nvsp packet */
848 if (nvsp_packet
->hdr
.msg_type
!=
849 NVSP_MSG1_TYPE_SEND_RNDIS_PKT
) {
850 dev_err(&device
->device
, "Unknown nvsp packet type received-"
851 " %d", nvsp_packet
->hdr
.msg_type
);
852 put_net_device(device
);
856 vmxferpage_packet
= (struct vmtransfer_page_packet_header
*)packet
;
858 if (vmxferpage_packet
->xfer_pageset_id
!= NETVSC_RECEIVE_BUFFER_ID
) {
859 dev_err(&device
->device
, "Invalid xfer page set id - "
860 "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID
,
861 vmxferpage_packet
->xfer_pageset_id
);
862 put_net_device(device
);
867 * Grab free packets (range count + 1) to represent this xfer
868 * page packet. +1 to represent the xfer page packet itself.
869 * We grab it here so that we know exactly how many we can
872 spin_lock_irqsave(&net_device
->recv_pkt_list_lock
, flags
);
873 while (!list_empty(&net_device
->recv_pkt_list
)) {
874 list_move_tail(net_device
->recv_pkt_list
.next
, &listHead
);
875 if (++count
== vmxferpage_packet
->range_cnt
+ 1)
878 spin_unlock_irqrestore(&net_device
->recv_pkt_list_lock
, flags
);
881 * We need at least 2 netvsc pkts (1 to represent the xfer
882 * page and at least 1 for the range) i.e. we can handled
883 * some of the xfer page packet ranges...
886 dev_err(&device
->device
, "Got only %d netvsc pkt...needed "
887 "%d pkts. Dropping this xfer page packet completely!",
888 count
, vmxferpage_packet
->range_cnt
+ 1);
890 /* Return it to the freelist */
891 spin_lock_irqsave(&net_device
->recv_pkt_list_lock
, flags
);
892 for (i
= count
; i
!= 0; i
--) {
893 list_move_tail(listHead
.next
,
894 &net_device
->recv_pkt_list
);
896 spin_unlock_irqrestore(&net_device
->recv_pkt_list_lock
,
899 netvsc_send_recv_completion(device
,
900 vmxferpage_packet
->d
.trans_id
);
902 put_net_device(device
);
906 /* Remove the 1st packet to represent the xfer page packet itself */
907 xferpage_packet
= (struct xferpage_packet
*)listHead
.next
;
908 list_del(&xferpage_packet
->list_ent
);
910 /* This is how much we can satisfy */
911 xferpage_packet
->count
= count
- 1;
913 if (xferpage_packet
->count
!= vmxferpage_packet
->range_cnt
) {
914 dev_err(&device
->device
, "Needed %d netvsc pkts to satisy "
915 "this xfer page...got %d",
916 vmxferpage_packet
->range_cnt
, xferpage_packet
->count
);
919 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
920 for (i
= 0; i
< (count
- 1); i
++) {
921 netvsc_packet
= (struct hv_netvsc_packet
*)listHead
.next
;
922 list_del(&netvsc_packet
->list_ent
);
924 /* Initialize the netvsc packet */
925 netvsc_packet
->xfer_page_pkt
= xferpage_packet
;
926 netvsc_packet
->completion
.recv
.recv_completion
=
927 netvsc_receive_completion
;
928 netvsc_packet
->completion
.recv
.recv_completion_ctx
=
930 netvsc_packet
->device
= device
;
931 /* Save this so that we can send it back */
932 netvsc_packet
->completion
.recv
.recv_completion_tid
=
933 vmxferpage_packet
->d
.trans_id
;
935 netvsc_packet
->total_data_buflen
=
936 vmxferpage_packet
->ranges
[i
].byte_count
;
937 netvsc_packet
->page_buf_cnt
= 1;
939 netvsc_packet
->page_buf
[0].len
=
940 vmxferpage_packet
->ranges
[i
].byte_count
;
942 start
= virt_to_phys((void *)((unsigned long)net_device
->
943 recv_buf
+ vmxferpage_packet
->ranges
[i
].byte_offset
));
945 netvsc_packet
->page_buf
[0].pfn
= start
>> PAGE_SHIFT
;
946 end_virtual
= (unsigned long)net_device
->recv_buf
947 + vmxferpage_packet
->ranges
[i
].byte_offset
948 + vmxferpage_packet
->ranges
[i
].byte_count
- 1;
949 end
= virt_to_phys((void *)end_virtual
);
951 /* Calculate the page relative offset */
952 netvsc_packet
->page_buf
[0].offset
=
953 vmxferpage_packet
->ranges
[i
].byte_offset
&
955 if ((end
>> PAGE_SHIFT
) != (start
>> PAGE_SHIFT
)) {
956 /* Handle frame across multiple pages: */
957 netvsc_packet
->page_buf
[0].len
=
958 (netvsc_packet
->page_buf
[0].pfn
<<
961 bytes_remain
= netvsc_packet
->total_data_buflen
-
962 netvsc_packet
->page_buf
[0].len
;
963 for (j
= 1; j
< NETVSC_PACKET_MAXPAGE
; j
++) {
964 netvsc_packet
->page_buf
[j
].offset
= 0;
965 if (bytes_remain
<= PAGE_SIZE
) {
966 netvsc_packet
->page_buf
[j
].len
=
970 netvsc_packet
->page_buf
[j
].len
=
972 bytes_remain
-= PAGE_SIZE
;
974 netvsc_packet
->page_buf
[j
].pfn
=
975 virt_to_phys((void *)(end_virtual
-
976 bytes_remain
)) >> PAGE_SHIFT
;
977 netvsc_packet
->page_buf_cnt
++;
978 if (bytes_remain
== 0)
983 /* Pass it to the upper layer */
984 rndis_filter_receive(device
, netvsc_packet
);
986 netvsc_receive_completion(netvsc_packet
->
987 completion
.recv
.recv_completion_ctx
);
990 put_net_device(device
);
993 static void netvsc_channel_cb(void *context
)
996 struct hv_device
*device
= context
;
997 struct netvsc_device
*net_device
;
1000 unsigned char *packet
;
1001 struct vmpacket_descriptor
*desc
;
1002 unsigned char *buffer
;
1003 int bufferlen
= NETVSC_PACKET_SIZE
;
1005 packet
= kzalloc(NETVSC_PACKET_SIZE
* sizeof(unsigned char),
1011 net_device
= get_inbound_net_device(device
);
1013 dev_err(&device
->device
, "net device (%p) shutting down..."
1014 "ignoring inbound packets", net_device
);
1019 ret
= vmbus_recvpacket_raw(device
->channel
, buffer
, bufferlen
,
1020 &bytes_recvd
, &request_id
);
1022 if (bytes_recvd
> 0) {
1023 desc
= (struct vmpacket_descriptor
*)buffer
;
1024 switch (desc
->type
) {
1026 netvsc_send_completion(device
, desc
);
1029 case VM_PKT_DATA_USING_XFER_PAGES
:
1030 netvsc_receive(device
, desc
);
1034 dev_err(&device
->device
,
1035 "unhandled packet type %d, "
1036 "tid %llx len %d\n",
1037 desc
->type
, request_id
,
1043 if (bufferlen
> NETVSC_PACKET_SIZE
) {
1046 bufferlen
= NETVSC_PACKET_SIZE
;
1050 if (bufferlen
> NETVSC_PACKET_SIZE
) {
1053 bufferlen
= NETVSC_PACKET_SIZE
;
1058 } else if (ret
== -2) {
1059 /* Handle large packet */
1060 buffer
= kmalloc(bytes_recvd
, GFP_ATOMIC
);
1061 if (buffer
== NULL
) {
1062 /* Try again next time around */
1063 dev_err(&device
->device
,
1064 "unable to allocate buffer of size "
1065 "(%d)!!", bytes_recvd
);
1069 bufferlen
= bytes_recvd
;
1073 put_net_device(device
);
1080 * netvsc_device_add - Callback when the device belonging to this
1083 int netvsc_device_add(struct hv_device
*device
, void *additional_info
)
1088 ((struct netvsc_device_info
*)additional_info
)->ring_size
;
1089 struct netvsc_device
*net_device
;
1090 struct hv_netvsc_packet
*packet
, *pos
;
1092 net_device
= alloc_net_device(device
);
1098 /* Initialize the NetVSC channel extension */
1099 net_device
->recv_buf_size
= NETVSC_RECEIVE_BUFFER_SIZE
;
1100 spin_lock_init(&net_device
->recv_pkt_list_lock
);
1102 net_device
->send_buf_size
= NETVSC_SEND_BUFFER_SIZE
;
1104 INIT_LIST_HEAD(&net_device
->recv_pkt_list
);
1106 for (i
= 0; i
< NETVSC_RECEIVE_PACKETLIST_COUNT
; i
++) {
1107 packet
= kzalloc(sizeof(struct hv_netvsc_packet
) +
1108 (NETVSC_RECEIVE_SG_COUNT
*
1109 sizeof(struct hv_page_buffer
)), GFP_KERNEL
);
1113 list_add_tail(&packet
->list_ent
,
1114 &net_device
->recv_pkt_list
);
1116 init_completion(&net_device
->channel_init_wait
);
1118 /* Open the channel */
1119 ret
= vmbus_open(device
->channel
, ring_size
* PAGE_SIZE
,
1120 ring_size
* PAGE_SIZE
, NULL
, 0,
1121 netvsc_channel_cb
, device
);
1124 dev_err(&device
->device
, "unable to open channel: %d", ret
);
1129 /* Channel is opened */
1130 pr_info("hv_netvsc channel opened successfully");
1132 /* Connect with the NetVsp */
1133 ret
= netvsc_connect_vsp(device
);
1135 dev_err(&device
->device
,
1136 "unable to connect to NetVSP - %d", ret
);
1144 /* Now, we can close the channel safely */
1145 vmbus_close(device
->channel
);
1150 list_for_each_entry_safe(packet
, pos
,
1151 &net_device
->recv_pkt_list
,
1153 list_del(&packet
->list_ent
);
1157 release_outbound_net_device(device
);
1158 release_inbound_net_device(device
);
1160 free_net_device(net_device
);
1167 * netvsc_initialize - Main entry point
1169 int netvsc_initialize(struct hv_driver
*drv
)
1172 drv
->name
= driver_name
;
1173 memcpy(&drv
->dev_type
, &netvsc_device_type
, sizeof(struct hv_guid
));