/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8
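
/*
 * Editorial note: every packet placed in the ring by hv_ringbuffer_write()
 * is followed by an 8-byte trailer (the prev_indices value written after
 * the payload), which is why the read-side iterator skips
 * VMBUS_PKT_TRAILER bytes past the end of each descriptor.
 */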
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
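
/*
 * In short (editorial summary of the protocol above): the guest signals
 * the host only when interrupt_mask is clear and the write moved the ring
 * from empty to non-empty, i.e. the pre-write write_index equaled the
 * host's read_index.
 */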
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/* Get the read and write indices as u64 of the specified ring buffer. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
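
/*
 * Editorial note: despite the name, only the write index is encoded here
 * (in the high 32 bits of the u64); the low 32 bits are left zero. The
 * value is appended to each packet as its trailer by hv_ringbuffer_write().
 */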
/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset,
				const void *src, u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
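
/*
 * Editorial example of the wrap handling above: with
 * ring_buffer_size = 4096, start_write_offset = 4000 and srclen = 200,
 * the single memcpy() is safe because the data area is mapped twice
 * back-to-back (see hv_ringbuffer_init()), and the returned offset is
 * 4200 - 4096 = 104.
 */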
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);
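
	/*
	 * Editorial illustration of the mapping just created: with
	 * page_cnt = 3 (one header page plus two data pages), the array is
	 * { hdr, d1, d2, d1, d2 }, so the 2 * 3 - 1 = 5 vmap'd pages alias
	 * the two data pages twice in a row. A packet that wraps past the
	 * physical end of the data area therefore stays virtually
	 * contiguous, which is what lets the read and write paths get away
	 * with a single memcpy().
	 */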
	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}
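
	/*
	 * Editorial example of why "<=" rather than "<" above: if 256 bytes
	 * are free and the packet (plus its 8-byte trailer) is exactly 256
	 * bytes, writing it would advance write_index onto read_index, and
	 * the ring would be indistinguishable from empty. Keeping at least
	 * one free byte avoids that ambiguity.
	 */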
	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set even when there is no header; drivers
		 * are expected to check buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
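
/*
 * Editorial example: with ring_datasize = 16384, priv_read_index = 12000
 * and write_index = 100, the writer has wrapped, so the bytes available
 * to the iterator are (16384 - 12000) + 100 = 4484.
 */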
/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;

	prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
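
/*
 * Canonical usage of the iterator API, as in hv_ringbuffer_read() above
 * (an editorial sketch; process_pkt() is a hypothetical driver handler):
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	for (desc = hv_pkt_iter_first(channel); desc;
 *	     desc = __hv_pkt_iter_next(channel, desc))
 *		process_pkt(desc);
 *
 *	hv_pkt_iter_close(channel);
 */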
/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 orig_write_sz = hv_get_bytes_to_write(rbi);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If the read of pending_send_sz (below) were reordered before the
	 * commit of the new read index (above), the host could set
	 * pending_send_sz and go to sleep after we sampled it but before
	 * our read index update became visible, and we would miss sending
	 * the wakeup interrupt. A full memory barrier closes that window.
	 */
	virt_mb();

	/* If host has disabled notifications then skip */
	if (rbi->ring_buffer->interrupt_mask)
		return;
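
	/*
	 * Editorial example of the flow-control logic below: suppose the
	 * host set pending_send_sz = 1024 because its 1024-byte write did
	 * not fit. If more than 1024 bytes were already free before this
	 * iteration (orig_write_sz > pending_sz), the host was never
	 * blocked and needs no wakeup; and if even now fewer than 1024
	 * bytes are free, signaling would only give false hope.
	 */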
	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);

		/*
		 * If there was space before we began iteration, then the
		 * host was not blocked. This also handles the case where
		 * pending_sz is zero: the host has nothing pending and
		 * does not need to be signaled.
		 */
		if (orig_write_sz > pending_sz)
			return;

		/* If pending write will not fit, don't give false hope. */
		if (hv_get_bytes_to_write(rbi) < pending_sz)
			return;
	}

	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
);