#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include "hyperv_vmbus.h"
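/*
 * Every routine below calls hv_get_ringbuffer_availbytes() to compute how
 * many bytes can currently be read and written. Its definition is not part
 * of this excerpt; the sketch below is a plausible reconstruction based on
 * how the callers use it (writable space is the gap from the write index
 * back around to the read index), not the verbatim original.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
                             u32 *read, u32 *write)
{
        u32 read_loc, write_loc;

        /* Capture the read/write indices before they change */
        read_loc = rbi->ring_buffer->read_index;
        write_loc = rbi->ring_buffer->write_index;

        *write = write_loc >= read_loc ?
                rbi->ring_datasize - (write_loc - read_loc) :
                read_loc - write_loc;
        *read = rbi->ring_datasize - *write;
}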
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}
/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}
/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}
/* Get the next read location, advanced by @offset (wraps around). */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;
        return next;
}
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}
/* Get the start of the data area of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}
/* Get the size of the data area of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}
/* Return the current write index packed into the upper 32 bits of a u64. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
/* Copy @destlen bytes from the ring into @dest; handles source wrap-around. */
static u32 hv_copyfrom_ringbuffer(struct hv_ring_buffer_info *ring_info,
                                  void *dest, u32 destlen,
                                  u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        if (destlen > ring_buffer_size - start_read_offset) {
                /* Wrap-around detected at the source */
                frag_len = ring_buffer_size - start_read_offset;
                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;
        return start_read_offset;
}
/* Copy @srclen bytes from @src into the ring; handles destination wrap-around. */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
                                u32 start_write_offset,
                                void *src, u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        if (srclen > ring_buffer_size - start_write_offset) {
                /* Wrap-around detected at the destination */
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;
        return start_write_offset;
}
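/*
 * Worked example of the wrap-around arithmetic above: with
 * ring_buffer_size = 4096, start_write_offset = 4000 and srclen = 200,
 * frag_len = 4096 - 4000 = 96, so 96 bytes land at offsets 4000..4095 and
 * the remaining 104 bytes at offsets 0..103; the returned offset is
 * (4000 + 200) % 4096 = 104.
 */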
/* Fill in various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/* Get the interrupt mask for the specified ring buffer. */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
        return rbi->ring_buffer->interrupt_mask;
}
/* Initialize the ring buffer; the header must occupy exactly one page. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
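/*
 * Hypothetical usage sketch (not part of this file): the backing buffer
 * must begin with one full page for the struct hv_ring_buffer header, so
 * a 4-page allocation leaves 3 pages of data area:
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, 2);
 *
 *	if (ring && hv_ringbuffer_init(&rbi, ring, 4 * PAGE_SIZE) == 0)
 *		... rbi.ring_datasize == 3 * PAGE_SIZE ...
 */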
/* Write the scatterlist to the outbound ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct scatterlist *sglist, u32 sgcount)
{
        int i;
        u32 bytes_avail_towrite, bytes_avail_toread;
        u32 totalbytes_towrite = 0;
        u32 next_write_location;
        u64 prev_indices = 0;
        struct scatterlist *sg;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i)
                totalbytes_towrite += sg->length;

        /* Reserve room for the trailing packet-start indices */
        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);
        hv_get_ringbuffer_availbytes(outring_info, &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* If there is only room for this packet, treat the ring as full;
         * otherwise read index == write index would later look empty. */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_write_location = hv_get_next_write_location(outring_info);
        for_each_sg(sglist, sg, sgcount, i)
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                next_write_location, sg_virt(sg), sg->length);

        /* Append the previous packet-start indices */
        prev_indices = hv_get_ring_bufferindices(outring_info);
        next_write_location = hv_copyto_ringbuffer(outring_info,
                        next_write_location, &prev_indices, sizeof(u64));

        /* Flush all writes before publishing the new write index */
        smp_wmb();
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
        return 0;
}
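/*
 * Hypothetical caller sketch (names are illustrative, not from this file):
 * a descriptor plus payload submitted as a two-entry scatterlist.
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &desc, sizeof(desc));
 *	sg_set_buf(&sg[1], payload, payload_len);
 *	ret = hv_ringbuffer_write(&channel->outbound, sg, 2);
 */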
/* Read from the ring buffer without advancing the read index. */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen)
{
        u32 bytes_avail_towrite, bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);
        hv_get_ringbuffer_availbytes(inring_info, &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, buffer,
                        buflen, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
        return 0;
}
/* Read from the ring buffer and advance the read index. */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset)
{
        u32 bytes_avail_towrite, bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);
        hv_get_ringbuffer_availbytes(inring_info, &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, buffer,
                        buflen, next_read_location);
        /* Consume the trailing packet-start indices as well */
        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                        &prev_indices, sizeof(u64), next_read_location);

        /* Finish all reads before publishing the new read index; the
         * writer may reuse the read area once the index is updated. */
        smp_mb();
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
        return 0;
}
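/*
 * Hypothetical consumer sketch (illustrative only): peek the fixed-size
 * packet descriptor first, then consume the whole packet; a nonzero
 * offset would instead skip bytes the caller has already peeked.
 *
 *	struct vmpacket_descriptor desc;
 *
 *	if (hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc)) == 0)
 *		hv_ringbuffer_read(&channel->inbound, buf, desc.len8 << 3, 0);
 */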