21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
27 #include <linux/slab.h>
28 #include <linux/module.h>
/*
 * NUM_PAGES_SPANNED() - number of pages touched by the byte range
 * [addr, addr + len).
 *
 * Both arguments are fully parenthesized so the macro stays correct when
 * callers pass compound expressions (the original expanded "addr + len"
 * and "addr >> PAGE_SHIFT" unparenthesized, which mis-binds for arguments
 * such as "a | b" due to operator precedence).
 */
#define NUM_PAGES_SPANNED(addr, len) \
	((PAGE_ALIGN((addr) + (len)) >> PAGE_SHIFT) - ((addr) >> PAGE_SHIFT))
37 static int create_gpadl_header(
40 struct vmbus_channel_msginfo **
msginfo,
42 static void vmbus_setevent(
struct vmbus_channel *
channel);
/*
 * vmbus_setevent() - notify the host that this channel has pending work.
 *
 * NOTE(review): this chunk is a partial extraction -- interior body lines
 * are missing and original line numbers are fused into the text.  Comments
 * describe only what is visible here.
 */
48 static void vmbus_setevent(
struct vmbus_channel *
channel)
/* Visible: the monitored signalling path is used only when the host's
 * channel offer allocated a monitor slot for this channel. */
52 if (channel->offermsg.monitor_allocated) {
/* child_relid >> 5 selects the 32-bit word containing this channel's bit
 * -- presumably in a shared interrupt/event page; confirm in full source. */
56 (channel->offermsg.child_relid >> 5));
/* Visible: the channel's monitor group's "pending" field is accessed. */
63 [channel->monitor_grp].
pending);
/*
 * Fragment of a debug-info helper (the function's opening line is missing
 * from this extraction).  Visible behavior: copies channel identity fields
 * and per-channel monitor-page statistics into *debuginfo.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
74 struct vmbus_channel_debug_info *debuginfo)
/* monitorid is split into a group index (/32) and a bit offset (%32),
 * matching the [monitor_group][monitor_offset] indexing used below. */
77 u8 monitor_group = (
u8)channel->offermsg.monitorid / 32;
78 u8 monitor_offset = (
u8)channel->offermsg.monitorid % 32;
/* Copy channel identity: relid, state, and the offer's interface
 * type/instance GUIDs. */
80 debuginfo->relid = channel->offermsg.child_relid;
81 debuginfo->state = channel->state;
82 memcpy(&debuginfo->interfacetype,
83 &channel->offermsg.offer.if_type,
sizeof(
uuid_le));
84 memcpy(&debuginfo->interface_instance,
85 &channel->offermsg.offer.if_instance,
90 debuginfo->monitorid = channel->offermsg.monitorid;
/* Server-side monitor statistics read from the shared monitor page
 * (the monitorpage initialization line is not visible here). */
92 debuginfo->servermonitor_pending =
94 debuginfo->servermonitor_latency =
95 monitorpage->
latency[monitor_group][monitor_offset];
96 debuginfo->servermonitor_connectionid =
98 [monitor_offset].connectionid.u.id;
/* Client-side monitor statistics -- presumably monitorpage is re-pointed
 * at the client monitor page in a missing line; confirm in full source. */
102 debuginfo->clientmonitor_pending =
104 debuginfo->clientmonitor_latency =
105 monitorpage->
latency[monitor_group][monitor_offset];
106 debuginfo->clientmonitor_connectionid =
108 [monitor_offset].connectionid.u.id;
/*
 * Fragment of the channel-open path (the function's opening line is
 * missing).  Visible behavior: record the caller's callback/context,
 * allocate one contiguous page run holding both ring buffers, establish a
 * GPADL over it, then build, send, and wait on a CHANNELMSG_OPENCHANNEL
 * request, returning the host's open_result status.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
118 u32 recv_ringbuffer_size,
void *userdata,
u32 userdatalen,
119 void (*onchannelcallback)(
void *
context),
void *context)
121 struct vmbus_channel_open_channel *
open_msg;
122 struct vmbus_channel_msginfo *open_info =
NULL;
127 newchannel->onchannel_callback = onchannelcallback;
128 newchannel->channel_callback_context =
context;
/* Visible: both ring buffers live in a single page allocation; "in"
 * starts send_ringbuffer_size bytes after "out". */
132 get_order(send_ringbuffer_size + recv_ringbuffer_size));
138 in = (
void *)((
unsigned long)out + send_ringbuffer_size);
140 newchannel->ringbuffer_pages =
out;
141 newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
145 &newchannel->outbound, out, send_ringbuffer_size);
153 &newchannel->inbound, in, recv_ringbuffer_size);
/* Establish a GPADL covering the combined out+in ring buffer region;
 * the handle lands in ringbuffer_gpadlhandle (initialized to 0 first). */
161 newchannel->ringbuffer_gpadlhandle = 0;
164 newchannel->outbound.ring_buffer,
165 send_ringbuffer_size +
166 recv_ringbuffer_size,
167 &newchannel->ringbuffer_gpadlhandle);
/* msginfo allocation sized for the trailing open_channel payload. */
175 open_info =
kmalloc(
sizeof(*open_info) +
176 sizeof(
struct vmbus_channel_open_channel),
183 init_completion(&open_info->waitevent);
/* Populate the CHANNELMSG_OPENCHANNEL request. */
185 open_msg = (
struct vmbus_channel_open_channel *)open_info->msg;
186 open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
187 open_msg->openid = newchannel->offermsg.child_relid;
188 open_msg->child_relid = newchannel->offermsg.child_relid;
189 open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
190 open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
192 open_msg->server_contextarea_gpadlhandle = 0;
/* userdata is bounds-checked against MAX_USER_DEFINED_BYTES before the
 * copy (the error-handling lines are not visible here). */
194 if (userdatalen > MAX_USER_DEFINED_BYTES) {
200 memcpy(open_msg->userdata, userdata, userdatalen);
208 sizeof(
struct vmbus_channel_open_channel));
/* Host's reply status becomes the return value on failure. */
220 if (open_info->response.open_result.status)
221 err = open_info->response.open_result.status;
/* Visible: error path frees the combined ring-buffer allocation. */
237 get_order(send_ringbuffer_size + recv_ringbuffer_size));
/*
 * create_gpadl_header() fragment - builds the VMBus GPADL message chain
 * describing the PFNs of [kbuffer, kbuffer + size).  Visible structure:
 * when the page count fits in one gpadl_header message a single message
 * is built; otherwise a header message holding the first pfncount PFNs is
 * followed by a submsglist of gpadl_body messages carrying the rest.
 *
 * NOTE(review): interior lines (allocations, loop heads, error paths,
 * trailing parameters) are missing from this extraction; hedged comments.
 */
246 static int create_gpadl_header(
void *kbuffer,
u32 size,
247 struct vmbus_channel_msginfo **
msginfo,
252 unsigned long long pfn;
253 struct vmbus_channel_gpadl_header *gpadl_header;
254 struct vmbus_channel_gpadl_body *gpadl_body;
255 struct vmbus_channel_msginfo *msgheader;
256 struct vmbus_channel_msginfo *msgbody =
NULL;
259 int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
/* pfncount = how many u64 PFN slots fit in a single header message. */
266 sizeof(
struct vmbus_channel_gpadl_header) -
268 pfncount = pfnsize /
sizeof(
u64);
/* Multi-message case: buffer spans more pages than one header holds. */
270 if (pagecount > pfncount) {
273 msgsize =
sizeof(
struct vmbus_channel_msginfo) +
280 INIT_LIST_HEAD(&msgheader->submsglist);
281 msgheader->msgsize = msgsize;
/* Header describes one gpa_range covering the whole buffer, with the
 * first pfncount PFNs inline. */
283 gpadl_header = (
struct vmbus_channel_gpadl_header *)
285 gpadl_header->rangecount = 1;
286 gpadl_header->range_buflen =
sizeof(
struct gpa_range) +
287 pagecount *
sizeof(
u64);
288 gpadl_header->range[0].byte_offset = 0;
289 gpadl_header->range[0].byte_count =
size;
290 for (i = 0; i < pfncount; i++)
291 gpadl_header->range[0].pfn_array[i] = pfn+i;
292 *msginfo = msgheader;
/* Remaining PFNs are chunked into gpadl_body messages; pfncount is
 * recomputed for the (smaller) body message capacity. */
296 pfnleft = pagecount - pfncount;
300 sizeof(
struct vmbus_channel_gpadl_body);
301 pfncount = pfnsize /
sizeof(
u64);
305 if (pfnleft > pfncount)
310 msgsize =
sizeof(
struct vmbus_channel_msginfo) +
312 pfncurr *
sizeof(
u64);
/* Visible: a cleanup walk over submsglist uses pos/tmp iterators --
 * presumably the allocation-failure unwind path; confirm in source. */
316 struct vmbus_channel_msginfo *
pos =
NULL;
317 struct vmbus_channel_msginfo *
tmp =
NULL;
322 &msgheader->submsglist,
332 msgbody->msgsize = msgsize;
335 (
struct vmbus_channel_gpadl_body *)msgbody->msg;
/* Each body message carries pfncurr PFNs offset by the running pfnsum. */
343 for (i = 0; i < pfncurr; i++)
344 gpadl_body->pfn[i] = pfn + pfnsum + i;
348 &msgheader->submsglist);
/* Single-message case: everything fits in one gpadl_header. */
354 msgsize =
sizeof(
struct vmbus_channel_msginfo) +
358 if (msgheader ==
NULL)
360 msgheader->msgsize = msgsize;
362 gpadl_header = (
struct vmbus_channel_gpadl_header *)
364 gpadl_header->rangecount = 1;
365 gpadl_header->range_buflen =
sizeof(
struct gpa_range) +
366 pagecount *
sizeof(
u64);
367 gpadl_header->range[0].byte_offset = 0;
368 gpadl_header->range[0].byte_count =
size;
369 for (i = 0; i < pagecount; i++)
370 gpadl_header->range[0].pfn_array[i] = pfn+i;
372 *msginfo = msgheader;
/*
 * GPADL-establish fragment (function opening line missing).  Visible
 * behavior: build the message chain via create_gpadl_header(), send the
 * CHANNELMSG_GPADL_HEADER followed by CHANNELMSG_GPADL_BODY submessages
 * (header.msgtype excluded from each body's posted size), and hand the
 * resulting GPADL handle back through *gpadl_handle.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
392 u32 size,
u32 *gpadl_handle)
394 struct vmbus_channel_gpadl_header *gpadlmsg;
395 struct vmbus_channel_gpadl_body *gpadl_body;
396 struct vmbus_channel_msginfo *msginfo =
NULL;
397 struct vmbus_channel_msginfo *submsginfo;
400 u32 next_gpadl_handle;
/* Build the header + body message chain describing kbuffer's PFNs. */
408 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
412 init_completion(&msginfo->waitevent);
414 gpadlmsg = (
struct vmbus_channel_gpadl_header *)msginfo->msg;
415 gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
416 gpadlmsg->child_relid = channel->offermsg.child_relid;
417 gpadlmsg->gpadl = next_gpadl_handle;
/* Visible: iterate the submessage list (list head/loop line missing),
 * tagging each entry as a GPADL_BODY with the same handle. */
434 submsginfo = (
struct vmbus_channel_msginfo *)curr;
436 (
struct vmbus_channel_gpadl_body *)submsginfo->msg;
438 gpadl_body->header.msgtype =
439 CHANNELMSG_GPADL_BODY;
440 gpadl_body->gpadl = next_gpadl_handle;
/* Posted size excludes the msginfo bookkeeping wrapper. */
443 submsginfo->msgsize -
444 sizeof(*submsginfo));
455 *gpadl_handle = gpadlmsg->gpadl;
/*
 * GPADL-teardown fragment (function opening line missing).  Visible
 * behavior: allocate a msginfo carrying a CHANNELMSG_GPADL_TEARDOWN
 * request for the given gpadl_handle on this channel and post it
 * (the wait/cleanup lines are not visible).
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
472 struct vmbus_channel_gpadl_teardown *
msg;
473 struct vmbus_channel_msginfo *
info;
/* msginfo sized for the trailing teardown payload; GFP_KERNEL context. */
478 sizeof(
struct vmbus_channel_gpadl_teardown),
GFP_KERNEL);
482 init_completion(&info->waitevent);
484 msg = (
struct vmbus_channel_gpadl_teardown *)info->msg;
486 msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
487 msg->child_relid = channel->offermsg.child_relid;
488 msg->gpadl = gpadl_handle;
495 sizeof(
struct vmbus_channel_gpadl_teardown));
/*
 * Channel-close fragment (function opening line missing).  Visible
 * behavior: detach the receive callback under inbound_lock, post a
 * CHANNELMSG_CLOSECHANNEL, tear down the ring-buffer GPADL if one was
 * established, and free the combined ring-buffer pages.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
516 struct vmbus_channel_close_channel *
msg;
/* Clearing onchannel_callback under the lock stops further delivery
 * into the caller's handler before the channel is torn down. */
522 channel->onchannel_callback =
NULL;
523 spin_unlock_irqrestore(&channel->inbound_lock, flags);
/* Close message is embedded in the channel, not separately allocated. */
527 msg = &channel->close_msg.msg;
529 msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
530 msg->child_relid = channel->offermsg.child_relid;
532 ret =
vmbus_post_msg(msg,
sizeof(
struct vmbus_channel_close_channel));
/* Only tear down the GPADL if one was actually established. */
536 if (channel->ringbuffer_gpadlhandle)
538 channel->ringbuffer_gpadlhandle);
544 free_pages((
unsigned long)channel->ringbuffer_pages,
/*
 * Send-packet fragment (function opening line missing).  Visible
 * behavior: build a vmpacket_descriptor for an 8-byte-aligned packet,
 * gather descriptor + payload + alignment padding into a 3-entry
 * scatterlist, and signal the host via vmbus_setevent() (the ring-buffer
 * write between those steps is not visible).
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
566 u32 bufferlen,
u64 requestid,
569 struct vmpacket_descriptor desc;
/* Total packet = descriptor + payload, rounded up to a u64 boundary;
 * aligned_data supplies the zero padding bytes. */
570 u32 packetlen =
sizeof(
struct vmpacket_descriptor) + bufferlen;
571 u32 packetlen_aligned =
ALIGN(packetlen,
sizeof(
u64));
573 u64 aligned_data = 0;
/* offset8/len8 are expressed in 8-byte units, hence the >> 3 shifts. */
581 desc.offset8 =
sizeof(
struct vmpacket_descriptor) >> 3;
582 desc.len8 = (
u16)(packetlen_aligned >> 3);
583 desc.trans_id = requestid;
/* Scatterlist: [0] descriptor, [1] caller payload, [2] pad to alignment. */
586 sg_set_buf(&bufferlist[0], &desc,
sizeof(
struct vmpacket_descriptor));
587 sg_set_buf(&bufferlist[1], buffer, bufferlen);
588 sg_set_buf(&bufferlist[2], &aligned_data,
589 packetlen_aligned - packetlen);
/* Visible: host is signalled after the (missing) ring write -- presumably
 * conditional on the write result; confirm in full source. */
594 vmbus_setevent(channel);
/*
 * Page-buffer send fragment (function opening line missing).  Visible
 * behavior: reject requests above MAX_PAGE_BUFFER_COUNT, size the GPA
 * descriptor for exactly `pagecount` ranges, copy each page buffer's
 * len/offset/pfn into desc.range[], gather descriptor + payload + pad
 * into a scatterlist, and signal the host.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
605 struct hv_page_buffer pagebuffers[],
611 struct vmbus_channel_packet_page_buffer desc;
614 u32 packetlen_aligned;
616 u64 aligned_data = 0;
/* Bound check before any descriptor sizing arithmetic. */
618 if (pagecount > MAX_PAGE_BUFFER_COUNT)
/* Shrink the fixed-size descriptor by the unused trailing range slots. */
626 descsize =
sizeof(
struct vmbus_channel_packet_page_buffer) -
627 ((MAX_PAGE_BUFFER_COUNT - pagecount) *
628 sizeof(
struct hv_page_buffer));
629 packetlen = descsize + bufferlen;
630 packetlen_aligned =
ALIGN(packetlen,
sizeof(
u64));
/* GPA-direct packet with completion requested; 8-byte-unit fields. */
633 desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
634 desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
635 desc.dataoffset8 = descsize >> 3;
636 desc.length8 = (
u16)(packetlen_aligned >> 3);
637 desc.transactionid = requestid;
638 desc.rangecount = pagecount;
/* Copy caller-supplied page ranges into the descriptor. */
640 for (i = 0; i < pagecount; i++) {
641 desc.range[
i].len = pagebuffers[
i].len;
642 desc.range[
i].offset = pagebuffers[
i].offset;
643 desc.range[
i].pfn = pagebuffers[
i].pfn;
/* Scatterlist: [0] descriptor, [1] payload, [2] alignment padding. */
647 sg_set_buf(&bufferlist[0], &desc, descsize);
648 sg_set_buf(&bufferlist[1], buffer, bufferlen);
649 sg_set_buf(&bufferlist[2], &aligned_data,
650 packetlen_aligned - packetlen);
655 vmbus_setevent(channel);
/*
 * Multi-page-buffer send fragment (function opening line missing).
 * Visible behavior: derive a PFN count from the buffer's length, bound it
 * by MAX_MULTIPAGE_BUFFER_COUNT, size the descriptor for exactly that
 * many PFNs, copy the single range (len/offset) plus its pfn_array, and
 * gather + signal as in the other send paths.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
666 struct hv_multipage_buffer *multi_pagebuffer,
670 struct vmbus_channel_packet_multipage_buffer desc;
673 u32 packetlen_aligned;
675 u64 aligned_data = 0;
/* pfncount presumably comes from a NUM_PAGES_SPANNED-style computation
 * on (offset, len) -- the assignment line is not visible. */
677 multi_pagebuffer->len);
680 if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
/* Shrink the descriptor by the unused trailing pfn_array slots. */
687 descsize =
sizeof(
struct vmbus_channel_packet_multipage_buffer) -
688 ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
690 packetlen = descsize + bufferlen;
691 packetlen_aligned =
ALIGN(packetlen,
sizeof(
u64));
/* GPA-direct packet with completion requested; 8-byte-unit fields. */
695 desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
696 desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
697 desc.dataoffset8 = descsize >> 3;
698 desc.length8 = (
u16)(packetlen_aligned >> 3);
699 desc.transactionid = requestid;
/* Single range covering the whole multi-page buffer. */
702 desc.range.len = multi_pagebuffer->len;
703 desc.range.offset = multi_pagebuffer->offset;
705 memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
706 pfncount *
sizeof(
u64));
/* Scatterlist: [0] descriptor, [1] payload, [2] alignment padding. */
709 sg_set_buf(&bufferlist[0], &desc, descsize);
710 sg_set_buf(&bufferlist[1], buffer, bufferlen);
711 sg_set_buf(&bufferlist[2], &aligned_data,
712 packetlen_aligned - packetlen);
717 vmbus_setevent(channel);
/*
 * Receive fragment (function opening line missing).  Visible behavior:
 * peek the packet descriptor, compute the user payload length (total
 * minus header, both in 8-byte units), reject buffers too small for the
 * payload, and report the transaction id; the payload copy excludes the
 * descriptor (offset8 << 3).
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
737 u32 bufferlen,
u32 *buffer_actual_len,
u64 *requestid)
739 struct vmpacket_descriptor desc;
/* Report zero until a packet is actually read. */
744 *buffer_actual_len = 0;
749 sizeof(
struct vmpacket_descriptor));
/* len8/offset8 are in 8-byte units; userlen = payload after the header. */
753 packetlen = desc.len8 << 3;
754 userlen = packetlen - (desc.offset8 << 3);
756 *buffer_actual_len = userlen;
/* Caller's buffer must hold the payload; error reported otherwise. */
758 if (userlen > bufferlen) {
760 pr_err(
"Buffer too small - got %d needs %d\n",
765 *requestid = desc.trans_id;
/* Visible: copy skips the descriptor header (offset8 << 3 bytes). */
769 (desc.offset8 << 3));
/*
 * Raw-receive fragment (function opening line missing).  Visible
 * behavior: unlike the cooked path above, the caller's buffer must hold
 * the ENTIRE packet (descriptor included) -- buffer_actual_len and the
 * size check use packetlen rather than the header-stripped userlen.
 *
 * NOTE(review): interior lines are missing; hedged comments only.
 */
780 u32 bufferlen,
u32 *buffer_actual_len,
783 struct vmpacket_descriptor desc;
/* Report zero until a packet is actually read. */
788 *buffer_actual_len = 0;
793 sizeof(
struct vmpacket_descriptor));
/* len8/offset8 are in 8-byte units; userlen computed but the raw path
 * sizes against the full packetlen. */
798 packetlen = desc.len8 << 3;
799 userlen = packetlen - (desc.offset8 << 3);
801 *buffer_actual_len = packetlen;
803 if (packetlen > bufferlen) {
804 pr_err(
"Buffer too small - needed %d bytes but "
805 "got space for only %d bytes\n",
806 packetlen, bufferlen);
810 *requestid = desc.trans_id;