#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#define XPC_ACTIVATE_MSG_SIZE_UV    (1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV     (4 * XP_MAX_NPARTITIONS_UV * \
                                     XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME       "xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV      (2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV       (4 * XP_MAX_NPARTITIONS_UV * \
                                     XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME         "xpc_notify"
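/*
 * Note: each message queue is sized to hold four messages from every
 * possible partition (4 * XP_MAX_NPARTITIONS_UV slots); an activate
 * message occupies one GRU cache line, a notify message two.
 */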
static int xpc_mq_node = -1;
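/*
 * Node on which the GRU message queues are allocated.  The default of -1
 * lets initialization pick a node itself (trying nodes until queue creation
 * succeeds); a specific node can be requested through the "xpc_mq_node"
 * module parameter declared at the bottom of this file.
 */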
xpc_setup_partitions_uv(void)

xpc_teardown_partitions_uv(void)

        unsigned long irq_flags;

        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
        int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#error not a supported configuration
#if defined CONFIG_X86_64
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
        unsigned long mmr_value;

        mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
        mmr_value = 1UL << 16;

        uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#error not a supported configuration
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
        int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

        ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
                                    mq->order, &mq->mmr_offset);
#elif defined CONFIG_X86_64
#error not a supported configuration
        int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#error not a supported configuration
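/*
 * xpc_create_gru_mq_uv() below allocates the descriptor and backing pages
 * for a GRU message queue, enables the watchlist so GRU deliveries to the
 * queue raise an interrupt, and registers the IRQ handler; its error path
 * unwinds those steps in reverse order.
 */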
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                     irq_handler_t irq_handler)

                "a xpc_gru_mq_uv structure\n");

                "a gru_message_queue_desc structure\n");

        page = alloc_pages_exact_node(nid,
                                      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                      pg_order);

                "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
        ret = xpc_gru_mq_watchlist_alloc_uv(mq);

        ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);

        xpc_release_gru_mq_irq_uv(mq);
        xpc_gru_mq_watchlist_free_uv(mq);

        unsigned int mq_size;

        xpc_release_gru_mq_irq_uv(mq);
        xpc_gru_mq_watchlist_free_uv(mq);
                "error=MQE_QUEUE_FULL\n");

                "error=MQE_CONGESTION\n");
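/*
 * xpc_send_gru_msg() retries the GRU send: MQE_QUEUE_FULL is handled by
 * sleeping briefly and trying again, MQE_CONGESTION by simply retrying;
 * any other return value aborts the send with an error.
 */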
xpc_process_activate_IRQ_rcvd_uv(void)

        unsigned long irq_flags;

        if (part->sn.uv.act_state_req == 0)

        act_state_req = part->sn.uv.act_state_req;
        part->sn.uv.act_state_req = 0;
        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
                              int *wakeup_hb_checker)

        unsigned long irq_flags;

        switch (msg_hdr->type) {

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
        (*wakeup_hb_checker)++;

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
        (*wakeup_hb_checker)++;

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
        xpc_wakeup_channel_mgr(part);

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
        xpc_wakeup_channel_mgr(part);

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
        xpc_wakeup_channel_mgr(part);

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
        xpc_wakeup_channel_mgr(part);

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
        xpc_wakeup_channel_mgr(part);

        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);

        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
        (*wakeup_hb_checker)++;

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
        (*wakeup_hb_checker)++;
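/*
 * The switch above dispatches on the incoming activate-MQ message type:
 * activation state changes are recorded and the heartbeat checker woken,
 * while channel-control messages set the relevant chctl flags and wake
 * the channel manager for the affected partition.
 */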
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)

        int wakeup_hb_checker = 0;

                        "received invalid partid=0x%x in message\n",

                part_referenced = xpc_part_ref(part);
                xpc_handle_activate_mq_msg_uv(part, msg_hdr, part_referenced,
                                              &wakeup_hb_checker);
                if (part_referenced)
                        xpc_part_deref(part);

        if (wakeup_hb_checker)
                wake_up_interruptible(&xpc_activate_IRQ_wq);
                                   unsigned long gru_mq_desc_gpa)

xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
                         int msg_type)

        unsigned long irq_flags;

        if (gru_mq_desc == NULL) {
                gru_mq_desc = kmalloc(sizeof(struct gru_message_queue_desc),
                                      GFP_KERNEL);
                if (gru_mq_desc == NULL) {

                ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
                                                      part_uv->activate_gru_mq_desc_gpa);

        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
                              size_t msg_size, int msg_type)

        ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);

xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
                            void *msg, size_t msg_size, int msg_type)

        ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);

        if (irq_flags != NULL)
                spin_unlock_irqrestore(&ch->lock, *irq_flags);

        if (irq_flags != NULL)
                spin_lock_irqsave(&ch->lock, *irq_flags);
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)

        unsigned long irq_flags;

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
                                  size_t *len)

#if defined CONFIG_X86_64
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
        status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
#error not a supported configuration

        rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
        rp->sn.uv.activate_gru_mq_desc_gpa =
            uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
xpc_allow_hb_uv(short partid)

xpc_disallow_hb_uv(short partid)

xpc_disallow_all_hbs_uv(void)

xpc_increment_heartbeat_uv(void)

        xpc_heartbeat_uv->value++;

xpc_offline_heartbeat_uv(void)

        xpc_increment_heartbeat_uv();

xpc_online_heartbeat_uv(void)

        xpc_increment_heartbeat_uv();

xpc_heartbeat_init_uv(void)

        xpc_heartbeat_uv->value = 1;

xpc_heartbeat_exit_uv(void)

        xpc_offline_heartbeat_uv();

                             sizeof(struct xpc_heartbeat_uv));
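/*
 * The UV heartbeat is just a counter kept in xpc_heartbeat_uv; its global
 * address is published through the reserved page (heartbeat_gpa above), so
 * a remote partition apparently checks liveness by copying the structure
 * over (note the sizeof(struct xpc_heartbeat_uv) copy) rather than by
 * exchanging heartbeat messages.
 */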
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
                                    unsigned long remote_rp_gpa, int nasid)

        part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
        part->sn.uv.activate_gru_mq_desc_gpa =
            remote_rp->sn.uv.activate_gru_mq_desc_gpa;

        msg.activate_gru_mq_desc_gpa =
            uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
xpc_request_partition_reactivation_uv(struct xpc_partition *part)

xpc_request_partition_deactivation_uv(struct xpc_partition *part)

        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);

xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
        unsigned long irq_flags;

        spin_unlock_irqrestore(&head->lock, irq_flags);

        unsigned long irq_flags;

        head->last->next = last;

        spin_unlock_irqrestore(&head->lock, irq_flags);
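/*
 * Message slots travel on simple spinlock-protected FIFO lists (such as the
 * per-channel msg_slot_free_list and recv_msg_list); the helpers above push
 * and pop entries under head->lock.
 */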
        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch_uv = &part->channels[ch_number].sn.uv;

        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
        unsigned long irq_flags;

        chctl = part->chctl;
        if (chctl.all_flags != 0)

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
        return chctl.all_flags;
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)

        unsigned long irq_flags;

        for (entry = 0; entry < nentries; entry++) {

        if (nentries < ch->local_nentries)

        spin_unlock_irqrestore(&ch->lock, irq_flags);

xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)

        unsigned long irq_flags;

        for (entry = 0; entry < nentries; entry++) {

                msg_slot->hdr.msg_slot_number = entry;

        if (nentries < ch->remote_nentries)

        spin_unlock_irqrestore(&ch->lock, irq_flags);
xpc_setup_msg_structures_uv(struct xpc_channel *ch)

        ret = xpc_allocate_send_msg_slot_uv(ch);

        ret = xpc_allocate_recv_msg_slot_uv(ch);

xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)

        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
                                    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);

xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)

        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
                                    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);

xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)

        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
                                    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);

xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)

        msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
                                    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);

xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)

        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
                                    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)

        unsigned long irq_flags;

        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

        xpc_wakeup_channel_mgr(part);

xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
                               unsigned long gru_mq_desc_gpa)
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)

        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);

xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)

        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
                                      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);

xpc_assume_partition_disengaged_uv(short partid)

        unsigned long irq_flags;

        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
xpc_partition_engaged_uv(short partid)

xpc_any_partition_engaged_uv(void)

        entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);

        *address_of_msg_slot = msg_slot;

        xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

                "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,

                "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,

        msg_slot = &ch->sn.uv.send_msg_slots[entry];

        xpc_free_msg_slot_uv(ch, msg_slot);
        unsigned long irq_flags;
        int ch_number = msg->hdr.ch_number;

                "channel number=0x%x in message from partid=%d\n",

        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

        xpc_msgqueue_ref(ch);

        xpc_msgqueue_deref(ch);

        if (msg->hdr.size == 0) {
                xpc_handle_notify_mq_ack_uv(ch, msg);
                xpc_msgqueue_deref(ch);

        xpc_send_chctl_local_msgrequest_uv(part, ch->number);

        xpc_msgqueue_deref(ch);
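/*
 * For incoming notify-MQ messages, a zero-size message is an ACK for a
 * message we sent earlier (its slot is freed via
 * xpc_handle_notify_mq_ack_uv()); otherwise the payload is queued for
 * delivery and the channel manager is prodded through a local msgrequest
 * chctl flag.
 */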
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)

        partid = msg->hdr.partid;

                "invalid partid=0x%x in message\n", partid);

        if (xpc_part_ref(part)) {
                xpc_handle_notify_mq_msg_uv(part, msg);
                xpc_part_deref(part);
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)

        return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);

xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)

        int ndeliverable_payloads;

        xpc_msgqueue_ref(ch);

        ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

        if (ndeliverable_payloads > 0 &&

        xpc_msgqueue_deref(ch);
        xpc_msgqueue_ref(ch);

        ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);

        ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
                               msg_size);

        xpc_free_msg_slot_uv(ch, msg_slot);

        xpc_msgqueue_deref(ch);
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)

        msg_slot = &ch->sn.uv.send_msg_slots[entry];

        xpc_notify_sender_uv(ch, msg_slot, ch->reason);

xpc_get_deliverable_payload_uv(struct xpc_channel *ch)

        void *payload = NULL;

        entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
        if (entry != NULL) {
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)

        ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
        .setup_partitions = xpc_setup_partitions_uv,
        .teardown_partitions = xpc_teardown_partitions_uv,
        .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
        .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
        .setup_rsvd_page = xpc_setup_rsvd_page_uv,

        .allow_hb = xpc_allow_hb_uv,
        .disallow_hb = xpc_disallow_hb_uv,
        .disallow_all_hbs = xpc_disallow_all_hbs_uv,
        .increment_heartbeat = xpc_increment_heartbeat_uv,
        .offline_heartbeat = xpc_offline_heartbeat_uv,
        .online_heartbeat = xpc_online_heartbeat_uv,
        .heartbeat_init = xpc_heartbeat_init_uv,
        .heartbeat_exit = xpc_heartbeat_exit_uv,
        .get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

        .request_partition_activation =
            xpc_request_partition_activation_uv,
        .request_partition_reactivation =
            xpc_request_partition_reactivation_uv,
        .request_partition_deactivation =
            xpc_request_partition_deactivation_uv,
        .cancel_partition_deactivation_request =
            xpc_cancel_partition_deactivation_request_uv,

        .setup_ch_structures = xpc_setup_ch_structures_uv,
        .teardown_ch_structures = xpc_teardown_ch_structures_uv,

        .make_first_contact = xpc_make_first_contact_uv,

        .get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
        .send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
        .send_chctl_closereply = xpc_send_chctl_closereply_uv,
        .send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
        .send_chctl_openreply = xpc_send_chctl_openreply_uv,
        .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
        .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

        .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

        .setup_msg_structures = xpc_setup_msg_structures_uv,
        .teardown_msg_structures = xpc_teardown_msg_structures_uv,

        .indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
        .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
        .assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
        .partition_engaged = xpc_partition_engaged_uv,
        .any_partition_engaged = xpc_any_partition_engaged_uv,

        .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
        .send_payload = xpc_send_payload_uv,
        .get_deliverable_payload = xpc_get_deliverable_payload_uv,
        .received_payload = xpc_received_payload_uv,
        .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
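/*
 * The designated initializers above plug this file's UV-specific routines
 * into XPC's generic architecture-operations table, which the common XPC
 * code calls through at runtime.
 */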
xpc_init_mq_node(int nid)

        xpc_activate_mq_uv =
                xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, cpu,
                                     XPC_ACTIVATE_IRQ_NAME,
                                     xpc_handle_activate_IRQ_uv);
        if (!IS_ERR(xpc_activate_mq_uv))

        if (IS_ERR(xpc_activate_mq_uv)) {
                return PTR_ERR(xpc_activate_mq_uv);

        xpc_notify_mq_uv =
                xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, cpu,
                                     XPC_NOTIFY_IRQ_NAME,
                                     xpc_handle_notify_IRQ_uv);
        if (!IS_ERR(xpc_notify_mq_uv))

        if (IS_ERR(xpc_notify_mq_uv)) {
                xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
                return PTR_ERR(xpc_notify_mq_uv);
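/*
 * xpc_init_mq_node() creates both GRU message queues (activate and notify)
 * on the given node; if the notify queue cannot be created, the activate
 * queue that was just set up is destroyed and the error is returned.
 */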
        if (xpc_mq_node < 0)

        ret = xpc_init_mq_node(nid);

        ret = xpc_init_mq_node(xpc_mq_node);

        xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
        xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);

MODULE_PARM_DESC(xpc_mq_node,
                 "Node number on which to allocate message queues.");