xpc_sn2.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License. See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
7  */
8 
9 /*
10  * Cross Partition Communication (XPC) sn2-based functions.
11  *
12  * Architecture specific implementation of common functions.
13  *
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/slab.h>
18 #include <asm/uncached.h>
19 #include <asm/sn/mspec.h>
20 #include <asm/sn/sn_sal.h>
21 #include "xpc.h"
22 
23 /*
24  * Define the number of u64s required to represent all the C-brick nasids
25  * as a bitmap. The cross-partition kernel modules deal only with
26  * C-brick nasids, thus the need for bitmaps which don't account for
27  * odd-numbered (non C-brick) nasids.
28  */
29 #define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2)
30 #define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
31 #define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
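/*
 * Worked example (an annotation, not part of the original file): assuming
 * MAX_NUMALINK_NODES is 16384 on sn2, XPC_MAX_PHYSNODES_SN2 comes out to
 * 8192 C-brick nasids, XP_NASID_MASK_BYTES_SN2 to 1024 bytes and
 * XP_NASID_MASK_WORDS_SN2 to 128 u64 words -- the same "128 amo variables"
 * figure quoted in the comment below.
 */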
32 
33 /*
34  * Memory for XPC's amo variables is allocated by the MSPEC driver. These
35  * pages are located in the lowest granule. The lowest granule uses 4k pages
36  * for cached references and an alternate TLB handler to never provide a
37  * cacheable mapping for the entire region. This prevents speculative
38  * reads of cached copies of our lines from being issued, which would cause
39  * the SHUB to generate a PI FSB Protocol error. For XPC, we need 64
40  * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
41  * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify
42  * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote
43  * partitions (i.e., XPCs) consider themselves currently engaged with the
44  * local XPC and 1 amo variable to request partition deactivation.
45  */
46 #define XPC_NOTIFY_IRQ_AMOS_SN2 0
47 #define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \
48  XP_MAX_NPARTITIONS_SN2)
49 #define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \
50  XP_NASID_MASK_WORDS_SN2)
51 #define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
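/*
 * Resulting amo page index layout, sketched here as an annotation and
 * assuming XP_MAX_NPARTITIONS_SN2 == 64 and XP_NASID_MASK_WORDS_SN2 == 128
 * per the comment above:
 *
 *   0 ..  63   NOTIFY IRQ amos, one per possible remote partition
 *  64 .. 191   ACTIVATE IRQ amos, one per word of the C-brick nasid mask
 *       192    engaged-partitions amo
 *       193    deactivate-request amo
 */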
52 
53 /*
54  * Buffer used to store a local copy of portions of a remote partition's
55  * reserved page (either its header and part_nasids mask, or its vars).
56  */
57 static void *xpc_remote_copy_buffer_base_sn2;
58 static char *xpc_remote_copy_buffer_sn2;
59 
60 static struct xpc_vars_sn2 *xpc_vars_sn2;
61 static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
62
63 static int
64 xpc_setup_partitions_sn2(void)
65 {
66  /* nothing needs to be done */
67  return 0;
68 }
69 
70 static void
71 xpc_teardown_partitions_sn2(void)
72 {
73  /* nothing needs to be done */
74 }
75 
76 /* SH_IPI_ACCESS shub register value on startup */
77 static u64 xpc_sh1_IPI_access_sn2;
78 static u64 xpc_sh2_IPI_access0_sn2;
79 static u64 xpc_sh2_IPI_access1_sn2;
80 static u64 xpc_sh2_IPI_access2_sn2;
81 static u64 xpc_sh2_IPI_access3_sn2;
82 
83 /*
84  * Change protections to allow IPI operations.
85  */
86 static void
87 xpc_allow_IPI_ops_sn2(void)
88 {
89  int node;
90  int nasid;
91 
92  /* !!! The following should get moved into SAL. */
93  if (is_shub2()) {
94  xpc_sh2_IPI_access0_sn2 =
95  (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
96  xpc_sh2_IPI_access1_sn2 =
97  (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
98  xpc_sh2_IPI_access2_sn2 =
99  (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
100  xpc_sh2_IPI_access3_sn2 =
101  (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
102
103  for_each_online_node(node) {
104  nasid = cnodeid_to_nasid(node);
105  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
106  -1UL);
107  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
108  -1UL);
109  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
110  -1UL);
111  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
112  -1UL);
113  }
114  } else {
115  xpc_sh1_IPI_access_sn2 =
116  (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
117
118  for_each_online_node(node) {
119  nasid = cnodeid_to_nasid(node);
120  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
121  -1UL);
122  }
123  }
124 }
125 
126 /*
127  * Restrict protections to disallow IPI operations.
128  */
129 static void
130 xpc_disallow_IPI_ops_sn2(void)
131 {
132  int node;
133  int nasid;
134 
135  /* !!! The following should get moved into SAL. */
136  if (is_shub2()) {
137  for_each_online_node(node) {
138  nasid = cnodeid_to_nasid(node);
139  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
140  xpc_sh2_IPI_access0_sn2);
141  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
142  xpc_sh2_IPI_access1_sn2);
143  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
144  xpc_sh2_IPI_access2_sn2);
145  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
146  xpc_sh2_IPI_access3_sn2);
147  }
148  } else {
149  for_each_online_node(node) {
150  nasid = cnodeid_to_nasid(node);
151  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
152  xpc_sh1_IPI_access_sn2);
153  }
154  }
155 }
156 
157 /*
158  * The following set of functions are used for the sending and receiving of
159  * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
160  * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
161  * is associated with channel activity (SGI_XPC_NOTIFY).
162  */
163 
164 static u64
165 xpc_receive_IRQ_amo_sn2(struct amo *amo)
166 {
167  return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
168 }
169 
170 static enum xp_retval
171 xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
172  int vector)
173 {
174  int ret = 0;
175  unsigned long irq_flags;
176 
177  local_irq_save(irq_flags);
178 
179  FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
180  sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
181 
182  /*
183  * We must always use the nofault function regardless of whether we
184  * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
185  * didn't, we'd never know that the other partition is down and would
186  * keep sending IRQs and amos to it until the heartbeat times out.
187  */
188  ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
189  xp_nofault_PIOR_target));
190
191  local_irq_restore(irq_flags);
192 
193  return (ret == 0) ? xpSuccess : xpPioReadError;
194 }
195 
196 static struct amo *
197 xpc_init_IRQ_amo_sn2(int index)
198 {
199  struct amo *amo = xpc_vars_sn2->amos_page + index;
200 
201  (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */
202  return amo;
203 }
204 
205 /*
206  * Functions associated with SGI_XPC_ACTIVATE IRQ.
207  */
208 
209 /*
210  * Notify the heartbeat check thread that an activate IRQ has been received.
211  */
212 static irqreturn_t
213 xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
214 {
215  unsigned long irq_flags;
216 
217  spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
218  xpc_activate_IRQ_rcvd++;
219  spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
220 
221  wake_up_interruptible(&xpc_activate_IRQ_wq);
222  return IRQ_HANDLED;
223 }
224 
225 /*
226  * Flag the appropriate amo variable and send an IRQ to the specified node.
227  */
228 static void
229 xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
230  int to_nasid, int to_phys_cpuid)
231 {
232  struct amo *amos = (struct amo *)__va(amos_page_pa +
233  (XPC_ACTIVATE_IRQ_AMOS_SN2 *
234  sizeof(struct amo)));
235 
236  (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
237  BIT_MASK(from_nasid / 2), to_nasid,
238  to_phys_cpuid, SGI_XPC_ACTIVATE);
239 }
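/*
 * Illustrative note (an annotation, not part of the original source):
 * C-brick nasids are even, so a nasid selects activate-amo word
 * BIT_WORD(nasid / 2) and bit BIT_MASK(nasid / 2). Assuming 64-bit longs,
 * nasid 132 -> bit 66 -> word 1, bit 2; xpc_identify_activate_IRQ_sender_sn2()
 * later inverts this with nasid = (l * BITS_PER_LONG + b) * 2, giving
 * (1 * 64 + 2) * 2 = 132 back.
 */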
240 
241 static void
242 xpc_send_local_activate_IRQ_sn2(int from_nasid)
243 {
244  unsigned long irq_flags;
245  struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
246  (XPC_ACTIVATE_IRQ_AMOS_SN2 *
247  sizeof(struct amo)));
248 
249  /* fake the sending and receipt of an activate IRQ from remote nasid */
250  FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
251  FETCHOP_OR, BIT_MASK(from_nasid / 2));
252 
253  spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
254  xpc_activate_IRQ_rcvd++;
255  spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
256 
257  wake_up_interruptible(&xpc_activate_IRQ_wq);
258 }
259 
260 /*
261  * Functions associated with SGI_XPC_NOTIFY IRQ.
262  */
263 
264 /*
265  * Check to see if any chctl flags were sent from the specified partition.
266  */
267 static void
268 xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
269 {
270  union xpc_channel_ctl_flags chctl;
271  unsigned long irq_flags;
272 
273  chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
274  local_chctl_amo_va);
275  if (chctl.all_flags == 0)
276  return;
277 
278  spin_lock_irqsave(&part->chctl_lock, irq_flags);
279  part->chctl.all_flags |= chctl.all_flags;
280  spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
281 
282  dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
283  "0x%llx\n", XPC_PARTID(part), chctl.all_flags);
284 
285  xpc_wakeup_channel_mgr(part);
286 }
287 
288 /*
289  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
290  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
291  * than one partition, we use an amo structure per partition to indicate
292  * whether a partition has sent an IRQ or not. If it has, then wake up the
293  * associated kthread to handle it.
294  *
295  * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
296  * running on other partitions.
297  *
298  * Noteworthy Arguments:
299  *
300  * irq - Interrupt ReQuest number. NOT USED.
301  *
302  * dev_id - partid of IRQ's potential sender.
303  */
304 static irqreturn_t
305 xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
306 {
307  short partid = (short)(u64)dev_id;
308  struct xpc_partition *part = &xpc_partitions[partid];
309 
310  DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);
311
312  if (xpc_part_ref(part)) {
313  xpc_check_for_sent_chctl_flags_sn2(part);
314 
315  xpc_part_deref(part);
316  }
317  return IRQ_HANDLED;
318 }
319 
320 /*
321  * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
322  * because the write to their associated amo variable completed after the IRQ
323  * was received.
324  */
325 static void
326 xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
327 {
328  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
329 
330  if (xpc_part_ref(part)) {
331  xpc_check_for_sent_chctl_flags_sn2(part);
332 
333  part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
334  XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
335  add_timer(&part_sn2->dropped_notify_IRQ_timer);
336  xpc_part_deref(part);
337  }
338 }
339 
340 /*
341  * Send a notify IRQ to the remote partition that is associated with the
342  * specified channel.
343  */
344 static void
345 xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
346  char *chctl_flag_string, unsigned long *irq_flags)
347 {
348  struct xpc_partition *part = &xpc_partitions[ch->partid];
349  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
350  union xpc_channel_ctl_flags chctl = { 0 };
351  enum xp_retval ret;
352 
353  if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
354  chctl.flags[ch->number] = chctl_flag;
355  ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
356  chctl.all_flags,
357  part_sn2->notify_IRQ_nasid,
358  part_sn2->notify_IRQ_phys_cpuid,
359  SGI_XPC_NOTIFY);
360  dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
361  chctl_flag_string, ch->partid, ch->number, ret);
362  if (unlikely(ret != xpSuccess)) {
363  if (irq_flags != NULL)
364  spin_unlock_irqrestore(&ch->lock, *irq_flags);
365  XPC_DEACTIVATE_PARTITION(part, ret);
366  if (irq_flags != NULL)
367  spin_lock_irqsave(&ch->lock, *irq_flags);
368  }
369  }
370 }
371 
372 #define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
373  xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
374 
375 /*
376  * Make it look like the remote partition, which is associated with the
377  * specified channel, sent us a notify IRQ. This faked IRQ will be handled
378  * by xpc_check_for_dropped_notify_IRQ_sn2().
379  */
380 static void
381 xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
382  char *chctl_flag_string)
383 {
384  struct xpc_partition *part = &xpc_partitions[ch->partid];
385  union xpc_channel_ctl_flags chctl = { 0 };
386 
387  chctl.flags[ch->number] = chctl_flag;
388  FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
389  variable), FETCHOP_OR, chctl.all_flags);
390  dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
391  chctl_flag_string, ch->partid, ch->number);
392 }
393 
394 #define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
395  xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
396 
397 static void
398 xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
399  unsigned long *irq_flags)
400 {
401  struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
402 
403  args->reason = ch->reason;
404  XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
405 }
406 
407 static void
408 xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
409 {
410  XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
411 }
412 
413 static void
414 xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
415 {
416  struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
417 
418  args->entry_size = ch->entry_size;
419  args->local_nentries = ch->local_nentries;
420  XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
421 }
422 
423 static void
424 xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
425 {
426  struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
427 
428  args->remote_nentries = ch->remote_nentries;
429  args->local_nentries = ch->local_nentries;
430  args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
431  XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
432 }
433 
434 static void
435 xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
436  unsigned long *irq_flags)
437 {
438  XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
439 }
440 
441 static void
442 xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
443 {
444  XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
445 }
446 
447 static void
448 xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
449 {
450  XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
451 }
452 
453 static enum xp_retval
454 xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
455  unsigned long msgqueue_pa)
456 {
457  ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
458  return xpSuccess;
459 }
460 
461 /*
462  * This next set of functions are used to keep track of when a partition is
463  * potentially engaged in accessing memory belonging to another partition.
464  */
465 
466 static void
467 xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
468 {
469  unsigned long irq_flags;
470  struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
471  (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
472  sizeof(struct amo)));
473 
474  local_irq_save(irq_flags);
475 
476  /* set bit corresponding to our partid in remote partition's amo */
477  FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
478  BIT(sn_partition_id));
479
480  /*
481  * We must always use the nofault function regardless of whether we
482  * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
483  * didn't, we'd never know that the other partition is down and would
484  * keep sending IRQs and amos to it until the heartbeat times out.
485  */
486  (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
487  variable),
488  xp_nofault_PIOR_target));
489
490  local_irq_restore(irq_flags);
491 }
492 
493 static void
494 xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
495 {
496  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
497  unsigned long irq_flags;
498  struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
499  (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
500  sizeof(struct amo)));
501 
502  local_irq_save(irq_flags);
503 
504  /* clear bit corresponding to our partid in remote partition's amo */
505  FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
506  ~BIT(sn_partition_id));
507 
508  /*
509  * We must always use the nofault function regardless of whether we
510  * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
511  * didn't, we'd never know that the other partition is down and would
512  * keep sending IRQs and amos to it until the heartbeat times out.
513  */
514  (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
515  variable),
516  xp_nofault_PIOR_target));
517
518  local_irq_restore(irq_flags);
519 
520  /*
521  * Send activate IRQ to get other side to see that we've cleared our
522  * bit in their engaged partitions amo.
523  */
524  xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
525  cnodeid_to_nasid(0),
526  part_sn2->activate_IRQ_nasid,
527  part_sn2->activate_IRQ_phys_cpuid);
528 }
529 
530 static void
531 xpc_assume_partition_disengaged_sn2(short partid)
532 {
533  struct amo *amo = xpc_vars_sn2->amos_page +
534  XPC_ENGAGED_PARTITIONS_AMO_SN2;
535
536  /* clear bit(s) based on partid mask in our partition's amo */
537  FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
538  ~BIT(partid));
539 }
540 
541 static int
542 xpc_partition_engaged_sn2(short partid)
543 {
544  struct amo *amo = xpc_vars_sn2->amos_page +
545  XPC_ENGAGED_PARTITIONS_AMO_SN2;
546
547  /* our partition's amo variable ANDed with partid mask */
548  return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
549  BIT(partid)) != 0;
550 }
551 
552 static int
553 xpc_any_partition_engaged_sn2(void)
554 {
555  struct amo *amo = xpc_vars_sn2->amos_page +
556  XPC_ENGAGED_PARTITIONS_AMO_SN2;
557
558  /* our partition's amo variable */
559  return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
560 }
561 
562 /* original protection values for each node */
563 static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
564 
565 /*
566  * Change protections to allow amo operations on non-Shub 1.1 systems.
567  */
568 static enum xp_retval
569 xpc_allow_amo_ops_sn2(struct amo *amos_page)
570 {
571  enum xp_retval ret = xpSuccess;
572 
573  /*
574  * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
575  * collides with memory operations. On those systems we call
576  * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
577  */
578  if (!enable_shub_wars_1_1())
579  ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
580 
581  return ret;
582 }
583 
584 /*
585  * Change protections to allow amo operations on Shub 1.1 systems.
586  */
587 static void
588 xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
589 {
590  int node;
591  int nasid;
592 
593  if (!enable_shub_wars_1_1())
594  return;
595 
596  for_each_online_node(node) {
597  nasid = cnodeid_to_nasid(node);
598  /* save current protection values */
599  xpc_prot_vec_sn2[node] =
600  (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
601  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
602  /* open up everything */
603  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
604  SH1_MD_DQLP_MMR_DIR_PRIVEC0),
605  -1UL);
606  HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
607  SH1_MD_DQRP_MMR_DIR_PRIVEC0),
608  -1UL);
609  }
610 }
611 
612 static enum xp_retval
613 xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
614  size_t *len)
615 {
616  s64 status;
617  enum xp_retval ret;
618 
619  status = sn_partition_reserved_page_pa((u64)buf, cookie,
620  (u64 *)rp_pa, (u64 *)len);
621  if (status == SALRET_OK)
622  ret = xpSuccess;
623  else if (status == SALRET_MORE_PASSES)
624  ret = xpNeedMoreInfo;
625  else
626  ret = xpSalError;
627 
628  return ret;
629 }
630 
631 
632 static int
633 xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
634 {
635  struct amo *amos_page;
636  int i;
637  int ret;
638 
639  xpc_vars_sn2 = XPC_RP_VARS(rp);
640 
641  rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);
642 
643  /* vars_part array follows immediately after vars */
644  xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
645  XPC_RP_VARS_SIZE);
646
647  /*
648  * Before clearing xpc_vars_sn2, see if a page of amos had been
649  * previously allocated. If not we'll need to allocate one and set
650  * permissions so that cross-partition amos are allowed.
651  *
652  * The allocated amo page needs MCA reporting to remain disabled after
653  * XPC has unloaded. To make this work, we keep a copy of the pointer
654  * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
655  * which is pointed to by the reserved page, and re-use that saved copy
656  * on subsequent loads of XPC. This amo page is never freed, and its
657  * memory protections are never restricted.
658  */
659  amos_page = xpc_vars_sn2->amos_page;
660  if (amos_page == NULL) {
661  amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
662  if (amos_page == NULL) {
663  dev_err(xpc_part, "can't allocate page of amos\n");
664  return -ENOMEM;
665  }
666 
667  /*
668  * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
669  * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
670  */
671  ret = xpc_allow_amo_ops_sn2(amos_page);
672  if (ret != xpSuccess) {
673  dev_err(xpc_part, "can't allow amo operations\n");
674  uncached_free_page(__IA64_UNCACHED_OFFSET |
675  TO_PHYS((u64)amos_page), 1);
676  return -EPERM;
677  }
678  }
679 
680  /* clear xpc_vars_sn2 */
681  memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));
682 
683  xpc_vars_sn2->version = XPC_V_VERSION;
684  xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
685  xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
686  xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
687  xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
688  xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */
689 
690  /* clear xpc_vars_part_sn2 */
691  memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
692  XP_MAX_NPARTITIONS_SN2);
693
694  /* initialize the activate IRQ related amo variables */
695  for (i = 0; i < xpc_nasid_mask_nlongs; i++)
696  (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);
697 
698  /* initialize the engaged remote partitions related amo variables */
699  (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
700  (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);
701 
702  return 0;
703 }
704 
705 static int
706 xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
707 {
708  return test_bit(partid, heartbeating_to_mask);
709 }
710 
711 static void
712 xpc_allow_hb_sn2(short partid)
713 {
714  DBUG_ON(xpc_vars_sn2 == NULL);
715  set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
716 }
717 
718 static void
719 xpc_disallow_hb_sn2(short partid)
720 {
721  DBUG_ON(xpc_vars_sn2 == NULL);
722  clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
723 }
724 
725 static void
726 xpc_disallow_all_hbs_sn2(void)
727 {
728  DBUG_ON(xpc_vars_sn2 == NULL);
729  bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
730 }
731 
732 static void
733 xpc_increment_heartbeat_sn2(void)
734 {
735  xpc_vars_sn2->heartbeat++;
736 }
737 
738 static void
739 xpc_offline_heartbeat_sn2(void)
740 {
741  xpc_increment_heartbeat_sn2();
742  xpc_vars_sn2->heartbeat_offline = 1;
743 }
744 
745 static void
746 xpc_online_heartbeat_sn2(void)
747 {
748  xpc_increment_heartbeat_sn2();
749  xpc_vars_sn2->heartbeat_offline = 0;
750 }
751 
752 static void
753 xpc_heartbeat_init_sn2(void)
754 {
755  DBUG_ON(xpc_vars_sn2 == NULL);
756 
757  bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
758  xpc_online_heartbeat_sn2();
759 }
760 
761 static void
762 xpc_heartbeat_exit_sn2(void)
763 {
764  xpc_offline_heartbeat_sn2();
765 }
766 
767 static enum xp_retval
768 xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
769 {
770  struct xpc_vars_sn2 *remote_vars;
771  enum xp_retval ret;
772 
773  remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
774 
775  /* pull the remote vars structure that contains the heartbeat */
776  ret = xp_remote_memcpy(xp_pa(remote_vars),
777  part->sn.sn2.remote_vars_pa,
778  XPC_RP_VARS_SIZE);
779  if (ret != xpSuccess)
780  return ret;
781 
782  dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
783  "heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
784  remote_vars->heartbeat, part->last_heartbeat,
785  remote_vars->heartbeat_offline,
786  remote_vars->heartbeating_to_mask[0]);
787 
788  if ((remote_vars->heartbeat == part->last_heartbeat &&
789  !remote_vars->heartbeat_offline) ||
790  !xpc_hb_allowed_sn2(sn_partition_id,
791  remote_vars->heartbeating_to_mask)) {
792  ret = xpNoHeartbeat;
793  } else {
794  part->last_heartbeat = remote_vars->heartbeat;
795  }
796 
797  return ret;
798 }
799 
800 /*
801  * Get a copy of the remote partition's XPC variables from the reserved page.
802  *
803  * remote_vars points to a buffer that is cacheline aligned for BTE copies and
804  * assumed to be of size XPC_RP_VARS_SIZE.
805  */
806 static enum xp_retval
807 xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
808  struct xpc_vars_sn2 *remote_vars)
809 {
810  enum xp_retval ret;
811 
812  if (remote_vars_pa == 0)
813  return xpVarsNotSet;
814 
815  /* pull over the cross partition variables */
816  ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
817  XPC_RP_VARS_SIZE);
818  if (ret != xpSuccess)
819  return ret;
820 
821  if (XPC_VERSION_MAJOR(remote_vars->version) !=
822  XPC_VERSION_MAJOR(XPC_V_VERSION)) {
823  return xpBadVersion;
824  }
825 
826  return xpSuccess;
827 }
828 
829 static void
830 xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
831  unsigned long remote_rp_pa, int nasid)
832 {
833  xpc_send_local_activate_IRQ_sn2(nasid);
834 }
835 
836 static void
837 xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
838 {
839  xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
840 }
841 
842 static void
843 xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
844 {
845  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
846  unsigned long irq_flags;
847  struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
848  (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
849  sizeof(struct amo)));
850 
851  local_irq_save(irq_flags);
852 
853  /* set bit corresponding to our partid in remote partition's amo */
854  FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
855  BIT(sn_partition_id));
856
857  /*
858  * We must always use the nofault function regardless of whether we
859  * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
860  * didn't, we'd never know that the other partition is down and would
861  * keep sending IRQs and amos to it until the heartbeat times out.
862  */
863  (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
864  variable),
865  xp_nofault_PIOR_target));
866
867  local_irq_restore(irq_flags);
868 
869  /*
870  * Send activate IRQ to get other side to see that we've set our
871  * bit in their deactivate request amo.
872  */
873  xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
874  cnodeid_to_nasid(0),
875  part_sn2->activate_IRQ_nasid,
876  part_sn2->activate_IRQ_phys_cpuid);
877 }
878 
879 static void
880 xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
881 {
882  unsigned long irq_flags;
883  struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
884  (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
885  sizeof(struct amo)));
886 
887  local_irq_save(irq_flags);
888 
889  /* clear bit corresponding to our partid in remote partition's amo */
890  FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
891  ~BIT(sn_partition_id));
892 
893  /*
894  * We must always use the nofault function regardless of whether we
895  * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
896  * didn't, we'd never know that the other partition is down and would
897  * keep sending IRQs and amos to it until the heartbeat times out.
898  */
899  (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
900  variable),
901  xp_nofault_PIOR_target));
902
903  local_irq_restore(irq_flags);
904 }
905 
906 static int
907 xpc_partition_deactivation_requested_sn2(short partid)
908 {
909  struct amo *amo = xpc_vars_sn2->amos_page +
910  XPC_DEACTIVATE_REQUEST_AMO_SN2;
911
912  /* our partition's amo variable ANDed with partid mask */
913  return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
914  BIT(partid)) != 0;
915 }
916 
917 /*
918  * Update the remote partition's info.
919  */
920 static void
921 xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
922  unsigned long *remote_rp_ts_jiffies,
923  unsigned long remote_rp_pa,
924  unsigned long remote_vars_pa,
925  struct xpc_vars_sn2 *remote_vars)
926 {
927  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
928 
929  part->remote_rp_version = remote_rp_version;
930  dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
931  part->remote_rp_version);
932 
933  part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
934  dev_dbg(xpc_part, " remote_rp_ts_jiffies = 0x%016lx\n",
935  part->remote_rp_ts_jiffies);
936 
937  part->remote_rp_pa = remote_rp_pa;
938  dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
939 
940  part_sn2->remote_vars_pa = remote_vars_pa;
941  dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
942  part_sn2->remote_vars_pa);
943 
944  part->last_heartbeat = remote_vars->heartbeat - 1;
945  dev_dbg(xpc_part, " last_heartbeat = 0x%016llx\n",
946  part->last_heartbeat);
947 
948  part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
949  dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
950  part_sn2->remote_vars_part_pa);
951 
952  part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
953  dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
954  part_sn2->activate_IRQ_nasid);
955 
956  part_sn2->activate_IRQ_phys_cpuid =
957  remote_vars->activate_IRQ_phys_cpuid;
958  dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
959  part_sn2->activate_IRQ_phys_cpuid);
960 
961  part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
962  dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
963  part_sn2->remote_amos_page_pa);
964 
965  part_sn2->remote_vars_version = remote_vars->version;
966  dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
967  part_sn2->remote_vars_version);
968 }
969 
970 /*
971  * Prior code has determined the nasid which generated an activate IRQ.
972  * Inspect that nasid to determine if its partition needs to be activated
973  * or deactivated.
974  *
975  * A partition is considered "awaiting activation" if our partition
976  * flags indicate it is not active and it has a heartbeat. A
977  * partition is considered "awaiting deactivation" if our partition
978  * flags indicate it is active but it has no heartbeat or it is not
979  * sending its heartbeat to us.
980  *
981  * To determine the heartbeat, the remote nasid must have a properly
982  * initialized reserved page.
983  */
984 static void
985 xpc_identify_activate_IRQ_req_sn2(int nasid)
986 {
987  struct xpc_rsvd_page *remote_rp;
988  struct xpc_vars_sn2 *remote_vars;
989  unsigned long remote_rp_pa;
990  unsigned long remote_vars_pa;
991  int remote_rp_version;
992  int reactivate = 0;
993  unsigned long remote_rp_ts_jiffies = 0;
994  short partid;
995  struct xpc_partition *part;
996  struct xpc_partition_sn2 *part_sn2;
997  enum xp_retval ret;
998 
999  /* pull over the reserved page structure */
1000 
1001  remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;
1002 
1003  ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
1004  if (ret != xpSuccess) {
1005  dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
1006  "which sent interrupt, reason=%d\n", nasid, ret);
1007  return;
1008  }
1009 
1010  remote_vars_pa = remote_rp->sn.sn2.vars_pa;
1011  remote_rp_version = remote_rp->version;
1012  remote_rp_ts_jiffies = remote_rp->ts_jiffies;
1013 
1014  partid = remote_rp->SAL_partid;
1015  part = &xpc_partitions[partid];
1016  part_sn2 = &part->sn.sn2;
1017 
1018  /* pull over the cross partition variables */
1019 
1020  remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
1021 
1022  ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
1023  if (ret != xpSuccess) {
1024  dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
1025  "which sent interrupt, reason=%d\n", nasid, ret);
1026 
1027  XPC_DEACTIVATE_PARTITION(part, ret);
1028  return;
1029  }
1030 
1031  part->activate_IRQ_rcvd++;
1032 
1033  dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
1034  "%lld:0x%lx\n", (int)nasid, (int)partid,
1035  part->activate_IRQ_rcvd,
1036  remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
1037 
1038  if (xpc_partition_disengaged(part) &&
1039  part->act_state == XPC_P_AS_INACTIVE) {
1040 
1041  xpc_update_partition_info_sn2(part, remote_rp_version,
1042  &remote_rp_ts_jiffies,
1043  remote_rp_pa, remote_vars_pa,
1044  remote_vars);
1045 
1046  if (xpc_partition_deactivation_requested_sn2(partid)) {
1047  /*
1048  * Other side is waiting on us to deactivate even though
1049  * we already have.
1050  */
1051  return;
1052  }
1053 
1054  xpc_activate_partition(part);
1055  return;
1056  }
1057 
1058  DBUG_ON(part->remote_rp_version == 0);
1059  DBUG_ON(part_sn2->remote_vars_version == 0);
1060 
1061  if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {
1062 
1063  /* the other side rebooted */
1064 
1065  DBUG_ON(xpc_partition_engaged_sn2(partid));
1066  DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));
1067 
1068  xpc_update_partition_info_sn2(part, remote_rp_version,
1069  &remote_rp_ts_jiffies,
1070  remote_rp_pa, remote_vars_pa,
1071  remote_vars);
1072  reactivate = 1;
1073  }
1074 
1075  if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
1076  /* still waiting on other side to disengage from us */
1077  return;
1078  }
1079 
1080  if (reactivate)
1081  XPC_DEACTIVATE_PARTITION(part, xpReactivating);
1082  else if (xpc_partition_deactivation_requested_sn2(partid))
1083  XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
1084 }
1085 
1086 /*
1087  * Loop through the activation amo variables and process any bits
1088  * which are set. Each bit indicates a nasid sending a partition
1089  * activation or deactivation request.
1090  *
1091  * Return the number of IRQs detected.
1092  */
1093 int
1094 xpc_identify_activate_IRQ_sender_sn2(void)
1095 {
1096  int l;
1097  int b;
1098  unsigned long nasid_mask_long;
1099  u64 nasid; /* remote nasid */
1100  int n_IRQs_detected = 0;
1101  struct amo *act_amos;
1102 
1103  act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;
1104 
1105  /* scan through activate amo variables looking for non-zero entries */
1106  for (l = 0; l < xpc_nasid_mask_nlongs; l++) {
1107 
1108  if (xpc_exiting)
1109  break;
1110 
1111  nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);
1112 
1113  b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
1114  if (b >= BITS_PER_LONG) {
1115  /* no IRQs from nasids in this amo variable */
1116  continue;
1117  }
1118 
1119  dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
1120  nasid_mask_long);
1121 
1122  /*
1123  * If this nasid has been added to the machine since
1124  * our partition was reset, this will retain the
1125  * remote nasid in our reserved page's machine mask.
1126  * This is used in the event of module reload.
1127  */
1128  xpc_mach_nasids[l] |= nasid_mask_long;
1129 
1130  /* locate the nasid(s) which sent interrupts */
1131 
1132  do {
1133  n_IRQs_detected++;
1134  nasid = (l * BITS_PER_LONG + b) * 2;
1135  dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
1136  xpc_identify_activate_IRQ_req_sn2(nasid);
1137 
1138  b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
1139  b + 1);
1140  } while (b < BITS_PER_LONG);
1141  }
1142  return n_IRQs_detected;
1143 }
1144 
1145 static void
1146 xpc_process_activate_IRQ_rcvd_sn2(void)
1147 {
1148  unsigned long irq_flags;
1149  int n_IRQs_expected;
1150  int n_IRQs_detected;
1151 
1152  spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1153  n_IRQs_expected = xpc_activate_IRQ_rcvd;
1154  xpc_activate_IRQ_rcvd = 0;
1155  spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
1156 
1157  n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
1158  if (n_IRQs_detected < n_IRQs_expected) {
1159  /* retry once to help avoid missing amo */
1160  (void)xpc_identify_activate_IRQ_sender_sn2();
1161  }
1162 }
1163 
1164 /*
1165  * Setup the channel structures that are sn2 specific.
1166  */
1167 static enum xp_retval
1168 xpc_setup_ch_structures_sn2(struct xpc_partition *part)
1169 {
1170  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1171  struct xpc_channel_sn2 *ch_sn2;
1172  enum xp_retval retval;
1173  int ret;
1174  int cpuid;
1175  int ch_number;
1176  struct timer_list *timer;
1177  short partid = XPC_PARTID(part);
1178 
1179  /* allocate all the required GET/PUT values */
1180 
1181  part_sn2->local_GPs =
1182  xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
1183  &part_sn2->local_GPs_base);
1184  if (part_sn2->local_GPs == NULL) {
1185  dev_err(xpc_chan, "can't get memory for local get/put "
1186  "values\n");
1187  return xpNoMemory;
1188  }
1189 
1190  part_sn2->remote_GPs =
1191  xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
1192  &part_sn2->remote_GPs_base);
1193  if (part_sn2->remote_GPs == NULL) {
1194  dev_err(xpc_chan, "can't get memory for remote get/put "
1195  "values\n");
1196  retval = xpNoMemory;
1197  goto out_1;
1198  }
1199 
1200  part_sn2->remote_GPs_pa = 0;
1201 
1202  /* allocate all the required open and close args */
1203 
1204  part_sn2->local_openclose_args =
1205  xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
1206  GFP_KERNEL, &part_sn2->
1207  local_openclose_args_base);
1208  if (part_sn2->local_openclose_args == NULL) {
1209  dev_err(xpc_chan, "can't get memory for local connect args\n");
1210  retval = xpNoMemory;
1211  goto out_2;
1212  }
1213 
1214  part_sn2->remote_openclose_args_pa = 0;
1215 
1216  part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
1217 
1218  part_sn2->notify_IRQ_nasid = 0;
1219  part_sn2->notify_IRQ_phys_cpuid = 0;
1220  part_sn2->remote_chctl_amo_va = NULL;
1221 
1222  sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
1223  ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
1224  IRQF_SHARED, part_sn2->notify_IRQ_owner,
1225  (void *)(u64)partid);
1226  if (ret != 0) {
1227  dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
1228  "errno=%d\n", -ret);
1229  retval = xpLackOfResources;
1230  goto out_3;
1231  }
1232 
1233  /* Setup a timer to check for dropped notify IRQs */
1234  timer = &part_sn2->dropped_notify_IRQ_timer;
1235  init_timer(timer);
1236  timer->function =
1237  (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
1238  timer->data = (unsigned long)part;
1239  timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
1240  add_timer(timer);
1241 
1242  for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1243  ch_sn2 = &part->channels[ch_number].sn.sn2;
1244 
1245  ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
1246  ch_sn2->local_openclose_args =
1247  &part_sn2->local_openclose_args[ch_number];
1248 
1249  mutex_init(&ch_sn2->msg_to_pull_mutex);
1250  }
1251 
1252  /*
1253  * Setup the per partition specific variables required by the
1254  * remote partition to establish channel connections with us.
1255  *
1256  * The setting of the magic # indicates that these per partition
1257  * specific variables are ready to be used.
1258  */
1259  xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
1260  xpc_vars_part_sn2[partid].openclose_args_pa =
1261  xp_pa(part_sn2->local_openclose_args);
1262  xpc_vars_part_sn2[partid].chctl_amo_pa =
1263  xp_pa(part_sn2->local_chctl_amo_va);
1264  cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
1265  xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
1266  xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
1267  cpu_physical_id(cpuid);
1268  xpc_vars_part_sn2[partid].nchannels = part->nchannels;
1269  xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;
1270 
1271  return xpSuccess;
1272 
1273  /* setup of ch structures failed */
1274 out_3:
1275  kfree(part_sn2->local_openclose_args_base);
1276  part_sn2->local_openclose_args = NULL;
1277 out_2:
1278  kfree(part_sn2->remote_GPs_base);
1279  part_sn2->remote_GPs = NULL;
1280 out_1:
1281  kfree(part_sn2->local_GPs_base);
1282  part_sn2->local_GPs = NULL;
1283  return retval;
1284 }
1285 
1286 /*
1287  * Teardown the channel structures that are sn2 specific.
1288  */
1289 static void
1290 xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
1291 {
1292  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1293  short partid = XPC_PARTID(part);
1294 
1295  /*
1296  * Indicate that the variables specific to the remote partition are no
1297  * longer available for its use.
1298  */
1299  xpc_vars_part_sn2[partid].magic = 0;
1300 
1301  /* in case we've still got outstanding timers registered... */
1302  del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
1303  free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
1304 
1305  kfree(part_sn2->local_openclose_args_base);
1306  part_sn2->local_openclose_args = NULL;
1307  kfree(part_sn2->remote_GPs_base);
1308  part_sn2->remote_GPs = NULL;
1309  kfree(part_sn2->local_GPs_base);
1310  part_sn2->local_GPs = NULL;
1311  part_sn2->local_chctl_amo_va = NULL;
1312 }
1313 
1314 /*
1315  * Create a wrapper that hides the underlying mechanism for pulling a cacheline
1316  * (or multiple cachelines) from a remote partition.
1317  *
1318  * src_pa must be a cacheline aligned physical address on the remote partition.
1319  * dst must be a cacheline aligned virtual address on this partition.
1320  * cnt must be cacheline sized
1321  */
1322 /* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
1323 static enum xp_retval
1324 xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
1325  const unsigned long src_pa, size_t cnt)
1326 {
1327  enum xp_retval ret;
1328 
1329  DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
1330  DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
1331  DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
1332 
1333  if (part->act_state == XPC_P_AS_DEACTIVATING)
1334  return part->reason;
1335 
1336  ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
1337  if (ret != xpSuccess) {
1338  dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
1339  " ret=%d\n", XPC_PARTID(part), ret);
1340  }
1341  return ret;
1342 }
1343 
1344 /*
1345  * Pull the remote per partition specific variables from the specified
1346  * partition.
1347  */
1348 static enum xp_retval
1349 xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1350 {
1351  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1352  u8 buffer[L1_CACHE_BYTES * 2];
1353  struct xpc_vars_part_sn2 *pulled_entry_cacheline =
1354  (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
1355  struct xpc_vars_part_sn2 *pulled_entry;
1356  unsigned long remote_entry_cacheline_pa;
1357  unsigned long remote_entry_pa;
1358  short partid = XPC_PARTID(part);
1359  enum xp_retval ret;
1360 
1361  /* pull the cacheline that contains the variables we're interested in */
1362 
1363  DBUG_ON(part_sn2->remote_vars_part_pa !=
1364  L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
1365  DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);
1366 
1367  remote_entry_pa = part_sn2->remote_vars_part_pa +
1368  sn_partition_id * sizeof(struct xpc_vars_part_sn2);
1369 
1370  remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
1371 
1372  pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
1373  + (remote_entry_pa &
1374  (L1_CACHE_BYTES - 1)));
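/*
 * Worked example of the arithmetic above (annotation only): with
 * L1_CACHE_BYTES == 128 and sizeof(struct xpc_vars_part_sn2) == 64 (half a
 * cacheline, as the DBUG_ON above asserts), an sn_partition_id of 3 gives
 * remote_entry_pa = base + 192, so the pull below fetches the cacheline at
 * base + 128 and pulled_entry points 64 bytes into the local copy of it.
 */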
1375 
1376  ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
1377  remote_entry_cacheline_pa,
1378  L1_CACHE_BYTES);
1379  if (ret != xpSuccess) {
1380  dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
1381  "partition %d, ret=%d\n", partid, ret);
1382  return ret;
1383  }
1384 
1385  /* see if they've been set up yet */
1386 
1387  if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
1388  pulled_entry->magic != XPC_VP_MAGIC2_SN2) {
1389 
1390  if (pulled_entry->magic != 0) {
1391  dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
1392  "partition %d has bad magic value (=0x%llx)\n",
1393  partid, sn_partition_id, pulled_entry->magic);
1394  return xpBadMagic;
1395  }
1396 
1397  /* they've not been initialized yet */
1398  return xpRetry;
1399  }
1400 
1401  if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {
1402 
1403  /* validate the variables */
1404 
1405  if (pulled_entry->GPs_pa == 0 ||
1406  pulled_entry->openclose_args_pa == 0 ||
1407  pulled_entry->chctl_amo_pa == 0) {
1408 
1409  dev_err(xpc_chan, "partition %d's XPC vars_part for "
1410  "partition %d are not valid\n", partid,
1411  sn_partition_id);
1412  return xpInvalidAddress;
1413  }
1414 
1415  /* the variables we imported look to be valid */
1416 
1417  part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
1418  part_sn2->remote_openclose_args_pa =
1419  pulled_entry->openclose_args_pa;
1420  part_sn2->remote_chctl_amo_va =
1421  (struct amo *)__va(pulled_entry->chctl_amo_pa);
1422  part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
1423  part_sn2->notify_IRQ_phys_cpuid =
1424  pulled_entry->notify_IRQ_phys_cpuid;
1425 
1426  if (part->nchannels > pulled_entry->nchannels)
1427  part->nchannels = pulled_entry->nchannels;
1428 
1429  /* let the other side know that we've pulled their variables */
1430 
1431  xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
1432  }
1433 
1434  if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
1435  return xpRetry;
1436 
1437  return xpSuccess;
1438 }
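/*
 * Handshake summary (annotation, inferred from the code above): a partition
 * sets its own vars_part entry to XPC_VP_MAGIC1_SN2 once its per-partition
 * variables are ready to be pulled, and advances it to XPC_VP_MAGIC2_SN2
 * after it has successfully pulled the other side's values.
 * xpc_make_first_contact_sn2() below keeps calling this routine, treating
 * xpRetry as "the other side has not yet reached MAGIC2", until both
 * partitions have pulled each other's variables.
 */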
1439 
1440 /*
1441  * Establish first contact with the remote partition. This involves pulling
1442  * the XPC per partition variables from the remote partition and waiting for
1443  * the remote partition to pull ours.
1444  */
1445 static enum xp_retval
1446 xpc_make_first_contact_sn2(struct xpc_partition *part)
1447 {
1448  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1449  enum xp_retval ret;
1450 
1451  /*
1452  * Register the remote partition's amos with SAL so it can handle
1453  * and cleanup errors within that address range should the remote
1454  * partition go down. We don't unregister this range because it is
1455  * difficult to tell when outstanding writes to the remote partition
1456  * are finished and thus when it is safe to unregister. This should
1457  * not result in wasted space in the SAL xp_addr_region table because
1458  * we should get the same page for remote_amos_page_pa after module
1459  * reloads and system reboots.
1460  */
1461  if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
1462  PAGE_SIZE, 1) < 0) {
1463  dev_warn(xpc_part, "xpc_activating(%d) failed to register "
1464  "xp_addr region\n", XPC_PARTID(part));
1465 
1466  ret = xpPhysAddrRegFailed;
1467  XPC_DEACTIVATE_PARTITION(part, ret);
1468  return ret;
1469  }
1470 
1471  /*
1472  * Send activate IRQ to get other side to activate if they've not
1473  * already begun to do so.
1474  */
1475  xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
1476  cnodeid_to_nasid(0),
1477  part_sn2->activate_IRQ_nasid,
1478  part_sn2->activate_IRQ_phys_cpuid);
1479 
1480  while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
1481  if (ret != xpRetry) {
1482  XPC_DEACTIVATE_PARTITION(part, ret);
1483  return ret;
1484  }
1485 
1486  dev_dbg(xpc_part, "waiting to make first contact with "
1487  "partition %d\n", XPC_PARTID(part));
1488 
1489  /* wait a 1/4 of a second or so */
1490  (void)msleep_interruptible(250);
1491 
1492  if (part->act_state == XPC_P_AS_DEACTIVATING)
1493  return part->reason;
1494  }
1495 
1496  return xpSuccess;
1497 }
1498 
1499 /*
1500  * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
1501  */
1502 static u64
1503 xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
1504 {
1505  struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1506  unsigned long irq_flags;
1507  union xpc_channel_ctl_flags chctl;
1508  enum xp_retval ret;
1509 
1510  /*
1511  * See if there are any chctl flags to be handled.
1512  */
1513 
1514  spin_lock_irqsave(&part->chctl_lock, irq_flags);
1515  chctl = part->chctl;
1516  if (chctl.all_flags != 0)
1517  part->chctl.all_flags = 0;
1518 
1519  spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
1520 
1521  if (xpc_any_openclose_chctl_flags_set(&chctl)) {
1522  ret = xpc_pull_remote_cachelines_sn2(part, part->
1523  remote_openclose_args,
1524  part_sn2->
1525  remote_openclose_args_pa,
1526  XPC_OPENCLOSE_ARGS_SIZE);
1527  if (ret != xpSuccess) {
1528  XPC_DEACTIVATE_PARTITION(part, ret);
1529 
1530  dev_dbg(xpc_chan, "failed to pull openclose args from "
1531  "partition %d, ret=%d\n", XPC_PARTID(part),
1532  ret);
1533 
1534  /* don't bother processing chctl flags anymore */
1535  chctl.all_flags = 0;
1536  }
1537  }
1538 
1539  if (xpc_any_msg_chctl_flags_set(&chctl)) {
1540  ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
1541  part_sn2->remote_GPs_pa,
1542  XPC_GP_SIZE);
1543  if (ret != xpSuccess) {
1544  XPC_DEACTIVATE_PARTITION(part, ret);
1545 
1546  dev_dbg(xpc_chan, "failed to pull GPs from partition "
1547  "%d, ret=%d\n", XPC_PARTID(part), ret);
1548 
1549  /* don't bother processing chctl flags anymore */
1550  chctl.all_flags = 0;
1551  }
1552  }
1553 
1554  return chctl.all_flags;
1555 }
1556 
1557 /*
1558  * Allocate the local message queue and the notify queue.
1559  */
1560 static enum xp_retval
1561 xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
1562 {
1563  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1564  unsigned long irq_flags;
1565  int nentries;
1566  size_t nbytes;
1567 
1568  for (nentries = ch->local_nentries; nentries > 0; nentries--) {
1569 
1570  nbytes = nentries * ch->entry_size;
1571  ch_sn2->local_msgqueue =
1572  xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
1573  &ch_sn2->local_msgqueue_base);
1574  if (ch_sn2->local_msgqueue == NULL)
1575  continue;
1576 
1577  nbytes = nentries * sizeof(struct xpc_notify_sn2);
1578  ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
1579  if (ch_sn2->notify_queue == NULL) {
1580  kfree(ch_sn2->local_msgqueue_base);
1581  ch_sn2->local_msgqueue = NULL;
1582  continue;
1583  }
1584 
1585  spin_lock_irqsave(&ch->lock, irq_flags);
1586  if (nentries < ch->local_nentries) {
1587  dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
1588  "partid=%d, channel=%d\n", nentries,
1589  ch->local_nentries, ch->partid, ch->number);
1590 
1591  ch->local_nentries = nentries;
1592  }
1593  spin_unlock_irqrestore(&ch->lock, irq_flags);
1594  return xpSuccess;
1595  }
1596 
1597  dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
1598  "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
1599  return xpNoMemory;
1600 }
1601 
1602 /*
1603  * Allocate the cached remote message queue.
1604  */
1605 static enum xp_retval
1606 xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
1607 {
1608  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1609  unsigned long irq_flags;
1610  int nentries;
1611  size_t nbytes;
1612 
1613  DBUG_ON(ch->remote_nentries <= 0);
1614 
1615  for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
1616 
1617  nbytes = nentries * ch->entry_size;
1618  ch_sn2->remote_msgqueue =
1619  xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->
1620  remote_msgqueue_base);
1621  if (ch_sn2->remote_msgqueue == NULL)
1622  continue;
1623 
1624  spin_lock_irqsave(&ch->lock, irq_flags);
1625  if (nentries < ch->remote_nentries) {
1626  dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
1627  "partid=%d, channel=%d\n", nentries,
1628  ch->remote_nentries, ch->partid, ch->number);
1629 
1630  ch->remote_nentries = nentries;
1631  }
1632  spin_unlock_irqrestore(&ch->lock, irq_flags);
1633  return xpSuccess;
1634  }
1635 
1636  dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
1637  "partid=%d, channel=%d\n", ch->partid, ch->number);
1638  return xpNoMemory;
1639 }
1640 
1641 /*
1642  * Allocate message queues and other stuff associated with a channel.
1643  *
1644  * Note: Assumes all of the channel sizes are filled in.
1645  */
1646 static enum xp_retval
1647 xpc_setup_msg_structures_sn2(struct xpc_channel *ch)
1648 {
1649  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1650  enum xp_retval ret;
1651 
1652  DBUG_ON(ch->flags & XPC_C_SETUP);
1653 
1654  ret = xpc_allocate_local_msgqueue_sn2(ch);
1655  if (ret == xpSuccess) {
1656 
1657  ret = xpc_allocate_remote_msgqueue_sn2(ch);
1658  if (ret != xpSuccess) {
1659  kfree(ch_sn2->local_msgqueue_base);
1660  ch_sn2->local_msgqueue = NULL;
1661  kfree(ch_sn2->notify_queue);
1662  ch_sn2->notify_queue = NULL;
1663  }
1664  }
1665  return ret;
1666 }
1667 
1668 /*
1669  * Free up message queues and other stuff that were allocated for the specified
1670  * channel.
1671  */
1672 static void
1673 xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
1674 {
1675  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1676 
1677  DBUG_ON(!spin_is_locked(&ch->lock));
1678 
1679  ch_sn2->remote_msgqueue_pa = 0;
1680 
1681  ch_sn2->local_GP->get = 0;
1682  ch_sn2->local_GP->put = 0;
1683  ch_sn2->remote_GP.get = 0;
1684  ch_sn2->remote_GP.put = 0;
1685  ch_sn2->w_local_GP.get = 0;
1686  ch_sn2->w_local_GP.put = 0;
1687  ch_sn2->w_remote_GP.get = 0;
1688  ch_sn2->w_remote_GP.put = 0;
1689  ch_sn2->next_msg_to_pull = 0;
1690 
1691  if (ch->flags & XPC_C_SETUP) {
1692  dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
1693  ch->flags, ch->partid, ch->number);
1694 
1695  kfree(ch_sn2->local_msgqueue_base);
1696  ch_sn2->local_msgqueue = NULL;
1697  kfree(ch_sn2->remote_msgqueue_base);
1698  ch_sn2->remote_msgqueue = NULL;
1699  kfree(ch_sn2->notify_queue);
1700  ch_sn2->notify_queue = NULL;
1701  }
1702 }
1703 
1704 /*
1705  * Notify those who wanted to be notified upon delivery of their message.
1706  */
1707 static void
1708 xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
1709 {
1710  struct xpc_notify_sn2 *notify;
1711  u8 notify_type;
1712  s64 get = ch->sn.sn2.w_remote_GP.get - 1;
1713 
1714  while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
1715 
1716  notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];
1717 
1718  /*
1719  * See if the notify entry indicates it was associated with
1720  * a message whose sender wants to be notified. It is possible
1721  * that it is, but someone else is doing or has done the
1722  * notification.
1723  */
1724  notify_type = notify->type;
1725  if (notify_type == 0 ||
1726  cmpxchg(&notify->type, notify_type, 0) != notify_type) {
1727  continue;
1728  }
1729 
1730  DBUG_ON(notify_type != XPC_N_CALL);
1731 
1732  atomic_dec(&ch->n_to_notify);
1733 
1734  if (notify->func != NULL) {
1735  dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
1736  "msg_number=%lld partid=%d channel=%d\n",
1737  (void *)notify, get, ch->partid, ch->number);
1738 
1739  notify->func(reason, ch->partid, ch->number,
1740  notify->key);
1741 
1742  dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
1743  " msg_number=%lld partid=%d channel=%d\n",
1744  (void *)notify, get, ch->partid, ch->number);
1745  }
1746  }
1747 }
1748 
1749 static void
1750 xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
1751 {
1752  xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
1753 }
1754 
1755 /*
1756  * Clear some of the msg flags in the local message queue.
1757  */
1758 static inline void
1759 xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
1760 {
1761  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1762  struct xpc_msg_sn2 *msg;
1763  s64 get;
1764 
1765  get = ch_sn2->w_remote_GP.get;
1766  do {
1767  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
1768  (get % ch->local_nentries) *
1769  ch->entry_size);
1770  DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
1771  msg->flags = 0;
1772  } while (++get < ch_sn2->remote_GP.get);
1773 }
1774 
1775 /*
1776  * Clear some of the msg flags in the remote message queue.
1777  */
1778 static inline void
1779 xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
1780 {
1781  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1782  struct xpc_msg_sn2 *msg;
1783  s64 put, remote_nentries = ch->remote_nentries;
1784 
1785  /* flags are zeroed when the buffer is allocated */
1786  if (ch_sn2->remote_GP.put < remote_nentries)
1787  return;
1788 
1789  put = max(ch_sn2->w_remote_GP.put, remote_nentries);
1790  do {
1791  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
1792  (put % remote_nentries) *
1793  ch->entry_size);
1794  DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
1795  DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
1796  DBUG_ON(msg->number != put - remote_nentries);
1797  msg->flags = 0;
1798  } while (++put < ch_sn2->remote_GP.put);
1799 }
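/*
 * Illustrative note (annotation only): with remote_nentries == 4, PUT
 * values 4..7 land in slots 0..3 of the cached remote message queue, and
 * the DBUG_ON above checks msg->number == put - 4 for each cleared entry,
 * i.e. flags are only cleared for slots that have wrapped at least once.
 */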
1800 
1801 static int
1802 xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch)
1803 {
1804  return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
1805 }
1806 
1807 static void
1808 xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1809 {
1810  struct xpc_channel *ch = &part->channels[ch_number];
1811  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1812  int npayloads_sent;
1813 
1814  ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];
1815 
1816  /* See what, if anything, has changed for each connected channel */
1817 
1818  xpc_msgqueue_ref(ch);
1819 
1820  if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
1821  ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
1822  /* nothing changed since GPs were last pulled */
1823  xpc_msgqueue_deref(ch);
1824  return;
1825  }
1826 
1827  if (!(ch->flags & XPC_C_CONNECTED)) {
1828  xpc_msgqueue_deref(ch);
1829  return;
1830  }
1831 
1832  /*
1833  * First check to see if messages recently sent by us have been
1834  * received by the other side. (The remote GET value will have
1835  * changed since we last looked at it.)
1836  */
1837 
1838  if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {
1839 
1840  /*
1841  * We need to notify any senders that want to be notified
1842  * that their sent messages have been received by their
1843  * intended recipients. We need to do this before updating
1844  * w_remote_GP.get so that we don't allocate the same message
1845  * queue entries prematurely (see xpc_allocate_msg()).
1846  */
1847  if (atomic_read(&ch->n_to_notify) > 0) {
1848  /*
1849  * Notify senders that messages sent have been
1850  * received and delivered by the other side.
1851  */
1852  xpc_notify_senders_sn2(ch, xpMsgDelivered,
1853  ch_sn2->remote_GP.get);
1854  }
1855 
1856  /*
1857  * Clear msg->flags in previously sent messages, so that
1858  * they're ready for xpc_allocate_msg().
1859  */
1860  xpc_clear_local_msgqueue_flags_sn2(ch);
1861 
1862  ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
1863 
1864  dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, "
1865  "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
1866  ch->number);
1867 
1868  /*
1869  * If anyone was waiting for message queue entries to become
1870  * available, wake them up.
1871  */
1872  if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1873  wake_up(&ch->msg_allocate_wq);
1874  }
1875 
1876  /*
1877  * Now check for newly sent messages by the other side. (The remote
1878  * PUT value will have changed since we last looked at it.)
1879  */
1880 
1881  if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
1882  /*
1883  * Clear msg->flags in previously received messages, so that
1884  * they're ready for xpc_get_deliverable_payload_sn2().
1885  */
1886  xpc_clear_remote_msgqueue_flags_sn2(ch);
1887 
1888  smp_wmb(); /* ensure flags have been cleared before bte_copy */
1889  ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
1890 
1891  dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, "
1892  "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
1893  ch->number);
1894 
1895  npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
1896  if (npayloads_sent > 0) {
1897  dev_dbg(xpc_chan, "msgs waiting to be copied and "
1898  "delivered=%d, partid=%d, channel=%d\n",
1899  npayloads_sent, ch->partid, ch->number);
1900 
1901  if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1902  xpc_activate_kthreads(ch, npayloads_sent);
1903  }
1904  }
1905 
1906  xpc_msgqueue_deref(ch);
1907 }
1908 
1909 static struct xpc_msg_sn2 *
1910 xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1911 {
1912  struct xpc_partition *part = &xpc_partitions[ch->partid];
1913  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1914  unsigned long remote_msg_pa;
1915  struct xpc_msg_sn2 *msg;
1916  u32 msg_index;
1917  u32 nmsgs;
1918  u64 msg_offset;
1919  enum xp_retval ret;
1920 
1921  if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
1922  /* we were interrupted by a signal */
1923  return NULL;
1924  }
1925 
1926  while (get >= ch_sn2->next_msg_to_pull) {
1927 
1928  /* pull as many messages as are ready and able to be pulled */
1929 
1930  msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
1931 
1932  DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
1933  nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
1934  if (msg_index + nmsgs > ch->remote_nentries) {
1935  /* ignore the ones that wrap the msg queue for now */
1936  nmsgs = ch->remote_nentries - msg_index;
1937  }
1938 
1939  msg_offset = msg_index * ch->entry_size;
1940  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
1941  msg_offset);
1942  remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;
1943 
1944  ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
1945  nmsgs * ch->entry_size);
1946  if (ret != xpSuccess) {
1947 
1948  dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
1949  " msg %lld from partition %d, channel=%d, "
1950  "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
1951  ch->partid, ch->number, ret);
1952 
1953  XPC_DEACTIVATE_PARTITION(part, ret);
1954 
1955  mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1956  return NULL;
1957  }
1958 
1959  ch_sn2->next_msg_to_pull += nmsgs;
1960  }
1961 
1962  mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1963 
1964  /* return the message we were looking for */
1965  msg_offset = (get % ch->remote_nentries) * ch->entry_size;
1966  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);
1967 
1968  return msg;
1969 }
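
The pull loop above copies as many contiguous entries as are ready, clamping the batch at the end of the ring so a single bte_copy never wraps; the next loop iteration handles the wrapped remainder. The following is a hedged user-space sketch of just that clamping arithmetic, with illustrative names and sizes.

/* Sketch of the batch-clamping arithmetic in the pull loop: copy up to the
 * ring boundary in one pass and let the caller loop for the wrapped part.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE 64
#define NENTRIES   8

/* Copy entries [next, put) from src ring to dst ring without wrapping. */
static int64_t pull_batch(char *dst, const char *src, int64_t next, int64_t put)
{
	uint32_t idx = next % NENTRIES;
	uint32_t nmsgs = put - next;

	if (idx + nmsgs > NENTRIES)
		nmsgs = NENTRIES - idx;	/* clamp: ignore the wrapped part for now */

	memcpy(dst + (uint64_t)idx * ENTRY_SIZE,
	       src + (uint64_t)idx * ENTRY_SIZE,
	       (size_t)nmsgs * ENTRY_SIZE);
	return next + nmsgs;		/* caller loops until next >= put */
}

int main(void)
{
	char remote[NENTRIES * ENTRY_SIZE] = { 0 };
	char local[NENTRIES * ENTRY_SIZE] = { 0 };
	int64_t next = 6, put = 10;	/* 4 entries pending, ring wraps at 8 */

	while (next < put)
		next = pull_batch(local, remote, next, put);
	printf("pulled up to %lld\n", (long long)next);	/* two passes: 6..7, then 0..1 */
	return 0;
}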
1970 
1971 /*
1972  * Get the next deliverable message's payload.
1973  */
1974 static void *
1975 xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
1976 {
1977  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1978  struct xpc_msg_sn2 *msg;
1979  void *payload = NULL;
1980  s64 get;
1981 
1982  do {
1983  if (ch->flags & XPC_C_DISCONNECTING)
1984  break;
1985 
1986  get = ch_sn2->w_local_GP.get;
1987  smp_rmb(); /* guarantee that .get loads before .put */
1988  if (get == ch_sn2->w_remote_GP.put)
1989  break;
1990 
1991  /* There are messages waiting to be pulled and delivered.
1992  * We need to try to secure one for ourselves. We'll do this
1993  * by trying to increment w_local_GP.get and hope that no one
1994  * else beats us to it. If they do, we'll simply have
1995  * to try again for the next one.
1996  */
1997 
1998  if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
1999  /* we got the entry referenced by get */
2000 
2001  dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, "
2002  "partid=%d, channel=%d\n", get + 1,
2003  ch->partid, ch->number);
2004 
2005  /* pull the message from the remote partition */
2006 
2007  msg = xpc_pull_remote_msg_sn2(ch, get);
2008 
2009  if (msg != NULL) {
2010  DBUG_ON(msg->number != get);
2011  DBUG_ON(msg->flags & XPC_M_SN2_DONE);
2012  DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
2013 
2014  payload = &msg->payload;
2015  }
2016  break;
2017  }
2018 
2019  } while (1);
2020 
2021  return payload;
2022 }
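
Multiple consumers can race for the next deliverable entry; each tries to advance the working GET with a compare-and-swap and only the winner pulls and delivers that entry. The sketch below renders that claim-by-cmpxchg pattern with C11 atomics in user space; the kernel's cmpxchg() and smp_rmb() are approximated by an acquire fence and atomic_compare_exchange, and all names are illustrative.

/* Sketch of the claim loop in xpc_get_deliverable_payload_sn2(). */
#include <stdatomic.h>
#include <stdint.h>

struct demo_ch {
	_Atomic int64_t w_get;		/* cached, working GET */
	_Atomic int64_t w_put;		/* cached, working PUT */
};

/* Returns the claimed entry number, or -1 if nothing is deliverable. */
static int64_t claim_next(struct demo_ch *ch)
{
	for (;;) {
		int64_t get = atomic_load_explicit(&ch->w_get,
						   memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb(): .get loads before .put */
		int64_t put = atomic_load_explicit(&ch->w_put,
						   memory_order_relaxed);
		if (get == put)
			return -1;		/* queue empty */

		/* try to advance w_get past the entry we want */
		if (atomic_compare_exchange_strong(&ch->w_get, &get, get + 1))
			return get;		/* we own entry 'get' */
		/* someone beat us to it; retry with the fresh value */
	}
}

int main(void)
{
	struct demo_ch ch = { .w_get = 0, .w_put = 3 };

	while (claim_next(&ch) >= 0)
		;				/* claims entries 0, 1, 2 */
	return 0;
}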
2023 
2024 /*
2025  * Now we actually send the messages that are ready to be sent by advancing
2026  * the local message queue's Put value and then sending a chctl msgrequest to the
2027  * recipient partition.
2028  */
2029 static void
2030 xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
2031 {
2032  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2033  struct xpc_msg_sn2 *msg;
2034  s64 put = initial_put + 1;
2035  int send_msgrequest = 0;
2036 
2037  while (1) {
2038 
2039  while (1) {
2040  if (put == ch_sn2->w_local_GP.put)
2041  break;
2042 
2043  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
2044  local_msgqueue + (put %
2045  ch->local_nentries) *
2046  ch->entry_size);
2047 
2048  if (!(msg->flags & XPC_M_SN2_READY))
2049  break;
2050 
2051  put++;
2052  }
2053 
2054  if (put == initial_put) {
2055  /* nothing's changed */
2056  break;
2057  }
2058 
2059  if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
2060  initial_put) {
2061  /* someone else beat us to it */
2062  DBUG_ON(ch_sn2->local_GP->put < initial_put);
2063  break;
2064  }
2065 
2066  /* we just set the new value of local_GP->put */
2067 
2068  dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, "
2069  "channel=%d\n", put, ch->partid, ch->number);
2070 
2071  send_msgrequest = 1;
2072 
2073  /*
2074  * We need to ensure that the message referenced by
2075  * local_GP->put is not XPC_M_SN2_READY or that local_GP->put
2076  * equals w_local_GP.put, so we'll go have a look.
2077  */
2078  initial_put = put;
2079  }
2080 
2081  if (send_msgrequest)
2082  xpc_send_chctl_msgrequest_sn2(ch);
2083 }
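
The publish side sweeps forward over every consecutive READY entry and commits the advance of the shared PUT with a single release-ordered compare-and-swap (cmpxchg_rel in the listing), then re-checks in case more entries became READY in the meantime. A hedged user-space sketch of that loop follows; the release CAS stands in for cmpxchg_rel(), and the struct and names are illustrative.

/* Sketch of the publish loop in xpc_send_msgs_sn2(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NENTRIES 8
#define READY    0x1

struct demo_sendq {
	uint8_t flags[NENTRIES];	/* per-entry flags, READY set by senders */
	_Atomic int64_t shared_put;	/* PUT value visible to the remote side */
	int64_t w_put;			/* count of entries handed out so far */
};

/* Returns true if shared_put was advanced (i.e. a msgrequest should be sent). */
static bool publish_ready(struct demo_sendq *q, int64_t initial_put)
{
	bool advanced = false;
	int64_t put = initial_put + 1;

	for (;;) {
		while (put != q->w_put && (q->flags[put % NENTRIES] & READY))
			put++;			/* skip over already-finished entries */

		if (put == initial_put)
			break;			/* nothing new became READY */

		/* release: entry contents become visible before PUT moves */
		if (!atomic_compare_exchange_strong_explicit(&q->shared_put,
				&initial_put, put,
				memory_order_release, memory_order_relaxed))
			break;			/* another CPU advanced it first */

		advanced = true;
		initial_put = put;		/* go have another look, as above */
	}
	return advanced;
}

int main(void)
{
	struct demo_sendq q = { .shared_put = 0, .w_put = 3 };

	q.flags[0] = q.flags[1] = READY;	/* entry 2 allocated but not yet READY */
	return publish_ready(&q, 0) ? 0 : 1;	/* advances shared_put to 2 */
}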
2084 
2085 /*
2086  * Allocate an entry for a message from the message queue associated with the
2087  * specified channel.
2088  */
2089 static enum xp_retval
2090 xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
2091  struct xpc_msg_sn2 **address_of_msg)
2092 {
2093  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2094  struct xpc_msg_sn2 *msg;
2095  enum xp_retval ret;
2096  s64 put;
2097 
2098  /*
2099  * Get the next available message entry from the local message queue.
2100  * If none are available, we'll make sure that we grab the latest
2101  * GP values.
2102  */
2103  ret = xpTimeout;
2104 
2105  while (1) {
2106 
2107  put = ch_sn2->w_local_GP.put;
2108  smp_rmb(); /* guarantee that .put loads before .get */
2109  if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
2110 
2111  /* There are available message entries. We need to try
2112  * to secure one for ourselves. We'll do this by trying
2113  * to increment w_local_GP.put as long as someone else
2114  * doesn't beat us to it. If they do, we'll have to
2115  * try again.
2116  */
2117  if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
2118  put) {
2119  /* we got the entry referenced by put */
2120  break;
2121  }
2122  continue; /* try again */
2123  }
2124 
2125  /*
2126  * There aren't any available msg entries at this time.
2127  *
2128  * In waiting for a message entry to become available,
2129  * we set a timeout in case the other side is not sending
2130  * completion interrupts. This lets us fake a notify IRQ
2131  * that will cause the notify IRQ handler to fetch the latest
2132  * GP values as if an interrupt was sent by the other side.
2133  */
2134  if (ret == xpTimeout)
2135  xpc_send_chctl_local_msgrequest_sn2(ch);
2136 
2137  if (flags & XPC_NOWAIT)
2138  return xpNoWait;
2139 
2140  ret = xpc_allocate_msg_wait(ch);
2141  if (ret != xpInterrupted && ret != xpTimeout)
2142  return ret;
2143  }
2144 
2145  /* get the message's address and initialize it */
2146  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
2147  (put % ch->local_nentries) *
2148  ch->entry_size);
2149 
2150  DBUG_ON(msg->flags != 0);
2151  msg->number = put;
2152 
2153  dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, "
2154  "msg_number=%lld, partid=%d, channel=%d\n", put + 1,
2155  (void *)msg, msg->number, ch->partid, ch->number);
2156 
2157  *address_of_msg = msg;
2158  return xpSuccess;
2159 }
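
Allocation is the producer-side mirror of the claim loop: a slot is free only while put - get < nentries, and claiming it is a compare-and-swap on the working PUT; when the queue is full the caller either waits or, with XPC_NOWAIT, bails out immediately. A minimal sketch of that reserve step follows, with -EAGAIN standing in for the xpNoWait path and all names illustrative.

/* Sketch of the slot-reservation step in xpc_allocate_msg_sn2(). */
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define NENTRIES 8

struct demo_allocq {
	_Atomic int64_t w_put;		/* next slot to hand to a sender */
	_Atomic int64_t w_remote_get;	/* last slot known consumed remotely */
};

/* Try once to reserve a slot; returns its number or -EAGAIN if full. */
static int64_t reserve_slot(struct demo_allocq *q)
{
	int64_t put = atomic_load_explicit(&q->w_put, memory_order_relaxed);

	for (;;) {
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb(): .put loads before .get */
		int64_t get = atomic_load_explicit(&q->w_remote_get,
						   memory_order_relaxed);
		if (put - get >= NENTRIES)
			return -EAGAIN;		/* queue full: caller waits or bails */

		if (atomic_compare_exchange_strong(&q->w_put, &put, put + 1))
			return put;		/* slot 'put' is ours to fill */
		/* lost the race; 'put' now holds the fresh value, try again */
	}
}

int main(void)
{
	struct demo_allocq q = { .w_put = 0, .w_remote_get = 0 };

	while (reserve_slot(&q) >= 0)
		;			/* reserves slots 0..NENTRIES-1, then -EAGAIN */
	return 0;
}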
2160 
2161 /*
2162  * Common code that does the actual sending of the message by advancing the
2163  * local message queue's Put value and sending a chctl msgrequest to the
2164  * partition the message is being sent to.
2165  */
2166 static enum xp_retval
2167 xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload,
2168  u16 payload_size, u8 notify_type, xpc_notify_func func,
2169  void *key)
2170 {
2171  enum xp_retval ret = xpSuccess;
2172  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2173  struct xpc_msg_sn2 *msg = msg;	/* self-assignment quiets a spurious "uninitialized" warning */
2174  struct xpc_notify_sn2 *notify = notify;	/* likewise; both are set before use */
2175  s64 msg_number;
2176  s64 put;
2177 
2178  DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
2179 
2180  if (XPC_MSG_SIZE(payload_size) > ch->entry_size)
2181  return xpPayloadTooBig;
2182 
2183  xpc_msgqueue_ref(ch);
2184 
2185  if (ch->flags & XPC_C_DISCONNECTING) {
2186  ret = ch->reason;
2187  goto out_1;
2188  }
2189  if (!(ch->flags & XPC_C_CONNECTED)) {
2190  ret = xpNotConnected;
2191  goto out_1;
2192  }
2193 
2194  ret = xpc_allocate_msg_sn2(ch, flags, &msg);
2195  if (ret != xpSuccess)
2196  goto out_1;
2197 
2198  msg_number = msg->number;
2199 
2200  if (notify_type != 0) {
2201  /*
2202  * Tell the remote side to send an ACK interrupt when the
2203  * message has been delivered.
2204  */
2205  msg->flags |= XPC_M_SN2_INTERRUPT;
2206 
2207  atomic_inc(&ch->n_to_notify);
2208 
2209  notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries];
2210  notify->func = func;
2211  notify->key = key;
2212  notify->type = notify_type;
2213 
2214  /* ??? Is a mb() needed here? */
2215 
2216  if (ch->flags & XPC_C_DISCONNECTING) {
2217  /*
2218  * An error occurred between our last error check and
2219  * this one. We will try to clear the type field from
2220  * the notify entry. If we succeed then
2221  * xpc_disconnect_channel() didn't already process
2222  * the notify entry.
2223  */
2224  if (cmpxchg(&notify->type, notify_type, 0) ==
2225  notify_type) {
2226  atomic_dec(&ch->n_to_notify);
2227  ret = ch->reason;
2228  }
2229  goto out_1;
2230  }
2231  }
2232 
2233  memcpy(&msg->payload, payload, payload_size);
2234 
2235  msg->flags |= XPC_M_SN2_READY;
2236 
2237  /*
2238  * The preceding store of msg->flags must occur before the following
2239  * load of local_GP->put.
2240  */
2241  smp_mb();
2242 
2243  /* see if the message is next in line to be sent, if so send it */
2244 
2245  put = ch_sn2->local_GP->put;
2246  if (put == msg_number)
2247  xpc_send_msgs_sn2(ch, put);
2248 
2249 out_1:
2250  xpc_msgqueue_deref(ch);
2251  return ret;
2252 }
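
When notification is requested, the sender arms a notify-queue entry and then re-checks for a disconnect; if one has begun, it tries to retract the entry with a cmpxchg on notify->type so that exactly one of the sender and xpc_disconnect_channel() performs the cleanup. The sketch below shows only that exactly-once handoff, rendered with C11 atomics and illustrative names.

/* Sketch of the retraction race at the end of xpc_send_payload_sn2(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define N_CALL 0x1		/* stand-in for XPC_N_CALL */

struct demo_notify {
	_Atomic uint8_t type;	/* nonzero while the entry is armed */
};

/* Sender side: try to take the entry back after spotting a disconnect. */
static bool sender_retract(struct demo_notify *n, uint8_t armed_type)
{
	uint8_t expected = armed_type;
	/* success: the disconnect path never saw it, sender does the cleanup */
	return atomic_compare_exchange_strong(&n->type, &expected, 0);
}

/* Disconnect side: claim the entry so the sender's callback gets invoked. */
static bool disconnect_claim(struct demo_notify *n, uint8_t armed_type)
{
	uint8_t expected = armed_type;
	return atomic_compare_exchange_strong(&n->type, &expected, 0);
}

int main(void)
{
	struct demo_notify n = { .type = N_CALL };

	bool by_disconnect = disconnect_claim(&n, N_CALL);	/* wins */
	bool by_sender = sender_retract(&n, N_CALL);		/* loses */
	return (by_disconnect && !by_sender) ? 0 : 1;
}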
2253 
2254 /*
2255  * Now we actually acknowledge the messages that have been delivered and ack'd
2256  * by advancing the cached remote message queue's Get value and, if requested,
2257  * sending a chctl msgrequest to the message sender's partition.
2258  *
2259  * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition
2260  * that sent the message.
2261  */
2262 static void
2263 xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2264 {
2265  struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2266  struct xpc_msg_sn2 *msg;
2267  s64 get = initial_get + 1;
2268  int send_msgrequest = 0;
2269 
2270  while (1) {
2271 
2272  while (1) {
2273  if (get == ch_sn2->w_local_GP.get)
2274  break;
2275 
2276  msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
2277  remote_msgqueue + (get %
2278  ch->remote_nentries) *
2279  ch->entry_size);
2280 
2281  if (!(msg->flags & XPC_M_SN2_DONE))
2282  break;
2283 
2284  msg_flags |= msg->flags;
2285  get++;
2286  }
2287 
2288  if (get == initial_get) {
2289  /* nothing's changed */
2290  break;
2291  }
2292 
2293  if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
2294  initial_get) {
2295  /* someone else beat us to it */
2296  DBUG_ON(ch_sn2->local_GP->get <= initial_get);
2297  break;
2298  }
2299 
2300  /* we just set the new value of local_GP->get */
2301 
2302  dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, "
2303  "channel=%d\n", get, ch->partid, ch->number);
2304 
2305  send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);
2306 
2307  /*
2308  * We need to ensure that the message referenced by
2309  * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get
2310  * equals w_local_GP.get, so we'll go have a look.
2311  */
2312  initial_get = get;
2313  }
2314 
2315  if (send_msgrequest)
2316  xpc_send_chctl_msgrequest_sn2(ch);
2317 }
2318 
2319 static void
2320 xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
2321 {
2322  struct xpc_msg_sn2 *msg;
2323  s64 msg_number;
2324  s64 get;
2325 
2326  msg = container_of(payload, struct xpc_msg_sn2, payload);
2327  msg_number = msg->number;
2328 
2329  dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n",
2330  (void *)msg, msg_number, ch->partid, ch->number);
2331 
2332  DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
2333  msg_number % ch->remote_nentries);
2334  DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
2335  DBUG_ON(msg->flags & XPC_M_SN2_DONE);
2336 
2337  msg->flags |= XPC_M_SN2_DONE;
2338 
2339  /*
2340  * The preceding store of msg->flags must occur before the following
2341  * load of local_GP->get.
2342  */
2343  smp_mb();
2344 
2345  /*
2346  * See if this message is next in line to be acknowledged as having
2347  * been delivered.
2348  */
2349  get = ch->sn.sn2.local_GP->get;
2350  if (get == msg_number)
2351  xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
2352 }
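
Both xpc_send_payload_sn2() and xpc_received_payload_sn2() need smp_mb() rather than a one-way barrier because a store (to msg->flags) must be ordered before a later load (of the shared GP value); neither acquire nor release alone orders store-then-load. A hedged user-space sketch of that ordering follows, using a seq_cst fence as the C11 analog of smp_mb(); the struct and names are illustrative.

/* Sketch of the store-then-load ordering in xpc_received_payload_sn2(). */
#include <stdatomic.h>
#include <stdint.h>

#define DONE 0x2

struct demo_slot {
	_Atomic uint8_t flags;		/* DONE bit set by the receiver */
	_Atomic int64_t shared_get;	/* GP advanced by whoever is "next" */
};

static void mark_received(struct demo_slot *s, int64_t msg_number)
{
	atomic_fetch_or_explicit(&s->flags, DONE, memory_order_relaxed);

	/* full barrier, the C11 analog of smp_mb(): the flag store must be
	 * visible before the following load of shared_get */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(&s->shared_get, memory_order_relaxed)
	    == msg_number) {
		/* we are next in line: fold our entry into shared_get, as
		 * xpc_acknowledge_msgs_sn2() does for runs of DONE entries */
		atomic_store_explicit(&s->shared_get, msg_number + 1,
				      memory_order_release);
	}
}

int main(void)
{
	struct demo_slot s = { .flags = 0, .shared_get = 7 };

	mark_received(&s, 7);		/* advances shared_get to 8 */
	return atomic_load(&s.shared_get) == 8 ? 0 : 1;
}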
2353 
2354 static struct xpc_arch_operations xpc_arch_ops_sn2 = {
2355  .setup_partitions = xpc_setup_partitions_sn2,
2356  .teardown_partitions = xpc_teardown_partitions_sn2,
2357  .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
2358  .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2,
2359  .setup_rsvd_page = xpc_setup_rsvd_page_sn2,
2360 
2361  .allow_hb = xpc_allow_hb_sn2,
2362  .disallow_hb = xpc_disallow_hb_sn2,
2363  .disallow_all_hbs = xpc_disallow_all_hbs_sn2,
2364  .increment_heartbeat = xpc_increment_heartbeat_sn2,
2365  .offline_heartbeat = xpc_offline_heartbeat_sn2,
2366  .online_heartbeat = xpc_online_heartbeat_sn2,
2367  .heartbeat_init = xpc_heartbeat_init_sn2,
2368  .heartbeat_exit = xpc_heartbeat_exit_sn2,
2369  .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2,
2370 
2371  .request_partition_activation =
2372  xpc_request_partition_activation_sn2,
2373  .request_partition_reactivation =
2374  xpc_request_partition_reactivation_sn2,
2375  .request_partition_deactivation =
2376  xpc_request_partition_deactivation_sn2,
2377  .cancel_partition_deactivation_request =
2378  xpc_cancel_partition_deactivation_request_sn2,
2379 
2380  .setup_ch_structures = xpc_setup_ch_structures_sn2,
2381  .teardown_ch_structures = xpc_teardown_ch_structures_sn2,
2382 
2383  .make_first_contact = xpc_make_first_contact_sn2,
2384 
2385  .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2,
2386  .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2,
2387  .send_chctl_closereply = xpc_send_chctl_closereply_sn2,
2388  .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2,
2389  .send_chctl_openreply = xpc_send_chctl_openreply_sn2,
2390  .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2,
2391  .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2,
2392 
2393  .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2,
2394 
2395  .setup_msg_structures = xpc_setup_msg_structures_sn2,
2396  .teardown_msg_structures = xpc_teardown_msg_structures_sn2,
2397 
2398  .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2,
2399  .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2,
2400  .partition_engaged = xpc_partition_engaged_sn2,
2401  .any_partition_engaged = xpc_any_partition_engaged_sn2,
2402  .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2,
2403 
2404  .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2,
2405  .send_payload = xpc_send_payload_sn2,
2406  .get_deliverable_payload = xpc_get_deliverable_payload_sn2,
2407  .received_payload = xpc_received_payload_sn2,
2408  .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2,
2409 };
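
The generic XPC layer never calls the sn2 routines directly; it calls through this table of function pointers, which the transport copies into xpc_arch_ops at init time (see line 2417 below). A tiny hedged sketch of that ops-table pattern follows, with purely illustrative names.

/* Minimal sketch of the ops-table dispatch pattern used by xpc_arch_ops_sn2. */
#include <stdio.h>

struct demo_arch_ops {
	int  (*setup_partitions)(void);
	void (*teardown_partitions)(void);
};

static int demo_setup_sn2(void)     { puts("sn2 setup");    return 0; }
static void demo_teardown_sn2(void) { puts("sn2 teardown"); }

static const struct demo_arch_ops demo_ops_sn2 = {
	.setup_partitions    = demo_setup_sn2,
	.teardown_partitions = demo_teardown_sn2,
};

static struct demo_arch_ops demo_arch_ops;	/* what the generic code calls through */

int main(void)
{
	demo_arch_ops = demo_ops_sn2;		/* analog of "xpc_arch_ops = xpc_arch_ops_sn2" */
	if (demo_arch_ops.setup_partitions())
		return 1;
	demo_arch_ops.teardown_partitions();
	return 0;
}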
2410 
2411 int
2412 xpc_init_sn2(void)
2413 {
2414  int ret;
2415  size_t buf_size;
2416 
2417  xpc_arch_ops = xpc_arch_ops_sn2;
2418 
2419  if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
2420  dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
2421  "larger than %d\n", XPC_MSG_HDR_MAX_SIZE);
2422  return -E2BIG;
2423  }
2424 
2425  buf_size = max(XPC_RP_VARS_SIZE,
2426         XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
2427  xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
2428  GFP_KERNEL,
2429  &xpc_remote_copy_buffer_base_sn2);
2430  if (xpc_remote_copy_buffer_sn2 == NULL) {
2431  dev_err(xpc_part, "can't get memory for remote copy buffer\n");
2432  return -ENOMEM;
2433  }
2434 
2435  /* open up protections for IPI and [potentially] amo operations */
2436  xpc_allow_IPI_ops_sn2();
2437  xpc_allow_amo_ops_shub_wars_1_1_sn2();
2438 
2439  /*
2440  * This is safe to do before the xpc_hb_checker thread has started
2441  * because the handler releases a wait queue. If an interrupt is
2442  * received before the thread is waiting, it will not go to sleep,
2443  * but rather immediately process the interrupt.
2444  */
2445  ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
2446  "xpc hb", NULL);
2447  if (ret != 0) {
2448  dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
2449  "errno=%d\n", -ret);
2450  xpc_disallow_IPI_ops_sn2();
2451  kfree(xpc_remote_copy_buffer_base_sn2);
2452  }
2453  return ret;
2454 }
2455 
2456 void
2457 xpc_exit_sn2(void)
2458 {
2459  free_irq(SGI_XPC_ACTIVATE, NULL);
2460  xpc_disallow_IPI_ops_sn2();
2461  kfree(xpc_remote_copy_buffer_base_sn2);
2462 }