vxge-traffic.c
1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice. This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  * Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/prefetch.h>
16 
17 #include "vxge-traffic.h"
18 #include "vxge-config.h"
19 #include "vxge-main.h"
20 
21 /*
22  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23  * @vp: Virtual Path handle.
24  *
25  * Enable vpath interrupts. The function is to be executed last in the
26  * vpath initialization sequence.
27  *
28  * See also: vxge_hw_vpath_intr_disable()
29  */
30 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
31 {
32  u64 val64;
33 
34  struct __vxge_hw_virtualpath *vpath;
35  struct vxge_hw_vpath_reg __iomem *vp_reg;
36  enum vxge_hw_status status = VXGE_HW_OK;
37  if (vp == NULL) {
38  status = VXGE_HW_ERR_INVALID_HANDLE;
39  goto exit;
40  }
41 
42  vpath = vp->vpath;
43 
44  if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
45  status = VXGE_HW_ERR_VPATH_NOT_OPEN;
46  goto exit;
47  }
48 
49  vp_reg = vpath->vp_reg;
50 
50 
51  writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52 
53  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
54  &vp_reg->general_errors_reg);
55 
56  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
57  &vp_reg->pci_config_errors_reg);
58 
59  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60  &vp_reg->mrpcim_to_vpath_alarm_reg);
61 
62  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63  &vp_reg->srpcim_to_vpath_alarm_reg);
64 
65  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66  &vp_reg->vpath_ppif_int_status);
67 
68  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69  &vp_reg->srpcim_msg_to_vpath_reg);
70 
71  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72  &vp_reg->vpath_pcipif_int_status);
73 
74  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75  &vp_reg->prc_alarm_reg);
76 
77  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78  &vp_reg->wrdma_alarm_status);
79 
80  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81  &vp_reg->asic_ntwk_vp_err_reg);
82 
83  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84  &vp_reg->xgmac_vp_int_status);
85 
86  val64 = readq(&vp_reg->vpath_general_int_status);
87 
88  /* Mask unwanted interrupts */
89 
90  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
91  &vp_reg->vpath_pcipif_int_mask);
92 
93  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
94  &vp_reg->srpcim_msg_to_vpath_mask);
95 
96  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97  &vp_reg->srpcim_to_vpath_alarm_mask);
98 
99  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100  &vp_reg->mrpcim_to_vpath_alarm_mask);
101 
102  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103  &vp_reg->pci_config_errors_mask);
104 
105  /* Unmask the individual interrupts */
106 
111  &vp_reg->general_errors_mask);
112 
113  __vxge_hw_pio_mem_write32_upper(
120  &vp_reg->kdfcctl_errors_mask);
121 
122  __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123 
124  __vxge_hw_pio_mem_write32_upper(
126  &vp_reg->prc_alarm_mask);
127 
128  __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
129  __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130 
131  if (vpath->hldev->first_vp_id != vpath->vp_id)
132  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
133  &vp_reg->asic_ntwk_vp_err_mask);
134  else
135  __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
138  &vp_reg->asic_ntwk_vp_err_mask);
139 
140  __vxge_hw_pio_mem_write32_upper(0,
141  &vp_reg->vpath_general_int_mask);
142 exit:
143  return status;
144 
145 }
146 
147 /*
148  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
149  * @vp: Virtual Path handle.
150  *
151  * Disable vpath interrupts. The function is to be executed during the
152  * vpath teardown sequence.
153  *
154  * See also: vxge_hw_vpath_intr_enable()
155  */
156 enum vxge_hw_status vxge_hw_vpath_intr_disable(
157  struct __vxge_hw_vpath_handle *vp)
158 {
159  u64 val64;
160 
161  struct __vxge_hw_virtualpath *vpath;
162  enum vxge_hw_status status = VXGE_HW_OK;
163  struct vxge_hw_vpath_reg __iomem *vp_reg;
164  if (vp == NULL) {
165  status = VXGE_HW_ERR_INVALID_HANDLE;
166  goto exit;
167  }
168 
169  vpath = vp->vpath;
170 
171  if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172  status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173  goto exit;
174  }
175  vp_reg = vpath->vp_reg;
176 
177  __vxge_hw_pio_mem_write32_upper(
179  &vp_reg->vpath_general_int_mask);
180 
181  val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182 
183  writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184 
185  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186  &vp_reg->general_errors_mask);
187 
188  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189  &vp_reg->pci_config_errors_mask);
190 
191  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192  &vp_reg->mrpcim_to_vpath_alarm_mask);
193 
194  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195  &vp_reg->srpcim_to_vpath_alarm_mask);
196 
197  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198  &vp_reg->vpath_ppif_int_mask);
199 
200  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201  &vp_reg->srpcim_msg_to_vpath_mask);
202 
203  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204  &vp_reg->vpath_pcipif_int_mask);
205 
206  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207  &vp_reg->wrdma_alarm_mask);
208 
209  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210  &vp_reg->prc_alarm_mask);
211 
212  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213  &vp_reg->xgmac_vp_int_mask);
214 
215  __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216  &vp_reg->asic_ntwk_vp_err_mask);
217 
218 exit:
219  return status;
220 }
221 
222 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
223 {
224  struct vxge_hw_vpath_reg __iomem *vp_reg;
225  struct vxge_hw_vp_config *config;
226  u64 val64;
227 
228  if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
229  return;
230 
231  vp_reg = fifo->vp_reg;
232  config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
233 
234  if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
235  config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
236  val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237  val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
238  fifo->tim_tti_cfg1_saved = val64;
239  writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
240  }
241 }
242 
243 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
244 {
245  u64 val64 = ring->tim_rti_cfg1_saved;
246 
247  val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248  ring->tim_rti_cfg1_saved = val64;
249  writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
250 }
251 
252 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
253 {
254  u64 val64 = fifo->tim_tti_cfg3_saved;
255  u64 timer = (fifo->rtimer * 1000) / 272;
256 
257  val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258  if (timer)
259  val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
261 
262  writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263  /* tti_cfg3_saved is not updated again because it is
264  * initialized at one place only - init time.
265  */
266 }
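/*
 * Editor's note on the conversion above (and in the RTI variant below): the
 * adaptive rtimer value kept by the driver appears to be in microseconds,
 * while the TIM_CFG3 RTIMER_VAL field is programmed in hardware ticks of
 * roughly 272 ns, hence the (rtimer * 1000) / 272 scaling. For example, an
 * rtimer of 4 us becomes 4000 / 272 = 14 ticks; a zero rtimer leaves the
 * RTIMER_VAL field cleared.
 */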
267 
268 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
269 {
270  u64 val64 = ring->tim_rti_cfg3_saved;
271  u64 timer = (ring->rtimer * 1000) / 272;
272 
273  val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274  if (timer)
275  val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
277 
278  writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279  /* rti_cfg3_saved is not updated again because it is
280  * initialized at one place only - init time.
281  */
282 }
283 
293 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
294 {
295 
296  __vxge_hw_pio_mem_write32_upper(
297  (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298  &channel->common_reg->set_msix_mask_vect[msix_id%4]);
299 }
300 
310 void
311 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
312 {
313 
314  __vxge_hw_pio_mem_write32_upper(
315  (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316  &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
317 }
318 
329 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
330 {
331  __vxge_hw_pio_mem_write32_upper(
332  (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333  &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
334 }
335 
343 {
344 
345  if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346  (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347  (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348  (intr_mode != VXGE_HW_INTR_MODE_DEF))
349  intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
350 
351  hldev->config.intr_mode = intr_mode;
352  return intr_mode;
353 }
354 
367 {
368  u32 i;
369  u64 val64;
370  u32 val32;
371 
373 
374  for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
375 
376  if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
377  continue;
378 
381  }
382 
383  if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
384  val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
386 
387  if (val64 != 0) {
388  writeq(val64, &hldev->common_reg->tim_int_status0);
389 
390  writeq(~val64, &hldev->common_reg->tim_int_mask0);
391  }
392 
393  val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
395 
396  if (val32 != 0) {
397  __vxge_hw_pio_mem_write32_upper(val32,
398  &hldev->common_reg->tim_int_status1);
399 
400  __vxge_hw_pio_mem_write32_upper(~val32,
401  &hldev->common_reg->tim_int_mask1);
402  }
403  }
404 
405  val64 = readq(&hldev->common_reg->titan_general_int_status);
406 
408 }
409 
421 {
422  u32 i;
423 
425 
426  /* mask all the tim interrupts */
427  writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
428  __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
429  &hldev->common_reg->tim_int_mask1);
430 
431  for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
432 
433  if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
434  continue;
435 
438  }
439 }
440 
450 {
451  u64 val64;
452 
455 
456  __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457  &hldev->common_reg->titan_mask_all_int);
458 }
459 
469 {
470  u64 val64 = 0;
471 
472  if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
474 
475  __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476  &hldev->common_reg->titan_mask_all_int);
477 }
478 
488 {
489  u32 val32;
490 
491  val32 = readl(&hldev->common_reg->titan_general_int_status);
492 }
493 
502 static enum vxge_hw_status
503 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
504  enum vxge_hw_event type)
505 {
506  switch (type) {
508  break;
513  goto out;
515  goto out;
518  goto out;
522  case VXGE_HW_EVENT_SERR:
523  break;
526  goto out;
528  break;
529  default:
530  vxge_assert(0);
531  goto out;
532  }
533 
534  /* notify driver */
535  if (hldev->uld_callbacks->crit_err)
536  hldev->uld_callbacks->crit_err(hldev,
537  type, vp_id);
538 out:
539 
540  return VXGE_HW_OK;
541 }
542 
543 /*
544  * __vxge_hw_device_handle_link_down_ind
545  * @hldev: HW device handle.
546  *
547  * Link down indication handler. The function is invoked by HW when
548  * Titan indicates that the link is down.
549  */
550 static enum vxge_hw_status
551 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
552 {
553  /*
554  * If the link is already marked down, return.
555  */
556  if (hldev->link_state == VXGE_HW_LINK_DOWN)
557  goto exit;
558 
559  hldev->link_state = VXGE_HW_LINK_DOWN;
560 
561  /* notify driver */
562  if (hldev->uld_callbacks->link_down)
563  hldev->uld_callbacks->link_down(hldev);
564 exit:
565  return VXGE_HW_OK;
566 }
567 
568 /*
569  * __vxge_hw_device_handle_link_up_ind
570  * @hldev: HW device handle.
571  *
572  * Link up indication handler. The function is invoked by HW when
573  * Titan indicates that the link is up for a programmable amount of time.
574  */
575 static enum vxge_hw_status
576 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
577 {
578  /*
579  * If the link is already marked up, return.
580  */
581  if (hldev->link_state == VXGE_HW_LINK_UP)
582  goto exit;
583 
584  hldev->link_state = VXGE_HW_LINK_UP;
585 
586  /* notify driver */
587  if (hldev->uld_callbacks->link_up)
588  hldev->uld_callbacks->link_up(hldev);
589 exit:
590  return VXGE_HW_OK;
591 }
592 
593 /*
594  * __vxge_hw_vpath_alarm_process - Process Alarms.
595  * @vpath: Virtual Path.
596  * @skip_alarms: Do not clear the alarms
597  *
598  * Process vpath alarms.
599  *
600  */
601 static enum vxge_hw_status
602 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
603  u32 skip_alarms)
604 {
605  u64 val64;
606  u64 alarm_status;
607  u64 pic_status;
608  struct __vxge_hw_device *hldev = NULL;
609  enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
610  u64 mask64;
611  struct vxge_hw_vpath_stats_sw_info *sw_stats;
612  struct vxge_hw_vpath_reg __iomem *vp_reg;
613 
614  if (vpath == NULL) {
616  alarm_event);
617  goto out2;
618  }
619 
620  hldev = vpath->hldev;
621  vp_reg = vpath->vp_reg;
622  alarm_status = readq(&vp_reg->vpath_general_int_status);
623 
624  if (alarm_status == VXGE_HW_ALL_FOXES) {
626  alarm_event);
627  goto out;
628  }
629 
630  sw_stats = vpath->sw_stats;
631 
632  if (alarm_status & ~(
637  sw_stats->error_stats.unknown_alarms++;
638 
640  alarm_event);
641  goto out;
642  }
643 
644  if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
645 
646  val64 = readq(&vp_reg->xgmac_vp_int_status);
647 
648  if (val64 &
650 
651  val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
652 
653  if (((val64 &
655  (!(val64 &
657  ((val64 &
659  (!(val64 &
661  ))) {
662  sw_stats->error_stats.network_sustained_fault++;
663 
664  writeq(
665  VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
666  &vp_reg->asic_ntwk_vp_err_mask);
667 
668  __vxge_hw_device_handle_link_down_ind(hldev);
669  alarm_event = VXGE_HW_SET_LEVEL(
670  VXGE_HW_EVENT_LINK_DOWN, alarm_event);
671  }
672 
673  if (((val64 &
674  VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
675  (!(val64 &
676  VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
677  ((val64 &
678  VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
679  (!(val64 &
680  VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
681  ))) {
682 
683  sw_stats->error_stats.network_sustained_ok++;
684 
685  writeq(
686  VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
687  &vp_reg->asic_ntwk_vp_err_mask);
688 
689  __vxge_hw_device_handle_link_up_ind(hldev);
690  alarm_event = VXGE_HW_SET_LEVEL(
691  VXGE_HW_EVENT_LINK_UP, alarm_event);
692  }
693 
695  &vp_reg->asic_ntwk_vp_err_reg);
696 
697  alarm_event = VXGE_HW_SET_LEVEL(
698  VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
699 
700  if (skip_alarms)
701  return VXGE_HW_OK;
702  }
703  }
704 
705  if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
706 
707  pic_status = readq(&vp_reg->vpath_ppif_int_status);
708 
709  if (pic_status &
711 
712  val64 = readq(&vp_reg->general_errors_reg);
713  mask64 = readq(&vp_reg->general_errors_mask);
714 
715  if ((val64 &
717  ~mask64) {
718  sw_stats->error_stats.ini_serr_det++;
719 
720  alarm_event = VXGE_HW_SET_LEVEL(
721  VXGE_HW_EVENT_SERR, alarm_event);
722  }
723 
724  if ((val64 &
726  ~mask64) {
727  sw_stats->error_stats.dblgen_fifo0_overflow++;
728 
729  alarm_event = VXGE_HW_SET_LEVEL(
730  VXGE_HW_EVENT_FIFO_ERR, alarm_event);
731  }
732 
733  if ((val64 &
735  ~mask64)
736  sw_stats->error_stats.statsb_pif_chain_error++;
737 
738  if ((val64 &
740  ~mask64)
741  sw_stats->error_stats.statsb_drop_timeout++;
742 
743  if ((val64 &
745  ~mask64)
746  sw_stats->error_stats.target_illegal_access++;
747 
748  if (!skip_alarms) {
750  &vp_reg->general_errors_reg);
751  alarm_event = VXGE_HW_SET_LEVEL(
753  alarm_event);
754  }
755  }
756 
757  if (pic_status &
759 
760  val64 = readq(&vp_reg->kdfcctl_errors_reg);
761  mask64 = readq(&vp_reg->kdfcctl_errors_mask);
762 
763  if ((val64 &
765  ~mask64) {
766  sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
767 
768  alarm_event = VXGE_HW_SET_LEVEL(
770  alarm_event);
771  }
772 
773  if ((val64 &
775  ~mask64) {
776  sw_stats->error_stats.kdfcctl_fifo0_poison++;
777 
778  alarm_event = VXGE_HW_SET_LEVEL(
780  alarm_event);
781  }
782 
783  if ((val64 &
785  ~mask64) {
786  sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
787 
788  alarm_event = VXGE_HW_SET_LEVEL(
790  alarm_event);
791  }
792 
793  if (!skip_alarms) {
795  &vp_reg->kdfcctl_errors_reg);
796  alarm_event = VXGE_HW_SET_LEVEL(
798  alarm_event);
799  }
800  }
801 
802  }
803 
804  if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
805 
806  val64 = readq(&vp_reg->wrdma_alarm_status);
807 
809 
810  val64 = readq(&vp_reg->prc_alarm_reg);
811  mask64 = readq(&vp_reg->prc_alarm_mask);
812 
814  ~mask64)
815  sw_stats->error_stats.prc_ring_bumps++;
816 
818  ~mask64) {
819  sw_stats->error_stats.prc_rxdcm_sc_err++;
820 
821  alarm_event = VXGE_HW_SET_LEVEL(
823  alarm_event);
824  }
825 
827  & ~mask64) {
828  sw_stats->error_stats.prc_rxdcm_sc_abort++;
829 
830  alarm_event = VXGE_HW_SET_LEVEL(
832  alarm_event);
833  }
834 
836  & ~mask64) {
837  sw_stats->error_stats.prc_quanta_size_err++;
838 
839  alarm_event = VXGE_HW_SET_LEVEL(
841  alarm_event);
842  }
843 
844  if (!skip_alarms) {
846  &vp_reg->prc_alarm_reg);
847  alarm_event = VXGE_HW_SET_LEVEL(
849  alarm_event);
850  }
851  }
852  }
853 out:
854  hldev->stats.sw_dev_err_stats.vpath_alarms++;
855 out2:
856  if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
857  (alarm_event == VXGE_HW_EVENT_UNKNOWN))
858  return VXGE_HW_OK;
859 
860  __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
861 
862  if (alarm_event == VXGE_HW_EVENT_SERR)
863  return VXGE_HW_ERR_CRITICAL;
864 
865  return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867  (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
869 }
870 
891  u32 skip_alarms, u64 *reason)
892 {
893  u32 i;
894  u64 val64;
895  u64 adapter_status;
896  u64 vpath_mask;
898 
899  val64 = readq(&hldev->common_reg->titan_general_int_status);
900 
901  if (unlikely(!val64)) {
902  /* not Titan interrupt */
903  *reason = 0;
904  ret = VXGE_HW_ERR_WRONG_IRQ;
905  goto exit;
906  }
907 
908  if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
909 
910  adapter_status = readq(&hldev->common_reg->adapter_status);
911 
912  if (adapter_status == VXGE_HW_ALL_FOXES) {
913 
914  __vxge_hw_device_handle_error(hldev,
916  *reason = 0;
918  goto exit;
919  }
920  }
921 
922  hldev->stats.sw_dev_info_stats.total_intr_cnt++;
923 
924  *reason = val64;
925 
926  vpath_mask = hldev->vpaths_deployed >>
928 
929  if (val64 &
931  hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
932 
933  return VXGE_HW_OK;
934  }
935 
936  hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
937 
938  if (unlikely(val64 &
940 
941  enum vxge_hw_status error_level = VXGE_HW_OK;
942 
943  hldev->stats.sw_dev_err_stats.vpath_alarms++;
944 
945  for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
946 
947  if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
948  continue;
949 
950  ret = __vxge_hw_vpath_alarm_process(
951  &hldev->virtual_paths[i], skip_alarms);
952 
953  error_level = VXGE_HW_SET_LEVEL(ret, error_level);
954 
955  if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
956  (ret == VXGE_HW_ERR_SLOT_FREEZE)))
957  break;
958  }
959 
960  ret = error_level;
961  }
962 exit:
963  return ret;
964 }
965 
977 {
978 
979  if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
980  (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
983  &hldev->common_reg->tim_int_status0);
984  }
985 
986  if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
987  (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
988  __vxge_hw_pio_mem_write32_upper(
991  &hldev->common_reg->tim_int_status1);
992  }
993 }
994 
995 /*
996  * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
997  * @channel: Channel
998  * @dtrh: Buffer to return the DTR pointer
999  *
1000  * Allocates a dtr from the reserve array. If the reserve array is empty,
1001  * it swaps the reserve and free arrays.
1002  *
1003  */
1004 static enum vxge_hw_status
1005 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1006 {
1007  void **tmp_arr;
1008 
1009  if (channel->reserve_ptr - channel->reserve_top > 0) {
1010 _alloc_after_swap:
1011  *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1012 
1013  return VXGE_HW_OK;
1014  }
1015 
1016  /* switch between empty and full arrays */
1017 
1018  /* the idea behind such a design is that by keeping the free and reserve
1019  * arrays separate we basically separate the irq and non-irq parts,
1020  * i.e. no additional locking is needed when we free a resource */
1021 
1022  if (channel->length - channel->free_ptr > 0) {
1023 
1024  tmp_arr = channel->reserve_arr;
1025  channel->reserve_arr = channel->free_arr;
1026  channel->free_arr = tmp_arr;
1027  channel->reserve_ptr = channel->length;
1028  channel->reserve_top = channel->free_ptr;
1029  channel->free_ptr = channel->length;
1030 
1031  channel->stats->reserve_free_swaps_cnt++;
1032 
1033  goto _alloc_after_swap;
1034  }
1035 
1036  channel->stats->full_cnt++;
1037 
1038  *dtrh = NULL;
1039  return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1040 }
1041 
1042 /*
1043  * vxge_hw_channel_dtr_post - Post a dtr to the channel
1044  * @channelh: Channel
1045  * @dtrh: DTR pointer
1046  *
1047  * Posts a dtr to work array.
1048  *
1049  */
1050 static void
1051 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1052 {
1053  vxge_assert(channel->work_arr[channel->post_index] == NULL);
1054 
1055  channel->work_arr[channel->post_index++] = dtrh;
1056 
1057  /* wrap-around */
1058  if (channel->post_index == channel->length)
1059  channel->post_index = 0;
1060 }
1061 
1062 /*
1063  * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1064  * @channel: Channel
1065  * @dtr: Buffer to return the next completed DTR pointer
1066  *
1067  * Returns the next completed dtr without removing it from the work array
1068  *
1069  */
1070 void
1071 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1072 {
1073  vxge_assert(channel->compl_index < channel->length);
1074 
1075  *dtrh = channel->work_arr[channel->compl_index];
1076  prefetch(*dtrh);
1077 }
1078 
1079 /*
1080  * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1081  * @channel: Channel handle
1082  *
1083  * Removes the next completed dtr from work array
1084  *
1085  */
1086 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1087 {
1088  channel->work_arr[channel->compl_index] = NULL;
1089 
1090  /* wrap-around */
1091  if (++channel->compl_index == channel->length)
1092  channel->compl_index = 0;
1093 
1094  channel->stats->total_compl_cnt++;
1095 }
1096 
1097 /*
1098  * vxge_hw_channel_dtr_free - Frees a dtr
1099  * @channel: Channel handle
1100  * @dtr: DTR pointer
1101  *
1102  * Returns the dtr to free array
1103  *
1104  */
1105 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1106 {
1107  channel->free_arr[--channel->free_ptr] = dtrh;
1108 }
1109 
1110 /*
1111  * vxge_hw_channel_dtr_count
1112  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1113  *
1114  * Retrieve number of DTRs available. This function cannot be called
1115  * from the data path. ring_initial_replenish() is the only user.
1116  */
1117 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1118 {
1119  return (channel->reserve_ptr - channel->reserve_top) +
1120  (channel->length - channel->free_ptr);
1121 }
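/*
 * Editor's sketch (not part of the original file): how the channel DTR
 * primitives above fit together. The helper below is hypothetical and only
 * illustrates the alloc -> post -> try_complete/complete -> free cycle that
 * the ring and fifo code in this file builds on; it is kept out of the build.
 */
#if 0
static void example_dtr_cycle(struct __vxge_hw_channel *channel)
{
	void *dtrh;

	/* take a descriptor from the reserve array (may swap with free array) */
	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) != VXGE_HW_OK)
		return;

	/* hand it to the hardware-facing work array */
	vxge_hw_channel_dtr_post(channel, dtrh);

	/* later, on completion: peek, then retire and recycle it */
	vxge_hw_channel_dtr_try_complete(channel, &dtrh);
	if (dtrh != NULL) {
		vxge_hw_channel_dtr_complete(channel);
		vxge_hw_channel_dtr_free(channel, dtrh);
	}
}
#endif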
1122 
1137 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1138  void **rxdh)
1139 {
1140  enum vxge_hw_status status;
1141  struct __vxge_hw_channel *channel;
1142 
1143  channel = &ring->channel;
1144 
1145  status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1146 
1147  if (status == VXGE_HW_OK) {
1148  struct vxge_hw_ring_rxd_1 *rxdp =
1149  (struct vxge_hw_ring_rxd_1 *)*rxdh;
1150 
1151  rxdp->control_0 = rxdp->control_1 = 0;
1152  }
1153 
1154  return status;
1155 }
1156 
1181 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1182 {
1183  struct __vxge_hw_channel *channel;
1184 
1185  channel = &ring->channel;
1186 
1187  vxge_hw_channel_dtr_free(channel, rxdh);
1188 
1189 }
1190 
1198 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1199 {
1200  struct __vxge_hw_channel *channel;
1201 
1202  channel = &ring->channel;
1203 
1204  vxge_hw_channel_dtr_post(channel, rxdh);
1205 }
1206 
1214 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1215 {
1216  struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1217  struct __vxge_hw_channel *channel;
1218 
1219  channel = &ring->channel;
1220 
1222 
1223  if (ring->stats->common_stats.usage_cnt > 0)
1224  ring->stats->common_stats.usage_cnt--;
1225 }
1226 
1237 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1238 {
1239  struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1240  struct __vxge_hw_channel *channel;
1241 
1242  channel = &ring->channel;
1243 
1244  wmb();
1245  rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1246 
1247  vxge_hw_channel_dtr_post(channel, rxdh);
1248 
1249  if (ring->stats->common_stats.usage_cnt > 0)
1250  ring->stats->common_stats.usage_cnt--;
1251 }
1252 
1260 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1261 {
1262  wmb();
1263  vxge_hw_ring_rxd_post_post(ring, rxdh);
1264 }
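/*
 * Editor's sketch (not part of the original file): a minimal RxD replenish
 * step built from the helpers above. vxge_hw_ring_rxd_1b_set() is assumed to
 * come from the companion vxge-traffic.h header, and dma_addr/buf_size are
 * placeholders for a buffer the caller has already DMA-mapped.
 */
#if 0
static enum vxge_hw_status example_rx_replenish_one(struct __vxge_hw_ring *ring,
						    dma_addr_t dma_addr,
						    u32 buf_size)
{
	enum vxge_hw_status status;
	void *rxdh;

	status = vxge_hw_ring_rxd_reserve(ring, &rxdh);
	if (status != VXGE_HW_OK)
		return status;

	/* point the descriptor at the receive buffer ... */
	vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);

	/* ... and hand it back to the adapter */
	vxge_hw_ring_rxd_post(ring, rxdh);

	return VXGE_HW_OK;
}
#endif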
1265 
1300  struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1301 {
1302  struct __vxge_hw_channel *channel;
1303  struct vxge_hw_ring_rxd_1 *rxdp;
1305  u64 control_0, own;
1306 
1307  channel = &ring->channel;
1308 
1309  vxge_hw_channel_dtr_try_complete(channel, rxdh);
1310 
1311  rxdp = *rxdh;
1312  if (rxdp == NULL) {
1314  goto exit;
1315  }
1316 
1317  control_0 = rxdp->control_0;
1318  own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1319  *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1320 
1321  /* check whether it is not the end */
1322  if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1323 
1324  vxge_assert((rxdp)->host_control !=
1325  0);
1326 
1327  ++ring->cmpl_cnt;
1329 
1331 
1332  ring->stats->common_stats.usage_cnt++;
1333  if (ring->stats->common_stats.usage_max <
1334  ring->stats->common_stats.usage_cnt)
1335  ring->stats->common_stats.usage_max =
1336  ring->stats->common_stats.usage_cnt;
1337 
1338  status = VXGE_HW_OK;
1339  goto exit;
1340  }
1341 
1342  /* reset it. since we don't want to return
1343  * garbage to the driver */
1344  *rxdh = NULL;
1346 exit:
1347  return status;
1348 }
1349 
1365  struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1366 {
1367  struct __vxge_hw_channel *channel;
1369 
1370  channel = &ring->channel;
1371 
1372  /* If the t_code is not supported and if the
1373  * t_code is other than 0x5 (unparseable packet
1374  * such as an unknown IPv6 header), drop it !!!
1375  */
1376 
1377  if (t_code == VXGE_HW_RING_T_CODE_OK ||
1378  t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1379  status = VXGE_HW_OK;
1380  goto exit;
1381  }
1382 
1383  if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1384  status = VXGE_HW_ERR_INVALID_TCODE;
1385  goto exit;
1386  }
1387 
1388  ring->stats->rxd_t_code_err_cnt[t_code]++;
1389 exit:
1390  return status;
1391 }
1392 
1404 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1405  u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1406 {
1407  struct __vxge_hw_channel *channel;
1408 
1409  channel = &fifo->channel;
1410 
1412  VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1413  VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1414  &fifo->nofl_db->control_0);
1415 
1416  mmiowb();
1417 
1418  writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1419 
1420  mmiowb();
1421 }
1422 
1429 {
1430  return vxge_hw_channel_dtr_count(&fifoh->channel);
1431 }
1432 
1453 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1454  struct __vxge_hw_fifo *fifo,
1455  void **txdlh, void **txdl_priv)
1456 {
1457  struct __vxge_hw_channel *channel;
1458  enum vxge_hw_status status;
1459  int i;
1460 
1461  channel = &fifo->channel;
1462 
1463  status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1464 
1465  if (status == VXGE_HW_OK) {
1466  struct vxge_hw_fifo_txd *txdp =
1467  (struct vxge_hw_fifo_txd *)*txdlh;
1468  struct __vxge_hw_fifo_txdl_priv *priv;
1469 
1470  priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1471 
1472  /* reset the TxDL's private */
1473  priv->align_dma_offset = 0;
1474  priv->align_vaddr_start = priv->align_vaddr;
1475  priv->align_used_frags = 0;
1476  priv->frags = 0;
1477  priv->alloc_frags = fifo->config->max_frags;
1478  priv->next_txdl_priv = NULL;
1479 
1480  *txdl_priv = (void *)(size_t)txdp->host_control;
1481 
1482  for (i = 0; i < fifo->config->max_frags; i++) {
1483  txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1484  txdp->control_0 = txdp->control_1 = 0;
1485  }
1486  }
1487 
1488  return status;
1489 }
1490 
1508 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1509  void *txdlh, u32 frag_idx,
1510  dma_addr_t dma_pointer, u32 size)
1511 {
1512  struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1513  struct vxge_hw_fifo_txd *txdp, *txdp_last;
1514  struct __vxge_hw_channel *channel;
1515 
1516  channel = &fifo->channel;
1517 
1518  txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1519  txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1520 
1521  if (frag_idx != 0)
1522  txdp->control_0 = txdp->control_1 = 0;
1523  else {
1526  txdp->control_1 |= fifo->interrupt_type;
1528  fifo->tx_intr_num);
1529  if (txdl_priv->frags) {
1530  txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1531  (txdl_priv->frags - 1);
1534  }
1535  }
1536 
1537  vxge_assert(frag_idx < txdl_priv->alloc_frags);
1538 
1539  txdp->buffer_pointer = (u64)dma_pointer;
1540  txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1541  fifo->stats->total_buffers++;
1542  txdl_priv->frags++;
1543 }
1544 
1557 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1558 {
1559  struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1560  struct vxge_hw_fifo_txd *txdp_last;
1561  struct vxge_hw_fifo_txd *txdp_first;
1562  struct __vxge_hw_channel *channel;
1563 
1564  channel = &fifo->channel;
1565 
1566  txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1567  txdp_first = txdlh;
1568 
1569  txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1570  txdp_last->control_0 |=
1573 
1574  vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1575 
1576  __vxge_hw_non_offload_db_post(fifo,
1577  (u64)txdl_priv->dma_addr,
1578  txdl_priv->frags - 1,
1579  fifo->no_snoop_bits);
1580 
1581  fifo->stats->total_posts++;
1582  fifo->stats->common_stats.usage_cnt++;
1583  if (fifo->stats->common_stats.usage_max <
1584  fifo->stats->common_stats.usage_cnt)
1585  fifo->stats->common_stats.usage_max =
1586  fifo->stats->common_stats.usage_cnt;
1587 }
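/*
 * Editor's sketch (not part of the original file): the minimal TxDL transmit
 * sequence built from the helpers above, for a single-fragment packet. The
 * dma_addr/len arguments are placeholders for a buffer already mapped by the
 * caller; error handling is reduced to the bare minimum.
 */
#if 0
static enum vxge_hw_status example_xmit_one(struct __vxge_hw_fifo *fifo,
					    dma_addr_t dma_addr, u32 len)
{
	enum vxge_hw_status status;
	void *txdlh;
	void *txdl_priv;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;

	/* attach the (only) fragment ... */
	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);

	/* ... and ring the non-offload doorbell via the post helper */
	vxge_hw_fifo_txdl_post(fifo, txdlh);

	return VXGE_HW_OK;
}
#endif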
1588 
1621 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1622  struct __vxge_hw_fifo *fifo, void **txdlh,
1623  enum vxge_hw_fifo_tcode *t_code)
1624 {
1625  struct __vxge_hw_channel *channel;
1626  struct vxge_hw_fifo_txd *txdp;
1628 
1629  channel = &fifo->channel;
1630 
1631  vxge_hw_channel_dtr_try_complete(channel, txdlh);
1632 
1633  txdp = *txdlh;
1634  if (txdp == NULL) {
1636  goto exit;
1637  }
1638 
1639  /* check whether host owns it */
1641 
1642  vxge_assert(txdp->host_control != 0);
1643 
1645 
1646  *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1647 
1648  if (fifo->stats->common_stats.usage_cnt > 0)
1649  fifo->stats->common_stats.usage_cnt--;
1650 
1651  status = VXGE_HW_OK;
1652  goto exit;
1653  }
1654 
1655  /* no more completions */
1656  *txdlh = NULL;
1658 exit:
1659  return status;
1660 }
1661 
1677  void *txdlh,
1678  enum vxge_hw_fifo_tcode t_code)
1679 {
1680  struct __vxge_hw_channel *channel;
1681 
1683  channel = &fifo->channel;
1684 
1685  if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1686  status = VXGE_HW_ERR_INVALID_TCODE;
1687  goto exit;
1688  }
1689 
1690  fifo->stats->txd_t_code_err_cnt[t_code]++;
1691 exit:
1692  return status;
1693 }
1694 
1719 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1720 {
1721  struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1722  u32 max_frags;
1723  struct __vxge_hw_channel *channel;
1724 
1725  channel = &fifo->channel;
1726 
1727  txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1728  (struct vxge_hw_fifo_txd *)txdlh);
1729 
1730  max_frags = fifo->config->max_frags;
1731 
1732  vxge_hw_channel_dtr_free(channel, txdlh);
1733 }
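/*
 * Editor's sketch (not part of the original file): draining TxDL completions
 * with the helpers above. VXGE_HW_FIFO_T_CODE_OK is assumed to come from the
 * companion header; the per-descriptor cleanup (unmapping buffers, freeing
 * skbs) is owned by the caller and only hinted at here.
 */
#if 0
static void example_tx_drain(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_fifo_tcode t_code;
	void *txdlh;

	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
	       VXGE_HW_OK) {
		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);

		/* caller-specific cleanup of the descriptor goes here */

		vxge_hw_fifo_txdl_free(fifo, txdlh);
	}
}
#endif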
1734 
1750 enum vxge_hw_status
1752  struct __vxge_hw_vpath_handle *vp,
1753  u8 (macaddr)[ETH_ALEN],
1754  u8 (macaddr_mask)[ETH_ALEN],
1755  enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1756 {
1757  u32 i;
1758  u64 data1 = 0ULL;
1759  u64 data2 = 0ULL;
1761 
1762  if (vp == NULL) {
1763  status = VXGE_HW_ERR_INVALID_HANDLE;
1764  goto exit;
1765  }
1766 
1767  for (i = 0; i < ETH_ALEN; i++) {
1768  data1 <<= 8;
1769  data1 |= (u8)macaddr[i];
1770 
1771  data2 <<= 8;
1772  data2 |= (u8)macaddr_mask[i];
1773  }
1774 
1775  switch (duplicate_mode) {
1777  i = 0;
1778  break;
1780  i = 1;
1781  break;
1783  i = 2;
1784  break;
1785  default:
1786  i = 0;
1787  break;
1788  }
1789 
1790  status = __vxge_hw_vpath_rts_table_set(vp,
1793  0,
1797 exit:
1798  return status;
1799 }
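/*
 * Editor's note on the packing loop above: the six MAC address bytes are
 * shifted in MSB-first, so for example macaddr = 00:11:22:33:44:55 yields
 * data1 = 0x0000001122334455, and the mask bytes are packed into data2 the
 * same way before being handed to __vxge_hw_vpath_rts_table_set(). The same
 * packing is used by the delete helper, and reversed by the _get helpers.
 */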
1800 
1813 enum vxge_hw_status
1815  struct __vxge_hw_vpath_handle *vp,
1816  u8 (macaddr)[ETH_ALEN],
1817  u8 (macaddr_mask)[ETH_ALEN])
1818 {
1819  u32 i;
1820  u64 data1 = 0ULL;
1821  u64 data2 = 0ULL;
1823 
1824  if (vp == NULL) {
1825  status = VXGE_HW_ERR_INVALID_HANDLE;
1826  goto exit;
1827  }
1828 
1829  status = __vxge_hw_vpath_rts_table_get(vp,
1832  0, &data1, &data2);
1833 
1834  if (status != VXGE_HW_OK)
1835  goto exit;
1836 
1838 
1840 
1841  for (i = ETH_ALEN; i > 0; i--) {
1842  macaddr[i-1] = (u8)(data1 & 0xFF);
1843  data1 >>= 8;
1844 
1845  macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1846  data2 >>= 8;
1847  }
1848 exit:
1849  return status;
1850 }
1851 
1865 enum vxge_hw_status
1867  struct __vxge_hw_vpath_handle *vp,
1868  u8 (macaddr)[ETH_ALEN],
1869  u8 (macaddr_mask)[ETH_ALEN])
1870 {
1871  u32 i;
1872  u64 data1 = 0ULL;
1873  u64 data2 = 0ULL;
1875 
1876  if (vp == NULL) {
1877  status = VXGE_HW_ERR_INVALID_HANDLE;
1878  goto exit;
1879  }
1880 
1881  status = __vxge_hw_vpath_rts_table_get(vp,
1884  0, &data1, &data2);
1885 
1886  if (status != VXGE_HW_OK)
1887  goto exit;
1888 
1890 
1892 
1893  for (i = ETH_ALEN; i > 0; i--) {
1894  macaddr[i-1] = (u8)(data1 & 0xFF);
1895  data1 >>= 8;
1896 
1897  macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1898  data2 >>= 8;
1899  }
1900 
1901 exit:
1902  return status;
1903 }
1904 
1918 enum vxge_hw_status
1920  struct __vxge_hw_vpath_handle *vp,
1921  u8 (macaddr)[ETH_ALEN],
1922  u8 (macaddr_mask)[ETH_ALEN])
1923 {
1924  u32 i;
1925  u64 data1 = 0ULL;
1926  u64 data2 = 0ULL;
1928 
1929  if (vp == NULL) {
1930  status = VXGE_HW_ERR_INVALID_HANDLE;
1931  goto exit;
1932  }
1933 
1934  for (i = 0; i < ETH_ALEN; i++) {
1935  data1 <<= 8;
1936  data1 |= (u8)macaddr[i];
1937 
1938  data2 <<= 8;
1939  data2 |= (u8)macaddr_mask[i];
1940  }
1941 
1942  status = __vxge_hw_vpath_rts_table_set(vp,
1945  0,
1948 exit:
1949  return status;
1950 }
1951 
1963 enum vxge_hw_status
1965 {
1967 
1968  if (vp == NULL) {
1969  status = VXGE_HW_ERR_INVALID_HANDLE;
1970  goto exit;
1971  }
1972 
1973  status = __vxge_hw_vpath_rts_table_set(vp,
1977 exit:
1978  return status;
1979 }
1980 
1991 enum vxge_hw_status
1993 {
1994  u64 data;
1996 
1997  if (vp == NULL) {
1998  status = VXGE_HW_ERR_INVALID_HANDLE;
1999  goto exit;
2000  }
2001 
2002  status = __vxge_hw_vpath_rts_table_get(vp,
2005  0, vid, &data);
2006 
2008 exit:
2009  return status;
2010 }
2011 
2023 enum vxge_hw_status
2025 {
2027 
2028  if (vp == NULL) {
2029  status = VXGE_HW_ERR_INVALID_HANDLE;
2030  goto exit;
2031  }
2032 
2033  status = __vxge_hw_vpath_rts_table_set(vp,
2037 exit:
2038  return status;
2039 }
2040 
2050  struct __vxge_hw_vpath_handle *vp)
2051 {
2052  u64 val64;
2053  struct __vxge_hw_virtualpath *vpath;
2055 
2056  if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2057  status = VXGE_HW_ERR_INVALID_HANDLE;
2058  goto exit;
2059  }
2060 
2061  vpath = vp->vpath;
2062 
2063  /* Enable promiscuous mode for function 0 only */
2064  if (!(vpath->hldev->access_rights &
2066  return VXGE_HW_OK;
2067 
2068  val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2069 
2070  if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2071 
2072  val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2076 
2077  writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2078  }
2079 exit:
2080  return status;
2081 }
2082 
2092  struct __vxge_hw_vpath_handle *vp)
2093 {
2094  u64 val64;
2095  struct __vxge_hw_virtualpath *vpath;
2097 
2098  if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2099  status = VXGE_HW_ERR_INVALID_HANDLE;
2100  goto exit;
2101  }
2102 
2103  vpath = vp->vpath;
2104 
2105  val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2106 
2108 
2109  val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2112 
2113  writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2114  }
2115 exit:
2116  return status;
2117 }
2118 
2119 /*
2120  * vxge_hw_vpath_bcast_enable - Enable broadcast
2121  * @vp: Vpath handle.
2122  *
2123  * Enable receiving broadcasts.
2124  */
2126  struct __vxge_hw_vpath_handle *vp)
2127 {
2128  u64 val64;
2129  struct __vxge_hw_virtualpath *vpath;
2131 
2132  if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2133  status = VXGE_HW_ERR_INVALID_HANDLE;
2134  goto exit;
2135  }
2136 
2137  vpath = vp->vpath;
2138 
2139  val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2140 
2141  if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2143  writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2144  }
2145 exit:
2146  return status;
2147 }
2148 
2158  struct __vxge_hw_vpath_handle *vp)
2159 {
2160  u64 val64;
2161  struct __vxge_hw_virtualpath *vpath;
2163 
2164  if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2165  status = VXGE_HW_ERR_INVALID_HANDLE;
2166  goto exit;
2167  }
2168 
2169  vpath = vp->vpath;
2170 
2171  val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2172 
2173  if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2175  writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2176  }
2177 exit:
2178  return status;
2179 }
2180 
2190 enum vxge_hw_status
2192 {
2193  u64 val64;
2194  struct __vxge_hw_virtualpath *vpath;
2196 
2197  if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2198  status = VXGE_HW_ERR_INVALID_HANDLE;
2199  goto exit;
2200  }
2201 
2202  vpath = vp->vpath;
2203 
2204  val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2205 
2207  val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2208  writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2209  }
2210 exit:
2211  return status;
2212 }
2213 
2214 /*
2215  * vxge_hw_vpath_alarm_process - Process Alarms.
2216  * @vpath: Virtual Path.
2217  * @skip_alarms: Do not clear the alarms
2218  *
2219  * Process vpath alarms.
2220  *
2221  */
2223  struct __vxge_hw_vpath_handle *vp,
2224  u32 skip_alarms)
2225 {
2227 
2228  if (vp == NULL) {
2229  status = VXGE_HW_ERR_INVALID_HANDLE;
2230  goto exit;
2231  }
2232 
2233  status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2234 exit:
2235  return status;
2236 }
2237 
2250 void
2251 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2252  int alarm_msix_id)
2253 {
2254  u64 val64;
2255  struct __vxge_hw_virtualpath *vpath = vp->vpath;
2256  struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2257  u32 vp_id = vp->vpath->vp_id;
2258 
2260  (vp_id * 4) + tim_msix_id[0]) |
2262  (vp_id * 4) + tim_msix_id[1]);
2263 
2264  writeq(val64, &vp_reg->interrupt_cfg0);
2265 
2267  (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2268  &vp_reg->interrupt_cfg2);
2269 
2270  if (vpath->hldev->config.intr_mode ==
2272  __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2274  0, 32), &vp_reg->one_shot_vect0_en);
2275  __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2277  0, 32), &vp_reg->one_shot_vect1_en);
2278  __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2280  0, 32), &vp_reg->one_shot_vect2_en);
2281  }
2282 }
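/*
 * Editor's sketch (not part of the original file): a typical way the upper
 * layer might wire the TIM TX/RX interrupts and the alarm interrupt to MSI-X
 * vectors with the helper above. The vector numbers are placeholders.
 */
#if 0
static void example_msix_wiring(struct __vxge_hw_vpath_handle *vp)
{
	/* tim_msix_id[0] -> TX TIM vector, tim_msix_id[1] -> RX TIM vector */
	int tim_msix_id[4] = {0, 1, 0, 0};

	/* vector 2 is used here as the per-function alarm vector */
	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
}
#endif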
2283 
2296 void
2298 {
2299  struct __vxge_hw_device *hldev = vp->vpath->hldev;
2300  __vxge_hw_pio_mem_write32_upper(
2301  (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2302  &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2303 }
2304 
2318 {
2319  struct __vxge_hw_device *hldev = vp->vpath->hldev;
2320 
2321  if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2322  __vxge_hw_pio_mem_write32_upper(
2323  (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2324  &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2325  else
2326  __vxge_hw_pio_mem_write32_upper(
2327  (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2328  &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2329 }
2330 
2343 void
2345 {
2346  struct __vxge_hw_device *hldev = vp->vpath->hldev;
2347  __vxge_hw_pio_mem_write32_upper(
2348  (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2349  &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2350 }
2351 
2361 {
2362  u64 tim_int_mask0[4] = {[0 ...3] = 0};
2363  u32 tim_int_mask1[4] = {[0 ...3] = 0};
2364  u64 val64;
2365  struct __vxge_hw_device *hldev = vp->vpath->hldev;
2366 
2367  VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2368  tim_int_mask1, vp->vpath->vp_id);
2369 
2370  val64 = readq(&hldev->common_reg->tim_int_mask0);
2371 
2372  if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2373  (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2374  writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2375  tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2376  &hldev->common_reg->tim_int_mask0);
2377  }
2378 
2379  val64 = readl(&hldev->common_reg->tim_int_mask1);
2380 
2381  if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2382  (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2383  __vxge_hw_pio_mem_write32_upper(
2384  (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2385  tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2386  &hldev->common_reg->tim_int_mask1);
2387  }
2388 }
2389 
2399 {
2400  u64 tim_int_mask0[4] = {[0 ...3] = 0};
2401  u32 tim_int_mask1[4] = {[0 ...3] = 0};
2402  u64 val64;
2403  struct __vxge_hw_device *hldev = vp->vpath->hldev;
2404 
2405  VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2406  tim_int_mask1, vp->vpath->vp_id);
2407 
2408  val64 = readq(&hldev->common_reg->tim_int_mask0);
2409 
2410  if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2411  (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2412  writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2413  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2414  &hldev->common_reg->tim_int_mask0);
2415  }
2416 
2417  if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2418  (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2419  __vxge_hw_pio_mem_write32_upper(
2420  (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2421  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2422  &hldev->common_reg->tim_int_mask1);
2423  }
2424 }
2425 
2441 {
2442  u8 t_code;
2444  void *first_rxdh;
2445  u64 val64 = 0;
2446  int new_count = 0;
2447 
2448  ring->cmpl_cnt = 0;
2449 
2450  status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2451  if (status == VXGE_HW_OK)
2452  ring->callback(ring, first_rxdh,
2453  t_code, ring->channel.userdata);
2454 
2455  if (ring->cmpl_cnt != 0) {
2456  ring->doorbell_cnt += ring->cmpl_cnt;
2457  if (ring->doorbell_cnt >= ring->rxds_limit) {
2458  /*
2459  * Each RxD is of 4 qwords, update the number of
2460  * qwords replenished
2461  */
2462  new_count = (ring->doorbell_cnt * 4);
2463 
2464  /* For each block add 4 more qwords */
2465  ring->total_db_cnt += ring->doorbell_cnt;
2466  if (ring->total_db_cnt >= ring->rxds_per_block) {
2467  new_count += 4;
2468  /* Reset total count */
2469  ring->total_db_cnt %= ring->rxds_per_block;
2470  }
2471  writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2472  &ring->vp_reg->prc_rxd_doorbell);
2473  val64 =
2474  readl(&ring->common_reg->titan_general_int_status);
2475  ring->doorbell_cnt = 0;
2476  }
2477  }
2478 
2479  return status;
2480 }
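/*
 * Editor's note on the doorbell arithmetic above: each RxD occupies four
 * qwords, so new_count starts out as doorbell_cnt * 4. For example, assuming
 * doorbell_cnt = 32, 128 qwords are reported through prc_rxd_doorbell; and
 * whenever the running total_db_cnt crosses a block boundary (rxds_per_block
 * descriptors), four extra qwords are credited, presumably to account for
 * the block's link descriptor.
 */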
2481 
2495  struct sk_buff ***skb_ptr, int nr_skb,
2496  int *more)
2497 {
2498  enum vxge_hw_fifo_tcode t_code;
2499  void *first_txdlh;
2501  struct __vxge_hw_channel *channel;
2502 
2503  channel = &fifo->channel;
2504 
2505  status = vxge_hw_fifo_txdl_next_completed(fifo,
2506  &first_txdlh, &t_code);
2507  if (status == VXGE_HW_OK)
2508  if (fifo->callback(fifo, first_txdlh, t_code,
2509  channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2510  status = VXGE_HW_COMPLETIONS_REMAIN;
2511 
2512  return status;
2513 }