Linux Kernel  3.7.1
qlcnic_hw.c
1 /*
2  * QLogic qlcnic NIC Driver
3  * Copyright (c) 2009-2010 QLogic Corporation
4  *
5  * See LICENSE.qlcnic for copyright and licensing details.
6  */
7 
8 #include "qlcnic.h"
9 
10 #include <linux/slab.h>
11 #include <net/ip.h>
12 #include <linux/bitops.h>
13 
14 #define MASK(n) ((1ULL<<(n))-1)
15 #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
16 
17 #define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
18 
19 #define CRB_BLK(off) ((off >> 20) & 0x3f)
20 #define CRB_SUBBLK(off) ((off >> 16) & 0xf)
21 #define CRB_WINDOW_2M (0x130060)
22 #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
23 #define CRB_INDIRECT_2M (0x1e0000UL)
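/*
 * Example of the decode macros above: for a 128M-map offset
 * off = 0x0613000, CRB_BLK(off) = (off >> 20) & 0x3f = 6 (the NIU
 * block in crb_128M_2M_map below) and CRB_SUBBLK(off) = 1, so
 * CRB_HI(off) places crb_hub_agt[6] in the top bits and keeps bits
 * 16-19 of the offset. OCM_WIN_P3P() selects a 256KB-aligned OCM
 * window and GET_MEM_OFFS_2M() keeps the offset within that window.
 */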
24 
25 
26 #ifndef readq
27 static inline u64 readq(void __iomem *addr)
28 {
29  return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
30 }
31 #endif
32 
33 #ifndef writeq
34 static inline void writeq(u64 val, void __iomem *addr)
35 {
36  writel(((u32) (val)), (addr));
37  writel(((u32) (val >> 32)), (addr + 4));
38 }
39 #endif
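/*
 * Note on the readq/writeq fallbacks above: on platforms without
 * native 64-bit MMIO accessors the access is split into two 32-bit
 * operations (low word first). The combined access is not atomic,
 * which is acceptable here because the callers in this file hold
 * ahw->mem_lock around these accesses.
 */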
40 
41 static struct crb_128M_2M_block_map
42 crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
43  {{{0, 0, 0, 0} } }, /* 0: PCI */
44  {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
45  {1, 0x0110000, 0x0120000, 0x130000},
46  {1, 0x0120000, 0x0122000, 0x124000},
47  {1, 0x0130000, 0x0132000, 0x126000},
48  {1, 0x0140000, 0x0142000, 0x128000},
49  {1, 0x0150000, 0x0152000, 0x12a000},
50  {1, 0x0160000, 0x0170000, 0x110000},
51  {1, 0x0170000, 0x0172000, 0x12e000},
52  {0, 0x0000000, 0x0000000, 0x000000},
53  {0, 0x0000000, 0x0000000, 0x000000},
54  {0, 0x0000000, 0x0000000, 0x000000},
55  {0, 0x0000000, 0x0000000, 0x000000},
56  {0, 0x0000000, 0x0000000, 0x000000},
57  {0, 0x0000000, 0x0000000, 0x000000},
58  {1, 0x01e0000, 0x01e0800, 0x122000},
59  {0, 0x0000000, 0x0000000, 0x000000} } },
60  {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
61  {{{0, 0, 0, 0} } }, /* 3: */
62  {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
63  {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
64  {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
65  {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
66  {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
67  {0, 0x0000000, 0x0000000, 0x000000},
68  {0, 0x0000000, 0x0000000, 0x000000},
69  {0, 0x0000000, 0x0000000, 0x000000},
70  {0, 0x0000000, 0x0000000, 0x000000},
71  {0, 0x0000000, 0x0000000, 0x000000},
72  {0, 0x0000000, 0x0000000, 0x000000},
73  {0, 0x0000000, 0x0000000, 0x000000},
74  {0, 0x0000000, 0x0000000, 0x000000},
75  {0, 0x0000000, 0x0000000, 0x000000},
76  {0, 0x0000000, 0x0000000, 0x000000},
77  {0, 0x0000000, 0x0000000, 0x000000},
78  {0, 0x0000000, 0x0000000, 0x000000},
79  {0, 0x0000000, 0x0000000, 0x000000},
80  {0, 0x0000000, 0x0000000, 0x000000},
81  {1, 0x08f0000, 0x08f2000, 0x172000} } },
82  {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
83  {0, 0x0000000, 0x0000000, 0x000000},
84  {0, 0x0000000, 0x0000000, 0x000000},
85  {0, 0x0000000, 0x0000000, 0x000000},
86  {0, 0x0000000, 0x0000000, 0x000000},
87  {0, 0x0000000, 0x0000000, 0x000000},
88  {0, 0x0000000, 0x0000000, 0x000000},
89  {0, 0x0000000, 0x0000000, 0x000000},
90  {0, 0x0000000, 0x0000000, 0x000000},
91  {0, 0x0000000, 0x0000000, 0x000000},
92  {0, 0x0000000, 0x0000000, 0x000000},
93  {0, 0x0000000, 0x0000000, 0x000000},
94  {0, 0x0000000, 0x0000000, 0x000000},
95  {0, 0x0000000, 0x0000000, 0x000000},
96  {0, 0x0000000, 0x0000000, 0x000000},
97  {1, 0x09f0000, 0x09f2000, 0x176000} } },
98  {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
99  {0, 0x0000000, 0x0000000, 0x000000},
100  {0, 0x0000000, 0x0000000, 0x000000},
101  {0, 0x0000000, 0x0000000, 0x000000},
102  {0, 0x0000000, 0x0000000, 0x000000},
103  {0, 0x0000000, 0x0000000, 0x000000},
104  {0, 0x0000000, 0x0000000, 0x000000},
105  {0, 0x0000000, 0x0000000, 0x000000},
106  {0, 0x0000000, 0x0000000, 0x000000},
107  {0, 0x0000000, 0x0000000, 0x000000},
108  {0, 0x0000000, 0x0000000, 0x000000},
109  {0, 0x0000000, 0x0000000, 0x000000},
110  {0, 0x0000000, 0x0000000, 0x000000},
111  {0, 0x0000000, 0x0000000, 0x000000},
112  {0, 0x0000000, 0x0000000, 0x000000},
113  {1, 0x0af0000, 0x0af2000, 0x17a000} } },
114  {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
115  {0, 0x0000000, 0x0000000, 0x000000},
116  {0, 0x0000000, 0x0000000, 0x000000},
117  {0, 0x0000000, 0x0000000, 0x000000},
118  {0, 0x0000000, 0x0000000, 0x000000},
119  {0, 0x0000000, 0x0000000, 0x000000},
120  {0, 0x0000000, 0x0000000, 0x000000},
121  {0, 0x0000000, 0x0000000, 0x000000},
122  {0, 0x0000000, 0x0000000, 0x000000},
123  {0, 0x0000000, 0x0000000, 0x000000},
124  {0, 0x0000000, 0x0000000, 0x000000},
125  {0, 0x0000000, 0x0000000, 0x000000},
126  {0, 0x0000000, 0x0000000, 0x000000},
127  {0, 0x0000000, 0x0000000, 0x000000},
128  {0, 0x0000000, 0x0000000, 0x000000},
129  {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
130  {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
131  {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
132  {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
133  {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
134  {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
135  {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
136  {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
137  {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
138  {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
139  {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
140  {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
141  {{{0, 0, 0, 0} } }, /* 23: */
142  {{{0, 0, 0, 0} } }, /* 24: */
143  {{{0, 0, 0, 0} } }, /* 25: */
144  {{{0, 0, 0, 0} } }, /* 26: */
145  {{{0, 0, 0, 0} } }, /* 27: */
146  {{{0, 0, 0, 0} } }, /* 28: */
147  {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
148  {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
149  {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
150  {{{0} } }, /* 32: PCI */
151  {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
152  {1, 0x2110000, 0x2120000, 0x130000},
153  {1, 0x2120000, 0x2122000, 0x124000},
154  {1, 0x2130000, 0x2132000, 0x126000},
155  {1, 0x2140000, 0x2142000, 0x128000},
156  {1, 0x2150000, 0x2152000, 0x12a000},
157  {1, 0x2160000, 0x2170000, 0x110000},
158  {1, 0x2170000, 0x2172000, 0x12e000},
159  {0, 0x0000000, 0x0000000, 0x000000},
160  {0, 0x0000000, 0x0000000, 0x000000},
161  {0, 0x0000000, 0x0000000, 0x000000},
162  {0, 0x0000000, 0x0000000, 0x000000},
163  {0, 0x0000000, 0x0000000, 0x000000},
164  {0, 0x0000000, 0x0000000, 0x000000},
165  {0, 0x0000000, 0x0000000, 0x000000},
166  {0, 0x0000000, 0x0000000, 0x000000} } },
167  {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
168  {{{0} } }, /* 35: */
169  {{{0} } }, /* 36: */
170  {{{0} } }, /* 37: */
171  {{{0} } }, /* 38: */
172  {{{0} } }, /* 39: */
173  {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
174  {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
175  {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
176  {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
177  {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
178  {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
179  {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
180  {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
181  {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
182  {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
183  {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
184  {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
185  {{{0} } }, /* 52: */
186  {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
187  {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
188  {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
189  {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
190  {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
191  {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
192  {{{0} } }, /* 59: I2C0 */
193  {{{0} } }, /* 60: I2C1 */
194  {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
195  {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
196  {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
197 };
198 
199 /*
200  * top 12 bits of crb internal address (hub, agent)
201  */
202 static const unsigned crb_hub_agt[64] = {
203  0,
204  QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
205  QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
206  QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
207  0,
208  QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
209  QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
210  QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
211  QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
212  QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
213  QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
214  QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
215  QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
216  QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
217  QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
218  QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
219  QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
220  QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
221  QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
222  QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
223  QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
224  QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
225  QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
226  QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
227  QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
228  QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
229  QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
230  0,
231  QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
232  QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
233  0,
234  QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
235  0,
236  QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
237  QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
238  0,
239  0,
240  0,
241  0,
242  0,
243  QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
244  0,
245  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
246  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
247  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
248  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
249  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
250  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
251  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
252  QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
253  QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
254  QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
255  0,
256  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
257  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
258  QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
259  QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
260  0,
261  QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
262  QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
263  QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
264  0,
265  QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
266  0,
267 };
268 
269 /* PCI Windowing for DDR regions. */
270 
271 #define QLCNIC_PCIE_SEM_TIMEOUT 10000
272 
273 int
274 qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
275 {
276  int done = 0, timeout = 0;
277 
278  while (!done) {
279  done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
280  if (done == 1)
281  break;
282  if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
283  dev_err(&adapter->pdev->dev,
284  "Failed to acquire sem=%d lock; holdby=%d\n",
285  sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
286  return -EIO;
287  }
288  msleep(1);
289  }
290 
291  if (id_reg)
292  QLCWR32(adapter, id_reg, adapter->portnum);
293 
294  return 0;
295 }
296 
297 void
298 qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
299 {
300  QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
301 }
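/*
 * The two helpers above drive the hardware semaphores shared across
 * PCI functions: reading PCIE_SEM_LOCK(sem) returns 1 once the lock
 * is granted, and a read of PCIE_SEM_UNLOCK(sem) releases it. With
 * QLCNIC_PCIE_SEM_TIMEOUT polls of 1 ms each, acquisition gives up
 * after roughly 10 seconds and reports the current holder read from
 * id_reg when one was supplied.
 */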
302 
303 static int
304 qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
305  struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
306 {
307  u32 i, producer, consumer;
308  struct qlcnic_cmd_buffer *pbuf;
309  struct cmd_desc_type0 *cmd_desc;
310  struct qlcnic_host_tx_ring *tx_ring;
311 
312  i = 0;
313 
314  if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
315  return -EIO;
316 
317  tx_ring = adapter->tx_ring;
318  __netif_tx_lock_bh(tx_ring->txq);
319 
320  producer = tx_ring->producer;
321  consumer = tx_ring->sw_consumer;
322 
323  if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
324  netif_tx_stop_queue(tx_ring->txq);
325  smp_mb();
326  if (qlcnic_tx_avail(tx_ring) > nr_desc) {
327  if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
328  netif_tx_wake_queue(tx_ring->txq);
329  } else {
330  adapter->stats.xmit_off++;
331  __netif_tx_unlock_bh(tx_ring->txq);
332  return -EBUSY;
333  }
334  }
335 
336  do {
337  cmd_desc = &cmd_desc_arr[i];
338 
339  pbuf = &tx_ring->cmd_buf_arr[producer];
340  pbuf->skb = NULL;
341  pbuf->frag_count = 0;
342 
343  memcpy(&tx_ring->desc_head[producer],
344  &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
345 
346  producer = get_next_index(producer, tx_ring->num_desc);
347  i++;
348 
349  } while (i != nr_desc);
350 
351  tx_ring->producer = producer;
352 
353  qlcnic_update_cmd_producer(adapter, tx_ring);
354 
355  __netif_tx_unlock_bh(tx_ring->txq);
356 
357  return 0;
358 }
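/*
 * qlcnic_send_cmd_descs() is the common path used by the helpers
 * below to hand control requests (MAC filters, rx mode, coalescing,
 * LRO, RSS, LED, etc.) to the firmware: each request is formatted as
 * a qlcnic_nic_req and posted on the regular Tx ring as command
 * descriptors, which is why it takes the Tx queue lock and updates
 * the ring producer instead of using a mailbox.
 */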
359 
360 static int
361 qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
362  __le16 vlan_id, unsigned op)
363 {
364  struct qlcnic_nic_req req;
365  struct qlcnic_mac_req *mac_req;
366  struct qlcnic_vlan_req *vlan_req;
367  u64 word;
368 
369  memset(&req, 0, sizeof(struct qlcnic_nic_req));
370  req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
371 
372  word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
373  req.req_hdr = cpu_to_le64(word);
374 
375  mac_req = (struct qlcnic_mac_req *)&req.words[0];
376  mac_req->op = op;
377  memcpy(mac_req->mac_addr, addr, 6);
378 
379  vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
380  vlan_req->vlan_id = vlan_id;
381 
382  return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
383 }
384 
385 static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
386 {
387  struct list_head *head;
388  struct qlcnic_mac_list_s *cur;
389 
390  /* look up if already exists */
391  list_for_each(head, &adapter->mac_list) {
392  cur = list_entry(head, struct qlcnic_mac_list_s, list);
393  if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
394  return 0;
395  }
396 
397  cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
398  if (cur == NULL) {
399  dev_err(&adapter->netdev->dev,
400  "failed to add mac address filter\n");
401  return -ENOMEM;
402  }
403  memcpy(cur->mac_addr, addr, ETH_ALEN);
404 
405  if (qlcnic_sre_macaddr_change(adapter,
406  cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
407  kfree(cur);
408  return -EIO;
409  }
410 
411  list_add_tail(&cur->list, &adapter->mac_list);
412  return 0;
413 }
414 
415 void qlcnic_set_multi(struct net_device *netdev)
416 {
417  struct qlcnic_adapter *adapter = netdev_priv(netdev);
418  struct netdev_hw_addr *ha;
419  static const u8 bcast_addr[ETH_ALEN] = {
420  0xff, 0xff, 0xff, 0xff, 0xff, 0xff
421  };
422  u32 mode = VPORT_MISS_MODE_DROP;
423 
424  if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
425  return;
426 
427  qlcnic_nic_add_mac(adapter, adapter->mac_addr);
428  qlcnic_nic_add_mac(adapter, bcast_addr);
429 
430  if (netdev->flags & IFF_PROMISC) {
431  if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
432  mode = VPORT_MISS_MODE_ACCEPT_ALL;
433  goto send_fw_cmd;
434  }
435 
436  if ((netdev->flags & IFF_ALLMULTI) ||
437  (netdev_mc_count(netdev) > adapter->max_mc_count)) {
438  mode = VPORT_MISS_MODE_ACCEPT_MULTI;
439  goto send_fw_cmd;
440  }
441 
442  if (!netdev_mc_empty(netdev)) {
443  netdev_for_each_mc_addr(ha, netdev) {
444  qlcnic_nic_add_mac(adapter, ha->addr);
445  }
446  }
447 
448 send_fw_cmd:
449  if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
450  qlcnic_alloc_lb_filters_mem(adapter);
451  adapter->mac_learn = 1;
452  } else {
453  adapter->mac_learn = 0;
454  }
455 
456  qlcnic_nic_set_promisc(adapter, mode);
457 }
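/*
 * Rx mode selection in qlcnic_set_multi(): mode defaults to
 * VPORT_MISS_MODE_DROP, becomes VPORT_MISS_MODE_ACCEPT_ALL for
 * IFF_PROMISC (unless QLCNIC_PROMISC_DISABLED is set) and
 * VPORT_MISS_MODE_ACCEPT_MULTI when IFF_ALLMULTI is set or the
 * multicast list exceeds max_mc_count. Unicast and broadcast
 * addresses are programmed individually through qlcnic_nic_add_mac(),
 * and MAC learning is enabled only in accept-all mode.
 */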
458 
459 int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
460 {
461  struct qlcnic_nic_req req;
462  u64 word;
463 
464  memset(&req, 0, sizeof(struct qlcnic_nic_req));
465 
466  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
467 
468  word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
469  ((u64)adapter->portnum << 16);
470  req.req_hdr = cpu_to_le64(word);
471 
472  req.words[0] = cpu_to_le64(mode);
473 
474  return qlcnic_send_cmd_descs(adapter,
475  (struct cmd_desc_type0 *)&req, 1);
476 }
477 
478 void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
479 {
480  struct qlcnic_mac_list_s *cur;
481  struct list_head *head = &adapter->mac_list;
482 
483  while (!list_empty(head)) {
484  cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
485  qlcnic_sre_macaddr_change(adapter,
486  cur->mac_addr, 0, QLCNIC_MAC_DEL);
487  list_del(&cur->list);
488  kfree(cur);
489  }
490 }
491 
492 void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
493 {
494  struct qlcnic_filter *tmp_fil;
495  struct hlist_node *tmp_hnode, *n;
496  struct hlist_head *head;
497  int i;
498 
499  for (i = 0; i < adapter->fhash.fmax; i++) {
500  head = &(adapter->fhash.fhead[i]);
501 
502  hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
503  {
504  if (jiffies >
505  (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
506  qlcnic_sre_macaddr_change(adapter,
507  tmp_fil->faddr, tmp_fil->vlan_id,
508  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
509  QLCNIC_MAC_DEL);
510  spin_lock_bh(&adapter->mac_learn_lock);
511  adapter->fhash.fnum--;
512  hlist_del(&tmp_fil->fnode);
513  spin_unlock_bh(&adapter->mac_learn_lock);
514  kfree(tmp_fil);
515  }
516  }
517  }
518 }
519 
520 void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
521 {
522  struct qlcnic_filter *tmp_fil;
523  struct hlist_node *tmp_hnode, *n;
524  struct hlist_head *head;
525  int i;
526 
527  for (i = 0; i < adapter->fhash.fmax; i++) {
528  head = &(adapter->fhash.fhead[i]);
529 
530  hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
531  qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
532  tmp_fil->vlan_id, tmp_fil->vlan_id ?
533  QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
534  spin_lock_bh(&adapter->mac_learn_lock);
535  adapter->fhash.fnum--;
536  hlist_del(&tmp_fil->fnode);
537  spin_unlock_bh(&adapter->mac_learn_lock);
538  kfree(tmp_fil);
539  }
540  }
541 }
542 
543 int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
544 {
545  struct qlcnic_nic_req req;
546  int rv;
547 
548  memset(&req, 0, sizeof(struct qlcnic_nic_req));
549 
550  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
551  req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
552  ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
553 
554  req.words[0] = cpu_to_le64(flag);
555 
556  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
557  if (rv != 0)
558  dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
559  flag ? "Set" : "Reset");
560  return rv;
561 }
562 
563 int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
564 {
565  if (qlcnic_set_fw_loopback(adapter, mode))
566  return -EIO;
567 
568  if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
569  qlcnic_set_fw_loopback(adapter, 0);
570  return -EIO;
571  }
572 
573  msleep(1000);
574  return 0;
575 }
576 
577 void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
578 {
579  int mode = VPORT_MISS_MODE_DROP;
580  struct net_device *netdev = adapter->netdev;
581 
582  qlcnic_set_fw_loopback(adapter, 0);
583 
584  if (netdev->flags & IFF_PROMISC)
585  mode = VPORT_MISS_MODE_ACCEPT_ALL;
586  else if (netdev->flags & IFF_ALLMULTI)
587  mode = VPORT_MISS_MODE_ACCEPT_MULTI;
588 
589  qlcnic_nic_set_promisc(adapter, mode);
590  msleep(1000);
591 }
592 
593 /*
594  * Send the interrupt coalescing parameter set by ethtool to the card.
595  */
596 int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
597 {
598  struct qlcnic_nic_req req;
599  int rv;
600 
601  memset(&req, 0, sizeof(struct qlcnic_nic_req));
602 
603  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
604 
605  req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
606  ((u64) adapter->portnum << 16));
607 
608  req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
609  req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
610  ((u64) adapter->ahw->coal.rx_time_us) << 16);
611  req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
612  ((u64) adapter->ahw->coal.type) << 32 |
613  ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
614  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
615  if (rv != 0)
616  dev_err(&adapter->netdev->dev,
617  "Could not send interrupt coalescing parameters\n");
618  return rv;
619 }
620 
621 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
622 {
623  struct qlcnic_nic_req req;
624  u64 word;
625  int rv;
626 
627  if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
628  return 0;
629 
630  memset(&req, 0, sizeof(struct qlcnic_nic_req));
631 
632  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
633 
634  word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
635  req.req_hdr = cpu_to_le64(word);
636 
637  req.words[0] = cpu_to_le64(enable);
638 
639  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
640  if (rv != 0)
641  dev_err(&adapter->netdev->dev,
642  "Could not send configure hw lro request\n");
643 
644  return rv;
645 }
646 
647 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
648 {
649  struct qlcnic_nic_req req;
650  u64 word;
651  int rv;
652 
653  if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
654  return 0;
655 
656  memset(&req, 0, sizeof(struct qlcnic_nic_req));
657 
658  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
659 
660  word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
661  ((u64)adapter->portnum << 16);
662  req.req_hdr = cpu_to_le64(word);
663 
664  req.words[0] = cpu_to_le64(enable);
665 
666  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
667  if (rv != 0)
668  dev_err(&adapter->netdev->dev,
669  "Could not send configure bridge mode request\n");
670 
671  adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
672 
673  return rv;
674 }
675 
676 
677 #define RSS_HASHTYPE_IP_TCP 0x3
678 
679 int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
680 {
681  struct qlcnic_nic_req req;
682  u64 word;
683  int i, rv;
684 
685  static const u64 key[] = {
686  0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
687  0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
688  0x255b0ec26d5a56daULL
689  };
690 
691  memset(&req, 0, sizeof(struct qlcnic_nic_req));
692  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
693 
694  word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
695  req.req_hdr = cpu_to_le64(word);
696 
697  /*
698  * RSS request:
699  * bits 3-0: hash_method
700  * 5-4: hash_type_ipv4
701  * 7-6: hash_type_ipv6
702  * 8: enable
703  * 9: use indirection table
704  * 47-10: reserved
705  * 63-48: indirection table mask
706  */
707  word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
708  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
709  ((u64)(enable & 0x1) << 8) |
710  ((0x7ULL) << 48);
711  req.words[0] = cpu_to_le64(word);
712  for (i = 0; i < 5; i++)
713  req.words[i+1] = cpu_to_le64(key[i]);
714 
715  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
716  if (rv != 0)
717  dev_err(&adapter->netdev->dev, "could not configure RSS\n");
718 
719  return rv;
720 }
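/*
 * In the RSS request above, both the IPv4 and IPv6 hash types are set
 * to RSS_HASHTYPE_IP_TCP, bit 8 carries the enable flag, and the
 * indirection table mask in bits 63-48 is fixed at 0x7, i.e. receive
 * flows are spread over up to eight status rings. words[1..5] carry
 * the 40-byte RSS hash key defined in key[].
 */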
721 
722 int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
723 {
724  struct qlcnic_nic_req req;
725  struct qlcnic_ipaddr *ipa;
726  u64 word;
727  int rv;
728 
729  memset(&req, 0, sizeof(struct qlcnic_nic_req));
730  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
731 
732  word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
733  req.req_hdr = cpu_to_le64(word);
734 
735  req.words[0] = cpu_to_le64(cmd);
736  ipa = (struct qlcnic_ipaddr *)&req.words[1];
737  ipa->ipv4 = ip;
738 
739  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
740  if (rv != 0)
741  dev_err(&adapter->netdev->dev,
742  "could not notify %s IP 0x%x request\n",
743  (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
744 
745  return rv;
746 }
747 
748 int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
749 {
750  struct qlcnic_nic_req req;
751  u64 word;
752  int rv;
753 
754  memset(&req, 0, sizeof(struct qlcnic_nic_req));
755  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
756 
757  word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
758  req.req_hdr = cpu_to_le64(word);
759  req.words[0] = cpu_to_le64(enable | (enable << 8));
760 
761  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
762  if (rv != 0)
763  dev_err(&adapter->netdev->dev,
764  "could not configure link notification\n");
765 
766  return rv;
767 }
768 
769 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
770 {
771  struct qlcnic_nic_req req;
772  u64 word;
773  int rv;
774 
775  if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
776  return 0;
777 
778  memset(&req, 0, sizeof(struct qlcnic_nic_req));
779  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
780 
781  word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
782  ((u64)adapter->portnum << 16) |
783  ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
784 
785  req.req_hdr = cpu_to_le64(word);
786 
787  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
788  if (rv != 0)
789  dev_err(&adapter->netdev->dev,
790  "could not cleanup lro flows\n");
791 
792  return rv;
793 }
794 
795 /*
796  * qlcnic_change_mtu - Change the Maximum Transfer Unit
797  * @returns 0 on success, negative on failure
798  */
799 
800 int qlcnic_change_mtu(struct net_device *netdev, int mtu)
801 {
802  struct qlcnic_adapter *adapter = netdev_priv(netdev);
803  int rc = 0;
804 
805  if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
806  dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
807  " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
808  return -EINVAL;
809  }
810 
811  rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
812 
813  if (!rc)
814  netdev->mtu = mtu;
815 
816  return rc;
817 }
818 
819 
820 netdev_features_t qlcnic_fix_features(struct net_device *netdev,
821  netdev_features_t features)
822 {
823  struct qlcnic_adapter *adapter = netdev_priv(netdev);
824 
825  if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
826  netdev_features_t changed = features ^ netdev->features;
827  features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
828  }
829 
830  if (!(features & NETIF_F_RXCSUM))
831  features &= ~NETIF_F_LRO;
832 
833  return features;
834 }
835 
836 
837 int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
838 {
839  struct qlcnic_adapter *adapter = netdev_priv(netdev);
840  netdev_features_t changed = netdev->features ^ features;
841  int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
842 
843  if (!(changed & NETIF_F_LRO))
844  return 0;
845 
846  netdev->features = features ^ NETIF_F_LRO;
847 
848  if (qlcnic_config_hw_lro(adapter, hw_lro))
849  return -EIO;
850 
851  if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
852  return -EIO;
853 
854  return 0;
855 }
856 
857 /*
858  * Changes the CRB window to the specified window.
859  */
860  /* Returns < 0 if off is not valid,
861  * 1 if window access is needed. 'off' is set to offset from
862  * CRB space in 128M pci map
863  * 0 if no window access is needed. 'off' is set to 2M addr
864  * In: 'off' is offset from base in 128M pci map
865  */
866 static int
867 qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
868  ulong off, void __iomem **addr)
869 {
870  const struct crb_128M_2M_sub_block_map *m;
871 
872  if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
873  return -EINVAL;
874 
875  off -= QLCNIC_PCI_CRBSPACE;
876 
877  /*
878  * Try direct map
879  */
880  m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
881 
882  if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
883  *addr = adapter->ahw->pci_base0 + m->start_2M +
884  (off - m->start_128M);
885  return 0;
886  }
887 
888  /*
889  * Not in direct map, use crb window
890  */
891  *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
892  return 1;
893 }
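/*
 * Layout behind the lookup above: most sub-blocks of the 128M CRB
 * address space are direct-mapped somewhere in the 2MB BAR (see
 * crb_128M_2M_map), so the common case returns 0 with a ready-to-use
 * address. Registers without a direct mapping fall back to the 64KB
 * indirect window at CRB_INDIRECT_2M, which is why the caller must
 * then program CRB_WINDOW_2M under crb_lock before touching *addr.
 */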
894 
895 /*
896  * In: 'off' is offset from CRB space in 128M pci map
897  * Out: 'off' is 2M pci map addr
898  * side effect: lock crb window
899  */
900 static int
901 qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
902 {
903  u32 window;
904  void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
905 
906  off -= QLCNIC_PCI_CRBSPACE;
907 
908  window = CRB_HI(off);
909  if (window == 0) {
910  dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
911  return -EIO;
912  }
913 
914  writel(window, addr);
915  if (readl(addr) != window) {
916  if (printk_ratelimit())
917  dev_warn(&adapter->pdev->dev,
918  "failed to set CRB window to %d off 0x%lx\n",
919  window, off);
920  return -EIO;
921  }
922  return 0;
923 }
924 
925 int
926 qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
927 {
928  unsigned long flags;
929  int rv;
930  void __iomem *addr = NULL;
931 
932  rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
933 
934  if (rv == 0) {
935  writel(data, addr);
936  return 0;
937  }
938 
939  if (rv > 0) {
940  /* indirect access */
941  write_lock_irqsave(&adapter->ahw->crb_lock, flags);
942  crb_win_lock(adapter);
943  rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
944  if (!rv)
945  writel(data, addr);
946  crb_win_unlock(adapter);
947  write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
948  return rv;
949  }
950 
951  dev_err(&adapter->pdev->dev,
952  "%s: invalid offset: 0x%016lx\n", __func__, off);
953  dump_stack();
954  return -EIO;
955 }
956 
957 u32
958 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
959 {
960  unsigned long flags;
961  int rv;
962  u32 data = -1;
963  void __iomem *addr = NULL;
964 
965  rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
966 
967  if (rv == 0)
968  return readl(addr);
969 
970  if (rv > 0) {
971  /* indirect access */
972  write_lock_irqsave(&adapter->ahw->crb_lock, flags);
973  crb_win_lock(adapter);
974  if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
975  data = readl(addr);
976  crb_win_unlock(adapter);
977  write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
978  return data;
979  }
980 
981  dev_err(&adapter->pdev->dev,
982  "%s: invalid offset: 0x%016lx\n", __func__, off);
983  dump_stack();
984  return -1;
985 }
986 
987 
988 void __iomem *
989 qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
990 {
991  void __iomem *addr = NULL;
992 
993  WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
994 
995  return addr;
996 }
997 
998 
999 static int
1000 qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
1001  u64 addr, u32 *start)
1002 {
1003  u32 window;
1004 
1005  window = OCM_WIN_P3P(addr);
1006 
1007  writel(window, adapter->ahw->ocm_win_crb);
1008  /* read back to flush */
1009  readl(adapter->ahw->ocm_win_crb);
1010 
1011  *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
1012  return 0;
1013 }
1014 
1015 static int
1016 qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
1017  u64 *data, int op)
1018 {
1019  void __iomem *addr;
1020  int ret;
1021  u32 start;
1022 
1023  mutex_lock(&adapter->ahw->mem_lock);
1024 
1025  ret = qlcnic_pci_set_window_2M(adapter, off, &start);
1026  if (ret != 0)
1027  goto unlock;
1028 
1029  addr = adapter->ahw->pci_base0 + start;
1030 
1031  if (op == 0) /* read */
1032  *data = readq(addr);
1033  else /* write */
1034  writeq(*data, addr);
1035 
1036 unlock:
1037  mutex_unlock(&adapter->ahw->mem_lock);
1038 
1039  return ret;
1040 }
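/*
 * Direct OCM access above: qlcnic_pci_set_window_2M() programs the
 * OCM window register with OCM_WIN_P3P(off), and the 64-bit value is
 * then read or written at QLCNIC_PCI_OCM0_2M plus the in-window
 * offset using the readq/writeq helpers defined at the top of this
 * file. ahw->mem_lock serializes the window selection and the access.
 */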
1041 
1042 void
1043 qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1044 {
1045  void __iomem *addr = adapter->ahw->pci_base0 +
1046  QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1047 
1048  mutex_lock(&adapter->ahw->mem_lock);
1049  *data = readq(addr);
1050  mutex_unlock(&adapter->ahw->mem_lock);
1051 }
1052 
1053 void
1054 qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1055 {
1056  void __iomem *addr = adapter->ahw->pci_base0 +
1057  QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1058 
1059  mutex_lock(&adapter->ahw->mem_lock);
1060  writeq(data, addr);
1061  mutex_unlock(&adapter->ahw->mem_lock);
1062 }
1063 
1064 #define MAX_CTL_CHECK 1000
1065 
1066 int
1067 qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
1068  u64 off, u64 data)
1069 {
1070  int i, j, ret;
1071  u32 temp, off8;
1072  void __iomem *mem_crb;
1073 
1074  /* Only 64-bit aligned access */
1075  if (off & 7)
1076  return -EIO;
1077 
1078  /* P3 onward, test agent base for MIU and SIU is same */
1079  if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1080  QLCNIC_ADDR_QDR_NET_MAX)) {
1081  mem_crb = qlcnic_get_ioaddr(adapter,
1082  QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1083  goto correct;
1084  }
1085 
1086  if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1087  mem_crb = qlcnic_get_ioaddr(adapter,
1088  QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1089  goto correct;
1090  }
1091 
1092  if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1093  return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
1094 
1095  return -EIO;
1096 
1097 correct:
1098  off8 = off & ~0xf;
1099 
1100  mutex_lock(&adapter->ahw->mem_lock);
1101 
1102  writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1103  writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1104 
1105  i = 0;
1106  writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1107  writel((TA_CTL_START | TA_CTL_ENABLE),
1108  (mem_crb + TEST_AGT_CTRL));
1109 
1110  for (j = 0; j < MAX_CTL_CHECK; j++) {
1111  temp = readl(mem_crb + TEST_AGT_CTRL);
1112  if ((temp & TA_CTL_BUSY) == 0)
1113  break;
1114  }
1115 
1116  if (j >= MAX_CTL_CHECK) {
1117  ret = -EIO;
1118  goto done;
1119  }
1120 
1121  i = (off & 0xf) ? 0 : 2;
1122  writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1123  mem_crb + MIU_TEST_AGT_WRDATA(i));
1124  writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1125  mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1126  i = (off & 0xf) ? 2 : 0;
1127 
1128  writel(data & 0xffffffff,
1129  mem_crb + MIU_TEST_AGT_WRDATA(i));
1130  writel((data >> 32) & 0xffffffff,
1131  mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1132 
1133  writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1134  writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1135  (mem_crb + TEST_AGT_CTRL));
1136 
1137  for (j = 0; j < MAX_CTL_CHECK; j++) {
1138  temp = readl(mem_crb + TEST_AGT_CTRL);
1139  if ((temp & TA_CTL_BUSY) == 0)
1140  break;
1141  }
1142 
1143  if (j >= MAX_CTL_CHECK) {
1144  if (printk_ratelimit())
1145  dev_err(&adapter->pdev->dev,
1146  "failed to write through agent\n");
1147  ret = -EIO;
1148  } else
1149  ret = 0;
1150 
1151 done:
1152  mutex_unlock(&adapter->ahw->mem_lock);
1153 
1154  return ret;
1155 }
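/*
 * The MIU test agent used above operates on 16-byte lines, so an
 * 8-byte write is done as a read-modify-write: the line is read
 * first, the untouched half is copied from the RDDATA registers into
 * WRDATA, the new 64-bit value is placed in the other half, and the
 * line is written back, polling TA_CTL_BUSY after each start.
 */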
1156 
1157 int
1158 qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1159  u64 off, u64 *data)
1160 {
1161  int j, ret;
1162  u32 temp, off8;
1163  u64 val;
1164  void __iomem *mem_crb;
1165 
1166  /* Only 64-bit aligned access */
1167  if (off & 7)
1168  return -EIO;
1169 
1170  /* P3 onward, test agent base for MIU and SIU is same */
1171  if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1172  QLCNIC_ADDR_QDR_NET_MAX)) {
1173  mem_crb = qlcnic_get_ioaddr(adapter,
1174  QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1175  goto correct;
1176  }
1177 
1178  if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1179  mem_crb = qlcnic_get_ioaddr(adapter,
1180  QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1181  goto correct;
1182  }
1183 
1184  if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1185  return qlcnic_pci_mem_access_direct(adapter,
1186  off, data, 0);
1187  }
1188 
1189  return -EIO;
1190 
1191 correct:
1192  off8 = off & ~0xf;
1193 
1194  mutex_lock(&adapter->ahw->mem_lock);
1195 
1196  writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1197  writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1198  writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1199  writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1200 
1201  for (j = 0; j < MAX_CTL_CHECK; j++) {
1202  temp = readl(mem_crb + TEST_AGT_CTRL);
1203  if ((temp & TA_CTL_BUSY) == 0)
1204  break;
1205  }
1206 
1207  if (j >= MAX_CTL_CHECK) {
1208  if (printk_ratelimit())
1209  dev_err(&adapter->pdev->dev,
1210  "failed to read through agent\n");
1211  ret = -EIO;
1212  } else {
1213  off8 = MIU_TEST_AGT_RDDATA_LO;
1214  if (off & 0xf)
1215  off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1216 
1217  temp = readl(mem_crb + off8 + 4);
1218  val = (u64)temp << 32;
1219  val |= readl(mem_crb + off8);
1220  *data = val;
1221  ret = 0;
1222  }
1223 
1224  mutex_unlock(&adapter->ahw->mem_lock);
1225 
1226  return ret;
1227 }
1228 
1229 int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1230 {
1231  int offset, board_type, magic;
1232  struct pci_dev *pdev = adapter->pdev;
1233 
1234  offset = QLCNIC_FW_MAGIC_OFFSET;
1235  if (qlcnic_rom_fast_read(adapter, offset, &magic))
1236  return -EIO;
1237 
1238  if (magic != QLCNIC_BDINFO_MAGIC) {
1239  dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1240  magic);
1241  return -EIO;
1242  }
1243 
1244  offset = QLCNIC_BRDTYPE_OFFSET;
1245  if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1246  return -EIO;
1247 
1248  adapter->ahw->board_type = board_type;
1249 
1250  if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1251  u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1252  if ((gpio & 0x8000) == 0)
1253  board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1254  }
1255 
1256  switch (board_type) {
1257  case QLCNIC_BRDTYPE_P3P_HMEZ:
1258  case QLCNIC_BRDTYPE_P3P_XG_LOM:
1259  case QLCNIC_BRDTYPE_P3P_10G_CX4:
1260  case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1261  case QLCNIC_BRDTYPE_P3P_IMEZ:
1262  case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1263  case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1264  case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1265  case QLCNIC_BRDTYPE_P3P_10G_XFP:
1266  case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1267  adapter->ahw->port_type = QLCNIC_XGBE;
1268  break;
1269  case QLCNIC_BRDTYPE_P3P_REF_QG:
1270  case QLCNIC_BRDTYPE_P3P_4_GB:
1271  case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1272  adapter->ahw->port_type = QLCNIC_GBE;
1273  break;
1274  case QLCNIC_BRDTYPE_P3P_10G_TP:
1275  adapter->ahw->port_type = (adapter->portnum < 2) ?
1276  QLCNIC_XGBE : QLCNIC_GBE;
1277  break;
1278  default:
1279  dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1280  adapter->ahw->port_type = QLCNIC_XGBE;
1281  break;
1282  }
1283 
1284  return 0;
1285 }
1286 
1287 int
1288 qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1289 {
1290  u32 wol_cfg;
1291 
1292  wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1293  if (wol_cfg & (1UL << adapter->portnum)) {
1294  wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1295  if (wol_cfg & (1 << adapter->portnum))
1296  return 1;
1297  }
1298 
1299  return 0;
1300 }
1301 
1302 int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1303 {
1304  struct qlcnic_nic_req req;
1305  int rv;
1306  u64 word;
1307 
1308  memset(&req, 0, sizeof(struct qlcnic_nic_req));
1309  req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1310 
1311  word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1312  req.req_hdr = cpu_to_le64(word);
1313 
1314  req.words[0] = cpu_to_le64((u64)rate << 32);
1315  req.words[1] = cpu_to_le64(state);
1316 
1317  rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1318  if (rv)
1319  dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1320 
1321  return rv;
1322 }
1323 
1324 /* FW dump related functions */
1325 static u32
1326 qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1327  u32 *buffer)
1328 {
1329  int i;
1330  u32 addr, data;
1331  struct __crb *crb = &entry->region.crb;
1332  void __iomem *base = adapter->ahw->pci_base0;
1333 
1334  addr = crb->addr;
1335 
1336  for (i = 0; i < crb->no_ops; i++) {
1337  QLCNIC_RD_DUMP_REG(addr, base, &data);
1338  *buffer++ = cpu_to_le32(addr);
1339  *buffer++ = cpu_to_le32(data);
1340  addr += crb->stride;
1341  }
1342  return crb->no_ops * 2 * sizeof(u32);
1343 }
1344 
1345 static u32
1346 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1347  struct qlcnic_dump_entry *entry, u32 *buffer)
1348 {
1349  int i, k, timeout = 0;
1350  void __iomem *base = adapter->ahw->pci_base0;
1351  u32 addr, data;
1352  u8 opcode, no_ops;
1353  struct __ctrl *ctr = &entry->region.ctrl;
1354  struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1355 
1356  addr = ctr->addr;
1357  no_ops = ctr->no_ops;
1358 
1359  for (i = 0; i < no_ops; i++) {
1360  k = 0;
1361  opcode = 0;
1362  for (k = 0; k < 8; k++) {
1363  if (!(ctr->opcode & (1 << k)))
1364  continue;
1365  switch (1 << k) {
1366  case QLCNIC_DUMP_WCRB:
1367  QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
1368  break;
1369  case QLCNIC_DUMP_RWCRB:
1370  QLCNIC_RD_DUMP_REG(addr, base, &data);
1371  QLCNIC_WR_DUMP_REG(addr, base, data);
1372  break;
1373  case QLCNIC_DUMP_ANDCRB:
1374  QLCNIC_RD_DUMP_REG(addr, base, &data);
1375  QLCNIC_WR_DUMP_REG(addr, base,
1376  (data & ctr->val2));
1377  break;
1378  case QLCNIC_DUMP_ORCRB:
1379  QLCNIC_RD_DUMP_REG(addr, base, &data);
1380  QLCNIC_WR_DUMP_REG(addr, base,
1381  (data | ctr->val3));
1382  break;
1383  case QLCNIC_DUMP_POLLCRB:
1384  while (timeout <= ctr->timeout) {
1385  QLCNIC_RD_DUMP_REG(addr, base, &data);
1386  if ((data & ctr->val2) == ctr->val1)
1387  break;
1388  msleep(1);
1389  timeout++;
1390  }
1391  if (timeout > ctr->timeout) {
1392  dev_info(&adapter->pdev->dev,
1393  "Timed out, aborting poll CRB\n");
1394  return -EINVAL;
1395  }
1396  break;
1397  case QLCNIC_DUMP_RD_SAVE:
1398  if (ctr->index_a)
1399  addr = t_hdr->saved_state[ctr->index_a];
1400  QLCNIC_RD_DUMP_REG(addr, base, &data);
1401  t_hdr->saved_state[ctr->index_v] = data;
1402  break;
1403  case QLCNIC_DUMP_WRT_SAVED:
1404  if (ctr->index_v)
1405  data = t_hdr->saved_state[ctr->index_v];
1406  else
1407  data = ctr->val1;
1408  if (ctr->index_a)
1409  addr = t_hdr->saved_state[ctr->index_a];
1410  QLCNIC_WR_DUMP_REG(addr, base, data);
1411  break;
1412  case QLCNIC_DUMP_MOD_SAVE_ST:
1413  data = t_hdr->saved_state[ctr->index_v];
1414  data <<= ctr->shl_val;
1415  data >>= ctr->shr_val;
1416  if (ctr->val2)
1417  data &= ctr->val2;
1418  data |= ctr->val3;
1419  data += ctr->val1;
1420  t_hdr->saved_state[ctr->index_v] = data;
1421  break;
1422  default:
1423  dev_info(&adapter->pdev->dev,
1424  "Unknown opcode\n");
1425  break;
1426  }
1427  }
1428  addr += ctr->stride;
1429  }
1430  return 0;
1431 }
1432 
1433 static u32
1434 qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1435  u32 *buffer)
1436 {
1437  int loop;
1438  u32 val, data = 0;
1439  struct __mux *mux = &entry->region.mux;
1440  void __iomem *base = adapter->ahw->pci_base0;
1441 
1442  val = mux->val;
1443  for (loop = 0; loop < mux->no_ops; loop++) {
1444  QLCNIC_WR_DUMP_REG(mux->addr, base, val);
1445  QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
1446  *buffer++ = cpu_to_le32(val);
1447  *buffer++ = cpu_to_le32(data);
1448  val += mux->val_stride;
1449  }
1450  return 2 * mux->no_ops * sizeof(u32);
1451 }
1452 
1453 static u32
1454 qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1455  u32 *buffer)
1456 {
1457  int i, loop;
1458  u32 cnt, addr, data, que_id = 0;
1459  void __iomem *base = adapter->ahw->pci_base0;
1460  struct __queue *que = &entry->region.que;
1461 
1462  addr = que->read_addr;
1463  cnt = que->read_addr_cnt;
1464 
1465  for (loop = 0; loop < que->no_ops; loop++) {
1466  QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1467  addr = que->read_addr;
1468  for (i = 0; i < cnt; i++) {
1469  QLCNIC_RD_DUMP_REG(addr, base, &data);
1470  *buffer++ = cpu_to_le32(data);
1471  addr += que->read_addr_stride;
1472  }
1473  que_id += que->stride;
1474  }
1475  return que->no_ops * cnt * sizeof(u32);
1476 }
1477 
1478 static u32
1479 qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1480  u32 *buffer)
1481 {
1482  int i;
1483  u32 data;
1484  void __iomem *addr;
1485  struct __ocm *ocm = &entry->region.ocm;
1486 
1487  addr = adapter->ahw->pci_base0 + ocm->read_addr;
1488  for (i = 0; i < ocm->no_ops; i++) {
1489  data = readl(addr);
1490  *buffer++ = cpu_to_le32(data);
1491  addr += ocm->read_addr_stride;
1492  }
1493  return ocm->no_ops * sizeof(u32);
1494 }
1495 
1496 static u32
1497 qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1498  u32 *buffer)
1499 {
1500  int i, count = 0;
1501  u32 fl_addr, size, val, lck_val, addr;
1502  struct __mem *rom = &entry->region.mem;
1503  void __iomem *base = adapter->ahw->pci_base0;
1504 
1505  fl_addr = rom->addr;
1506  size = rom->size/4;
1507 lock_try:
1508  lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1509  if (!lck_val && count < MAX_CTL_CHECK) {
1510  msleep(10);
1511  count++;
1512  goto lock_try;
1513  }
1514  writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1515  for (i = 0; i < size; i++) {
1516  addr = fl_addr & 0xFFFF0000;
1517  QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
1518  addr = LSW(fl_addr) + FLASH_ROM_DATA;
1519  QLCNIC_RD_DUMP_REG(addr, base, &val);
1520  fl_addr += 4;
1521  *buffer++ = cpu_to_le32(val);
1522  }
1523  readl(base + QLCNIC_FLASH_SEM2_ULK);
1524  return rom->size;
1525 }
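/*
 * Flash access in qlcnic_read_rom() above: reading
 * QLCNIC_FLASH_SEM2_LK until it returns non-zero acquires the flash
 * hardware semaphore (released later by reading QLCNIC_FLASH_SEM2_ULK),
 * after which each dword is fetched by pointing FLASH_ROM_WINDOW at
 * the 64KB-aligned flash address and reading FLASH_ROM_DATA plus the
 * low 16 bits of the address.
 */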
1526 
1527 static u32
1528 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1529  struct qlcnic_dump_entry *entry, u32 *buffer)
1530 {
1531  int i;
1532  u32 cnt, val, data, addr;
1533  void __iomem *base = adapter->ahw->pci_base0;
1534  struct __cache *l1 = &entry->region.cache;
1535 
1536  val = l1->init_tag_val;
1537 
1538  for (i = 0; i < l1->no_ops; i++) {
1539  QLCNIC_WR_DUMP_REG(l1->addr, base, val);
1540  QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1541  addr = l1->read_addr;
1542  cnt = l1->read_addr_num;
1543  while (cnt) {
1544  QLCNIC_RD_DUMP_REG(addr, base, &data);
1545  *buffer++ = cpu_to_le32(data);
1546  addr += l1->read_addr_stride;
1547  cnt--;
1548  }
1549  val += l1->stride;
1550  }
1551  return l1->no_ops * l1->read_addr_num * sizeof(u32);
1552 }
1553 
1554 static u32
1555 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1556  struct qlcnic_dump_entry *entry, u32 *buffer)
1557 {
1558  int i;
1559  u32 cnt, val, data, addr;
1560  u8 poll_mask, poll_to, time_out = 0;
1561  void __iomem *base = adapter->ahw->pci_base0;
1562  struct __cache *l2 = &entry->region.cache;
1563 
1564  val = l2->init_tag_val;
1565  poll_mask = LSB(MSW(l2->ctrl_val));
1566  poll_to = MSB(MSW(l2->ctrl_val));
1567 
1568  for (i = 0; i < l2->no_ops; i++) {
1569  QLCNIC_WR_DUMP_REG(l2->addr, base, val);
1570  if (LSW(l2->ctrl_val))
1571  QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
1572  LSW(l2->ctrl_val));
1573  if (!poll_mask)
1574  goto skip_poll;
1575  do {
1576  QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
1577  if (!(data & poll_mask))
1578  break;
1579  msleep(1);
1580  time_out++;
1581  } while (time_out <= poll_to);
1582 
1583  if (time_out > poll_to) {
1584  dev_err(&adapter->pdev->dev,
1585  "Timeout exceeded in %s, aborting dump\n",
1586  __func__);
1587  return -EINVAL;
1588  }
1589 skip_poll:
1590  addr = l2->read_addr;
1591  cnt = l2->read_addr_num;
1592  while (cnt) {
1593  QLCNIC_RD_DUMP_REG(addr, base, &data);
1594  *buffer++ = cpu_to_le32(data);
1595  addr += l2->read_addr_stride;
1596  cnt--;
1597  }
1598  val += l2->stride;
1599  }
1600  return l2->no_ops * l2->read_addr_num * sizeof(u32);
1601 }
1602 
1603 static u32
1604 qlcnic_read_memory(struct qlcnic_adapter *adapter,
1605  struct qlcnic_dump_entry *entry, u32 *buffer)
1606 {
1607  u32 addr, data, test, ret = 0;
1608  int i, reg_read;
1609  struct __mem *mem = &entry->region.mem;
1610  void __iomem *base = adapter->ahw->pci_base0;
1611 
1612  reg_read = mem->size;
1613  addr = mem->addr;
1614  /* check for data size of multiple of 16 and 16 byte alignment */
1615  if ((addr & 0xf) || (reg_read%16)) {
1616  dev_info(&adapter->pdev->dev,
1617  "Unaligned memory addr:0x%x size:0x%x\n",
1618  addr, reg_read);
1619  return -EINVAL;
1620  }
1621 
1622  mutex_lock(&adapter->ahw->mem_lock);
1623 
1624  while (reg_read != 0) {
1625  QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
1626  QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
1627  QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base, TA_CTL_ENABLE);
1628  QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
1629  TA_CTL_START | TA_CTL_ENABLE);
1630  for (i = 0; i < MAX_CTL_CHECK; i++) {
1631  QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
1632  if (!(test & TA_CTL_BUSY))
1633  break;
1634  }
1635  if (i == MAX_CTL_CHECK) {
1636  if (printk_ratelimit()) {
1637  dev_err(&adapter->pdev->dev,
1638  "failed to read through agent\n");
1639  ret = -EINVAL;
1640  goto out;
1641  }
1642  }
1643  for (i = 0; i < 4; i++) {
1644  QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
1645  *buffer++ = cpu_to_le32(data);
1646  }
1647  addr += 16;
1648  reg_read -= 16;
1649  ret += 16;
1650  }
1651 out:
1652  mutex_unlock(&adapter->ahw->mem_lock);
1653  return mem->size;
1654 }
1655 
1656 static u32
1657 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1658  struct qlcnic_dump_entry *entry, u32 *buffer)
1659 {
1660  entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1661  return 0;
1662 }
1663 
1664 static const struct qlcnic_dump_operations fw_dump_ops[] = {
1665  { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
1666  { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
1667  { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
1668  { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
1669  { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
1670  { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
1671  { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
1672  { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
1673  { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
1674  { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
1675  { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
1676  { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
1677  { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
1678  { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
1679  { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
1680  { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
1681  { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
1682  { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
1683  { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
1684  { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
1685 };
1686 
1687 /* Walk the template and collect dump for each entry in the dump template */
1688 static int
1689 qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1690  u32 size)
1691 {
1692  int ret = 1;
1693  if (size != entry->hdr.cap_size) {
1694  dev_info(dev,
1695  "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
1696  entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1697  dev_info(dev, "Aborting further dump capture\n");
1698  ret = 0;
1699  }
1700  return ret;
1701 }
1702 
1703 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1704 {
1705  u32 *buffer;
1706  char mesg[64];
1707  char *msg[] = {mesg, NULL};
1708  int i, k, ops_cnt, ops_index, dump_size = 0;
1709  u32 entry_offset, dump, no_entries, buf_offset = 0;
1710  struct qlcnic_dump_entry *entry;
1711  struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1712  struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1713 
1714  if (fw_dump->clr) {
1715  dev_info(&adapter->pdev->dev,
1716  "Previous dump not cleared, not capturing dump\n");
1717  return -EIO;
1718  }
1719  /* Calculate the size for dump data area only */
1720  for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1721  if (i & tmpl_hdr->drv_cap_mask)
1722  dump_size += tmpl_hdr->cap_sizes[k];
1723  if (!dump_size)
1724  return -EIO;
1725 
1726  fw_dump->data = vzalloc(dump_size);
1727  if (!fw_dump->data) {
1728  dev_info(&adapter->pdev->dev,
1729  "Unable to allocate (%d KB) for fw dump\n",
1730  dump_size/1024);
1731  return -ENOMEM;
1732  }
1733  buffer = fw_dump->data;
1734  fw_dump->size = dump_size;
1735  no_entries = tmpl_hdr->num_entries;
1736  ops_cnt = ARRAY_SIZE(fw_dump_ops);
1737  entry_offset = tmpl_hdr->offset;
1738  tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1739  tmpl_hdr->sys_info[1] = adapter->fw_version;
1740 
1741  for (i = 0; i < no_entries; i++) {
1742  entry = (void *)tmpl_hdr + entry_offset;
1743  if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1744  entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1745  entry_offset += entry->hdr.offset;
1746  continue;
1747  }
1748  /* Find the handler for this entry */
1749  ops_index = 0;
1750  while (ops_index < ops_cnt) {
1751  if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1752  break;
1753  ops_index++;
1754  }
1755  if (ops_index == ops_cnt) {
1756  dev_info(&adapter->pdev->dev,
1757  "Invalid entry type %d, exiting dump\n",
1758  entry->hdr.type);
1759  goto error;
1760  }
1761  /* Collect dump for this entry */
1762  dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1763  if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
1764  dump))
1765  entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1766  buf_offset += entry->hdr.cap_size;
1767  entry_offset += entry->hdr.offset;
1768  buffer = fw_dump->data + buf_offset;
1769  }
1770  if (dump_size != buf_offset) {
1771  dev_info(&adapter->pdev->dev,
1772  "Captured(%d) and expected size(%d) do not match\n",
1773  buf_offset, dump_size);
1774  goto error;
1775  } else {
1776  fw_dump->clr = 1;
1777  snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
1778  adapter->netdev->name);
1779  dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
1780  fw_dump->size);
1781  /* Send a udev event to notify availability of FW dump */
1782  kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1783  return 0;
1784  }
1785 error:
1786  vfree(fw_dump->data);
1787  return -EINVAL;
1788 }