Linux Kernel  3.7.1
ixgbe_mbx.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <[email protected]>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"

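/**
 *  ixgbe_read_mbx - Reads a message from the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to read
 *
 *  returns SUCCESS if it successfully read message from buffer
 **/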
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_MBX;

	/* limit read to size of mailbox */
	if (size > mbx->size)
		size = mbx->size;

	if (mbx->ops.read)
		ret_val = mbx->ops.read(hw, msg, size, mbx_id);

	return ret_val;
}

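/**
 *  ixgbe_write_mbx - Write a message to the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully copied message into the buffer
 **/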
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = 0;

	if (size > mbx->size)
		ret_val = IXGBE_ERR_MBX;

	else if (mbx->ops.write)
		ret_val = mbx->ops.write(hw, msg, size, mbx_id);

	return ret_val;
}

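/**
 *  ixgbe_check_for_msg - checks to see if someone sent us mail
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to check
 *
 *  returns SUCCESS if the Status bit was found or else ERR_MBX
 **/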
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_MBX;

	if (mbx->ops.check_for_msg)
		ret_val = mbx->ops.check_for_msg(hw, mbx_id);

	return ret_val;
}

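/**
 *  ixgbe_check_for_ack - checks to see if someone sent us ACK
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to check
 *
 *  returns SUCCESS if the Status bit was found or else ERR_MBX
 **/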
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_MBX;

	if (mbx->ops.check_for_ack)
		ret_val = mbx->ops.check_for_ack(hw, mbx_id);

	return ret_val;
}

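/**
 *  ixgbe_check_for_rst - checks to see if other side has reset
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to check
 *
 *  returns SUCCESS if the Status bit was found or else ERR_MBX
 **/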
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_MBX;

	if (mbx->ops.check_for_rst)
		ret_val = mbx->ops.check_for_rst(hw, mbx_id);

	return ret_val;
}

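/**
 *  ixgbe_poll_for_msg - Wait for message notification
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully received a message notification
 **/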
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	if (!countdown || !mbx->ops.check_for_msg)
		goto out;

	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		udelay(mbx->usec_delay);
	}

out:
	return countdown ? 0 : IXGBE_ERR_MBX;
}

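/**
 *  ixgbe_poll_for_ack - Wait for message acknowledgement
 *  @hw: pointer to the HW structure
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully received a message acknowledgement
 **/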
static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	if (!countdown || !mbx->ops.check_for_ack)
		goto out;

	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		udelay(mbx->usec_delay);
	}

out:
	return countdown ? 0 : IXGBE_ERR_MBX;
}

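/**
 *  ixgbe_read_posted_mbx - Wait for message notification and receive message
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully received a message notification and
 *  copied it into the receive buffer.
 **/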
static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
				 u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_MBX;

	if (!mbx->ops.read)
		goto out;

	ret_val = ixgbe_poll_for_msg(hw, mbx_id);

	/* if ack received read message, otherwise we timed out */
	if (!ret_val)
		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
	return ret_val;
}

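/**
 *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @mbx_id: id of mailbox to write
 *
 *  returns SUCCESS if it successfully copied message into the buffer and
 *  received an ack to that message within delay * timeout period
 **/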
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
				  u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_MBX;

	/* exit if either we can't write or there isn't a defined timeout */
	if (!mbx->ops.write || !mbx->timeout)
		goto out;

	/* send msg */
	ret_val = mbx->ops.write(hw, msg, size, mbx_id);

	/* if msg sent wait until we receive an ack */
	if (!ret_val)
		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
out:
	return ret_val;
}

static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
	u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
	s32 ret_val = IXGBE_ERR_MBX;

	if (mbvficr & mask) {
		ret_val = 0;
		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
	}

	return ret_val;
}

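/**
 *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/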
static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	s32 ret_val = IXGBE_ERR_MBX;
	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
	u32 vf_bit = vf_number % 16;

	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
				    index)) {
		ret_val = 0;
		hw->mbx.stats.reqs++;
	}

	return ret_val;
}

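/**
 *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/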
static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	s32 ret_val = IXGBE_ERR_MBX;
	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
	u32 vf_bit = vf_number % 16;

	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
				    index)) {
		ret_val = 0;
		hw->mbx.stats.acks++;
	}

	return ret_val;
}

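/**
 *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/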
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	u32 reg_offset = (vf_number < 32) ? 0 : 1;
	u32 vf_shift = vf_number % 32;
	u32 vflre = 0;
	s32 ret_val = IXGBE_ERR_MBX;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
		break;
	case ixgbe_mac_X540:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
		break;
	default:
		break;
	}

	if (vflre & (1 << vf_shift)) {
		ret_val = 0;
		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
		hw->mbx.stats.rsts++;
	}

	return ret_val;
}

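/**
 *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  return SUCCESS if we obtained the mailbox lock
 **/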
static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	s32 ret_val = IXGBE_ERR_MBX;
	u32 p2v_mailbox;

	/* Take ownership of the buffer */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);

	/* reserve mailbox for vf use */
	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
		ret_val = 0;

	return ret_val;
}

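/**
 *  ixgbe_write_mbx_pf - Places a message in the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @vf_number: the VF index
 *
 *  returns SUCCESS if it successfully copied message into the buffer
 **/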
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			      u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbe_check_for_msg_pf(hw, vf_number);
	ixgbe_check_for_ack_pf(hw, vf_number);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);

	/* Interrupt VF to tell it a message has been sent and release buffer*/
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out_no_write:
	return ret_val;

}

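/**
 *  ixgbe_read_mbx_pf - Read a message from the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *  @vf_number: the VF index
 *
 *  This function copies a message from the mailbox buffer to the caller's
 *  memory buffer.  The presumption is that the caller knows that there was
 *  a message due to a VF request so no polling for message is needed.
 **/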
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			     u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_read;

	/* copy the message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);

	/* Acknowledge the message and release buffer */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}

#ifdef CONFIG_PCI_IOV

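/**
 *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
 *  @hw: pointer to the HW structure
 *
 *  Initializes the hw->mbx struct to correct values for pf mailbox
 */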
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540)
		return;

	mbx->timeout = 0;
	mbx->usec_delay = 0;

	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	mbx->size = IXGBE_VFMAILBOX_SIZE;
}
#endif /* CONFIG_PCI_IOV */

struct ixgbe_mbx_operations mbx_ops_generic = {
	.read                   = ixgbe_read_mbx_pf,
	.write                  = ixgbe_write_mbx_pf,
	.read_posted            = ixgbe_read_posted_mbx,
	.write_posted           = ixgbe_write_posted_mbx,
	.check_for_msg          = ixgbe_check_for_msg_pf,
	.check_for_ack          = ixgbe_check_for_ack_pf,
	.check_for_rst          = ixgbe_check_for_rst_pf,
};