49 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
71 else if (mbx->ops.write)
72 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
89 if (mbx->ops.check_for_msg)
90 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
107 if (mbx->ops.check_for_ack)
108 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
125 if (mbx->ops.check_for_rst)
126 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
143 if (!countdown || !mbx->ops.check_for_msg)
146 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
172 if (!countdown || !mbx->ops.check_for_ack)
175 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
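Lines 143-146 and 172-175 only show the entry to igb_poll_for_msg()/igb_poll_for_ack(). For context, the surrounding loop is roughly the following; this is a reconstruction from the fields visible above (countdown, mbx->timeout and mbx->usec_delay are assumed from the igb mailbox code, not quoted from this listing):

	int countdown = mbx->timeout;

	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		udelay(mbx->usec_delay);
	}

	/* a zero countdown means we timed out waiting for the VF */
	return countdown ? 0 : -E1000_ERR_MBX;

igb_poll_for_ack() has the same shape, polling mbx->ops.check_for_ack instead.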
207 ret_val = igb_poll_for_msg(hw, mbx_id);
210 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
235 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
239 ret_val = igb_poll_for_ack(hw, mbx_id);
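Lines 207/210 and 235/239 are the bodies of the posted variants: igb_read_posted_mbx() waits for a message and then reads it, while igb_write_posted_mbx() writes and then waits for the VF's ack. A sketch of the write side, reconstructed rather than quoted verbatim:

static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
				u16 mbx_id)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val = -E1000_ERR_MBX;

	/* exit if we cannot write or no timeout is defined */
	if (!mbx->ops.write || !mbx->timeout)
		goto out;

	/* send the message (line 235) ... */
	ret_val = mbx->ops.write(hw, msg, size, mbx_id);

	/* ... then block until the VF acks it (line 239) */
	if (!ret_val)
		ret_val = igb_poll_for_ack(hw, mbx_id);
out:
	return ret_val;
}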
249 if (mbvficr & mask) {
264 static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
270 hw->mbx.stats.reqs++;
283 static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
289 hw->mbx.stats.acks++;
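The test at line 249 (mbvficr & mask) sits in a shared helper that both igb_check_for_msg_pf() and igb_check_for_ack_pf() call with a per-VF mask; on a hit it write-1-clears the bit and the caller bumps the matching statistic (lines 270 and 289). A hedged sketch, assuming igb's rd32()/wr32() accessors and the E1000_MBVFICR_VFREQ_VF1/E1000_MBVFICR_VFACK_VF1 bit definitions:

static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
{
	u32 mbvficr = rd32(E1000_MBVFICR);
	s32 ret_val = -E1000_ERR_MBX;

	if (mbvficr & mask) {
		ret_val = 0;
		/* MBVFICR bits are write-1-to-clear */
		wr32(E1000_MBVFICR, mask);
	}

	return ret_val;
}

static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
{
	s32 ret_val = -E1000_ERR_MBX;

	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
		ret_val = 0;
		hw->mbx.stats.reqs++;		/* line 270 */
	}

	return ret_val;
}

igb_check_for_ack_pf() has the same shape with E1000_MBVFICR_VFACK_VF1 and hw->mbx.stats.acks.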
302 static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
307 if (vflre & (1 << vf_number)) {
310 hw->mbx.stats.rsts++;
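Lines 302-310 check whether the VF behind vf_number has gone through a function-level reset. A sketch, assuming the per-VF reset event register is E1000_VFLRE and is write-1-to-clear (both assumptions taken from the igb register layout, not from this listing):

static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
{
	u32 vflre = rd32(E1000_VFLRE);
	s32 ret_val = -E1000_ERR_MBX;

	if (vflre & (1 << vf_number)) {		/* line 307 */
		ret_val = 0;
		wr32(E1000_VFLRE, 1 << vf_number);
		hw->mbx.stats.rsts++;		/* line 310 */
	}

	return ret_val;
}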
323 static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
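igb_obtain_mbx_lock_pf() arbitrates the shared buffer between PF and VF. A sketch of the idea, assuming the per-VF PF-to-VF mailbox register E1000_P2VMAILBOX(n) and its PFU (PF-in-use) bit:

static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
	s32 ret_val = -E1000_ERR_MBX;
	u32 p2v_mailbox;

	/* try to take ownership of the buffer */
	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);

	/* if the PFU bit stuck, the VF does not hold the lock and we won */
	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
	if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
		ret_val = 0;

	return ret_val;
}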
356 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
361 igb_check_for_msg_pf(hw, vf_number);
362 igb_check_for_ack_pf(hw, vf_number);
365 for (i = 0; i < size; i++)
372 hw->mbx.stats.msgs_tx++;
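Lines 356-372 are the core of igb_write_mbx_pf(): take the lock, flush stale request/ack bits, copy the message into the per-VF mailbox memory, then signal the VF. A reconstruction of the surrounding body, assuming igb's array_wr32() helper and the E1000_VMBMEM(n)/E1000_P2VMAILBOX_STS definitions:

	/* lock the mailbox to prevent a pf/vf race condition (line 356) */
	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_write;

	/* flush any stale msg/ack bits before overwriting the buffer */
	igb_check_for_msg_pf(hw, vf_number);
	igb_check_for_ack_pf(hw, vf_number);

	/* copy the caller's message into the per-VF mailbox memory (line 365) */
	for (i = 0; i < size; i++)
		array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);

	/* interrupt the VF to tell it a message is waiting, release the buffer */
	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);

	hw->mbx.stats.msgs_tx++;		/* line 372 */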
397 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
402 for (i = 0; i < size; i++)
409 hw->mbx.stats.msgs_rx++;
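igb_read_mbx_pf() is the mirror image: take the lock, copy the message out of the per-VF mailbox memory, then ack it so the VF can reuse the buffer. Again a hedged reconstruction, assuming array_rd32() and E1000_P2VMAILBOX_ACK:

	/* lock the mailbox to prevent a pf/vf race condition (line 397) */
	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_read;

	/* copy the message out of the per-VF mailbox memory (line 402) */
	for (i = 0; i < size; i++)
		msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);

	/* acknowledge the message and release the buffer back to the VF */
	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);

	hw->mbx.stats.msgs_rx++;		/* line 409 */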
430 mbx->ops.read = igb_read_mbx_pf;
431 mbx->ops.write = igb_write_mbx_pf;
432 mbx->ops.read_posted = igb_read_posted_mbx;
433 mbx->ops.write_posted = igb_write_posted_mbx;
434 mbx->ops.check_for_msg = igb_check_for_msg_pf;
435 mbx->ops.check_for_ack = igb_check_for_ack_pf;
436 mbx->ops.check_for_rst = igb_check_for_rst_pf;
438 mbx->stats.msgs_tx = 0;
439 mbx->stats.msgs_rx = 0;
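With igb_init_mbx_params_pf() filling in hw->mbx.ops as above, PF-side callers never touch the mailbox registers directly; they go through the ops table. A hypothetical caller sketch (msgbuf, vf and the ack handling are illustrative, not taken from this listing):

	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	s32 err;

	/* pick up a request the VF posted */
	err = hw->mbx.ops.read(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
	if (err)
		return;

	/* handle it, then post a one-word reply back to the same VF */
	msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
	hw->mbx.ops.write(hw, msgbuf, 1, vf);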