#include <linux/slab.h>
#include <linux/sched.h>

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;

		(void *)txq->entries[txq->q.write_ptr].cmd->payload;

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
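	/*
	 * Note: each byte-count table entry appears to pack the length
	 * into the low 12 bits and the station id into the top 4 bits,
	 * as the cpu_to_le16((len & 0xFFF) | (sta_id << 12)) encoding
	 * above suggests.
	 */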
	int txq_id = txq->q.id;

	if (trans->cfg->base_params->shadow_reg_enable) {

			   txq->q.write_ptr | (txq_id << 8));

				       "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);

				    txq->q.write_ptr | (txq_id << 8));

			    txq->q.write_ptr | (txq_id << 8));
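	/*
	 * In all three paths above, the value written to the hardware
	 * write-pointer register packs the queue's write index into the
	 * low byte and the TX queue id into bits 8-15
	 * (write_ptr | (txq_id << 8)).
	 */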
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,

	hi_n_len |= ((addr >> 16) >> 16) & 0xF;
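	/*
	 * ((addr >> 16) >> 16) & 0xF keeps only bits 32-35 of the DMA
	 * address, so each TB presumably stores a 36-bit address: the
	 * low 32 bits in their own field and the top 4 bits folded into
	 * hi_n_len alongside the length.
	 */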
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
	num_tbs = iwl_tfd_get_num_tbs(tfd);

		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);

	for (i = 1; i < num_tbs; i++)
			       iwl_tfd_tb_get_len(tfd, i), dma_dir);
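	/*
	 * Note the unmap loop starts at i = 1: the first TB appears to
	 * be handled separately from the payload chunks (it carries the
	 * command/descriptor rather than data), so only TBs
	 * 1..num_tbs-1 are unmapped here.
	 */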
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],

			iwl_op_mode_free_skb(trans->op_mode, skb);

	memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);
		IWL_ERR(trans, "Error: cannot send more than %d chunks\n",
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
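	/*
	 * Both error paths above guard iwl_tfd_set_tb(): a TFD can only
	 * describe a limited number of TBs ("chunks"), and the DMA
	 * address is validated first, since the packing shown earlier
	 * cannot represent an arbitrary 64-bit address.
	 */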
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,

	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;

		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,

		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);

		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
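	/*
	 * The scheduler's RA/TID translation table evidently packs two
	 * 16-bit entries per 32-bit word: one branch above updates the
	 * high halfword and the other the low halfword, preserving the
	 * neighbouring entry in each case.
	 */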
static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)

		   SCD_QUEUE_STATUS_BITS(txq_id),

			       int sta_id, int tid, int frame_limit, u16 ssn)

		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	iwl_txq_set_inactive(trans, txq_id);

		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

			   (ssn & 0xff) | (txq_id << 8));

			    txq_id, fifo, ssn & 0xff);
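	/*
	 * ssn & 0xff is used both as the initial read/write index and as
	 * the value programmed into the hardware write pointer, which
	 * suggests the ring holds 256 entries and the starting sequence
	 * number is simply taken modulo the ring size.
	 */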
		WARN_ONCE(1, "queue %d not used", txq_id);

	iwl_txq_set_inactive(trans, txq_id);

	bool had_nocopy = false;

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

			copy_size += cmd->len[i];

		cmd_size += cmd->len[i];

	spin_lock_bh(&txq->lock);

		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);

	memset(out_meta, 0, sizeof(*out_meta));

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)

		cmd_pos += cmd->len[i];

		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),

		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
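		/*
		 * Fragments flagged IWL_HCMD_DFL_NOCOPY are excluded from
		 * the copied command buffer sized and filled above; they
		 * are handled in this second pass instead, presumably by
		 * DMA-mapping the caller's buffer directly.
		 */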
			iwl_unmap_tfd(trans, out_meta,

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
			       &out_cmd->hdr, copy_size);

	spin_unlock_bh(&txq->lock);

static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,

	if (txq->q.read_ptr == txq->q.write_ptr)

static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;

		IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",

		iwl_op_mode_nic_error(trans->op_mode);

	iwl_queue_progress(trans_pcie, txq);
740 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
748 spin_lock(&txq->
lock);
750 cmd_index = get_cmd_index(&txq->
q, index);
751 cmd = txq->
entries[cmd_index].cmd;
752 meta = &txq->
entries[cmd_index].meta;
758 struct page *
p = rxb_steal_page(rxb);
760 meta->
source->resp_pkt = pkt;
763 meta->
source->handler_status = handler_status;
766 iwl_hcmd_queue_reclaim(trans, txq_id, index);
771 "HCMD_ACTIVE already clear for command %s\n",
772 trans_pcie_get_cmd_string(trans_pcie,
777 trans_pcie_get_cmd_string(trans_pcie,
784 spin_unlock(&txq->
lock);
787 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
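/*
 * Synchronous host commands below wait for completion for at most
 * HOST_COMPLETE_TIMEOUT jiffies, i.e. two seconds (2 * HZ), before the
 * timeout error path is taken.
 */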
	ret = iwl_enqueue_hcmd(trans, cmd);

			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);

		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));

		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);

			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);

			"Error sending %s: time out after %dms.\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id),

			"Current CMD queue read_ptr %d write_ptr %d\n",

			       "Clearing HCMD_ACTIVE for command %s\n",
			       trans_pcie_get_cmd_string(trans_pcie,

		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));

		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,

	if (WARN_ON(!skb_queue_empty(skbs)))

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

	iwl_queue_progress(trans_pcie, txq);
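	/*
	 * Reclaim summary: the frames between the current read pointer
	 * and last_to_free are collected onto the caller's skb list, the
	 * scheduler byte-count entry for each slot is invalidated, and
	 * iwl_queue_progress() then checks whether the queue has drained
	 * (read_ptr == write_ptr), as in the helper shown earlier.
	 */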