/***************************************************************************
 * Copyright (c) 2005-2009, Broadcom Corporation.
 *
 * Name: crystalhd_hw.c
 *
 * Description:
 *		BCM70010 Linux driver HW layer.
 *
 **********************************************************************
 * This file is part of the crystalhd device driver.
 *
 * This driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver. If not, see <http://www.gnu.org/licenses/>.
 **********************************************************************/

#include "crystalhd.h"

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

/* Functions internal to this file */

static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
{
	bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
	bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
}

static void crystalhd_start_dram(struct crystalhd_adp *adp)
{
	bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) << 0) |
	/* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) << 4) | // trcd */
		      ((15 / 5 - 1) << 7) |	/* trp */
		      ((10 / 5 - 1) << 10) |	/* trrd */
		      ((15 / 5 + 1) << 12) |	/* twr */
		      ((2 + 1) << 16) |		/* twtr */
		      ((70 / 5 - 2) << 19) |	/* trfc */
		      (0 << 23));

	bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
	bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
	bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
	bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
	bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
	bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
	bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
	/* setting the refresh rate here */
	bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
}
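
/*
 * For reference, the SDRAM_PARAM value programmed above decodes as
 * follows (illustrative arithmetic only, assuming the 5ns DRAM clock
 * period used in the expressions; field widths are not spelled out in
 * this file, only the shifts):
 *
 *	tras = 40/5 - 1 = 7	(shift 0)
 *	trp  = 15/5 - 1 = 2	(shift 7)
 *	trrd = 10/5 - 1 = 1	(shift 10)
 *	twr  = 15/5 + 1 = 4	(shift 12)
 *	twtr = 2 + 1    = 3	(shift 16)
 *	trfc = 70/5 - 2 = 12	(shift 19)
 *
 * SDRAM_REF_PARAM gets (1 << 12) | 96: what appears to be an enable
 * bit at bit 12 plus an initial refresh interval of 96;
 * crystalhd_hw_set_core_clock() recomputes that interval as
 * (7 * core_clock_mhz / 16) whenever the core clock changes.
 */
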
static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
{
	union link_misc_perst_deco_ctrl rst_deco_cntrl;
	union link_misc_perst_clk_ctrl rst_clk_cntrl;
	uint32_t temp;

	/*
	 * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
	 * delay to allow PLL to lock Clear alternate clock, stop clock bits
	 */
	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.pll_pwr_dn = 0;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.stop_core_clk = 0;
	rst_clk_cntrl.sel_alt_clk = 0;

	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	/*
	 * Bus Arbiter Timeout: GISB_ARBITER_TIMER
	 * Set internal bus arbiter timeout to 40us based on core clock speed
	 * (63MHz * 40us = 0x9D8)
	 */
	crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);

	/*
	 * Decoder clocks: MISC_PERST_DECODER_CTRL
	 * Enable clocks while 7412 reset is asserted, delay
	 * De-assert 7412 reset
	 */
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.stop_bcm_7412_clk = 0;
	rst_deco_cntrl.bcm7412_rst = 1;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
	msleep_interruptible(10);

	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.bcm7412_rst = 0;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
	msleep_interruptible(50);

	/* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
	crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);

	/* Clear bit 29 of 0x404 */
	temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
	temp &= ~BC_BIT(29);
	crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

	/* 2.5V regulator must be set to 2.6 volts (+6%) */
	/* FIXME: jarod: what's the point of this reg read? */
	temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
	crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);

	return true;
}
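
/*
 * Arbiter timeout arithmetic, for reference (illustrative only): at
 * the full 63MHz core clock, 40us corresponds to 63 * 40 = 2520 =
 * 0x9D8 ticks, the value written to GISB_ARBITER_TIMER above;
 * crystalhd_put_in_reset() applies the same formula at the 6.75MHz
 * alternate clock, 6.75 * 40 = 270 = 0x10E.
 */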

static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
{
	union link_misc_perst_deco_ctrl rst_deco_cntrl;
	union link_misc_perst_clk_ctrl rst_clk_cntrl;
	uint32_t temp;

	/*
	 * Decoder clocks: MISC_PERST_DECODER_CTRL
	 * Assert 7412 reset, delay
	 * Assert 7412 stop clock
	 */
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.stop_bcm_7412_clk = 1;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
	msleep_interruptible(50);

	/* Bus Arbiter Timeout: GISB_ARBITER_TIMER
	 * Set internal bus arbiter timeout to 40us based on core clock speed
	 * (6.75MHZ * 40us = 0x10E)
	 */
	crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);

	/* Link clocks: MISC_PERST_CLOCK_CTRL
	 * Stop core clk, delay
	 * Set alternate clk, delay, set PLL power down
	 */
	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.stop_core_clk = 1;
	rst_clk_cntrl.sel_alt_clk = 1;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.pll_pwr_dn = 1;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);

	/*
	 * Read and restore the Transaction Configuration Register
	 * after core reset
	 */
	temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);

	/*
	 * Link core soft reset: MISC3_RESET_CTRL
	 * - Write BIT[0]=1 and read it back for core reset to take place
	 */
	crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
	msleep_interruptible(50);

	/* restore the transaction configuration register */
	crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

	return true;
}

static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
{
	union intr_mask_reg intr_mask;

	intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
	intr_mask.mask_pcie_err = 1;
	intr_mask.mask_pcie_rbusmast_err = 1;
	intr_mask.mask_pcie_rgr_bridge = 1;
	intr_mask.mask_rx_done = 1;
	intr_mask.mask_rx_err = 1;
	intr_mask.mask_tx_done = 1;
	intr_mask.mask_tx_err = 1;
	crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);

	return;
}

static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
{
	union intr_mask_reg intr_mask;

	intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
	intr_mask.mask_pcie_err = 1;
	intr_mask.mask_pcie_rbusmast_err = 1;
	intr_mask.mask_pcie_rgr_bridge = 1;
	intr_mask.mask_rx_done = 1;
	intr_mask.mask_rx_err = 1;
	intr_mask.mask_tx_done = 1;
	intr_mask.mask_tx_err = 1;
	crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);

	return;
}

static void crystalhd_clear_errors(struct crystalhd_adp *adp)
{
	uint32_t reg;

	/* FIXME: jarod: wouldn't we want to write a 0 to the reg?
	 * Or does the write clear the bits specified? */
	reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
	if (reg)
		crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);

	reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
	if (reg)
		crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);

	reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
	if (reg)
		crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
}

static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
{
	uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

	if (intr_sts) {
		crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);

		/* Write End Of Interrupt for PCIE */
		crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
	}
}

static void crystalhd_soft_rst(struct crystalhd_adp *adp)
{
	uint32_t val;

	/* Assert c011 soft reset */
	bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
	msleep_interruptible(50);

	/* Release c011 soft reset */
	bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);

	/* Disable Stuffing.. */
	val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
	val |= BC_BIT(8);
	crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
}

static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
{
	uint32_t i = 0, reg;

	crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));

	crystalhd_reg_wr(adp, AES_CMD, 0);
	crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
	crystalhd_reg_wr(adp, AES_CMD, 0x1);

	/* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
	for (i = 0; i < 100; ++i) {
		reg = crystalhd_reg_rd(adp, AES_STATUS);
		if (reg & 0x1)
			return true;
		msleep_interruptible(10);
	}

	return false;
}


static bool crystalhd_start_device(struct crystalhd_adp *adp)
{
	uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;

	BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");

	reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
	reg_pwrmgmt &= ~ASPM_L1_ENABLE;

	crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);

	if (!crystalhd_bring_out_of_rst(adp)) {
		BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
		return false;
	}

	crystalhd_disable_interrupts(adp);

	crystalhd_clear_errors(adp);

	crystalhd_clear_interrupts(adp);

	crystalhd_enable_interrupts(adp);

	/* Enable the option for getting the total no. of DWORDS
	 * that have been transferred by the RXDMA engine
	 */
	dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
	dbg_options |= 0x10;
	crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);

	/* Enable PCI Global Control options */
	glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
	glb_cntrl |= 0x100;
	glb_cntrl |= 0x8000;
	crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);

	crystalhd_enable_interrupts(adp);

	crystalhd_soft_rst(adp);
	crystalhd_start_dram(adp);
	crystalhd_enable_uarts(adp);

	return true;
}

static bool crystalhd_stop_device(struct crystalhd_adp *adp)
{
	uint32_t reg;

	BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
	/* Clear and disable interrupts */
	crystalhd_disable_interrupts(adp);
	crystalhd_clear_errors(adp);
	crystalhd_clear_interrupts(adp);

	if (!crystalhd_put_in_reset(adp))
		BCMLOG_ERR("Failed to Put Link To Reset State\n");

	reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
	reg |= ASPM_L1_ENABLE;
	crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);

	/* Set PCI Clk Req */
	reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
	reg |= PCI_CLK_REQ_ENABLE;
	crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);

	return true;
}

static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
{
	unsigned long flags = 0;
	struct crystalhd_rx_dma_pkt *temp = NULL;

	if (!hw)
		return NULL;

	spin_lock_irqsave(&hw->lock, flags);
	temp = hw->rx_pkt_pool_head;
	if (temp) {
		hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
		temp->dio_req = NULL;
		temp->pkt_tag = 0;
		temp->flags = 0;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return temp;
}

static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
				     struct crystalhd_rx_dma_pkt *pkt)
{
	unsigned long flags = 0;

	if (!hw || !pkt)
		return;

	spin_lock_irqsave(&hw->lock, flags);
	pkt->next = hw->rx_pkt_pool_head;
	hw->rx_pkt_pool_head = pkt;
	spin_unlock_irqrestore(&hw->lock, flags);
}

/*
 * Callback from TX - IOQ deletion.
 *
 * This routine releases the TX DMA rings that were allocated
 * by the setup_dma_rings interface.
 *
 * Memory is allocated per DMA ring. This is just a placeholder
 * to be able to create the dio queues.
 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
}

/*
 * Rx Packet release callback..
 *
 * Release all user-mapped capture buffers and our DMA packets
 * back to our free pool. The actual cleanup of the DMA
 * ring descriptors happens during DMA ring release.
 */
static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
{
	struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
	struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;

	if (!pkt || !hw) {
		BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
		return;
	}

	if (pkt->dio_req)
		crystalhd_unmap_dio(hw->adp, pkt->dio_req);
	else
		BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);

	crystalhd_hw_free_rx_pkt(hw, pkt);
}

#define crystalhd_hw_delete_ioq(adp, q)		\
	if (q) {				\
		crystalhd_delete_dioq(adp, q);	\
		q = NULL;			\
	}

static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
	if (!hw)
		return;

	BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
	crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
	crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
}

#define crystalhd_hw_create_ioq(sts, hw, q, cb)			\
do {								\
	sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);	\
	if (sts != BC_STS_SUCCESS)				\
		goto hw_create_ioq_err;				\
} while (0)

/*
 * Create IOQs..
 *
 * TX - Active & Free
 * RX - Active, Ready and Free.
 */
static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
{
	enum BC_STATUS sts = BC_STS_SUCCESS;

	if (!hw) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
				crystalhd_tx_desc_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
				crystalhd_tx_desc_rel_call_back);

	crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
				crystalhd_rx_pkt_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
				crystalhd_rx_pkt_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
				crystalhd_rx_pkt_rel_call_back);

	return sts;

hw_create_ioq_err:
	crystalhd_hw_delete_ioqs(hw);

	return sts;
}


static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
				   bool b_188_byte_pkts, uint8_t flags)
{
	uint32_t base, end, writep, readp;
	uint32_t cpbSize, cpbFullness, fifoSize;

	if (flags & 0x02) { /* ASF Bit is set */
		base   = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
		end    = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
		writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
		readp  = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
	} else if (b_188_byte_pkts) { /* Encrypted 188-byte packets */
		base   = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
		end    = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
		writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
		readp  = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
	} else {
		base   = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
		end    = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
		writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
		readp  = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
	}

	cpbSize = end - base;
	if (writep >= readp)
		cpbFullness = writep - readp;
	else
		cpbFullness = (end - base) - (readp - writep);

	fifoSize = cpbSize - cpbFullness;

	if (fifoSize < BC_INFIFO_THRESHOLD)
		return true;

	if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
		return true;

	return false;
}
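
/*
 * Worked example of the fullness math above (values illustrative):
 * with base = 0x1000 and end = 0x5000 (cpbSize = 0x4000), readp =
 * 0x4800 and writep = 0x1800, the write pointer has wrapped, so
 * cpbFullness = 0x4000 - (0x4800 - 0x1800) = 0x1000 and fifoSize =
 * 0x3000. A request is then refused (reported "full") whenever fewer
 * than BC_INFIFO_THRESHOLD bytes would remain free after queuing it.
 */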

static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
						   uint32_t list_id, enum BC_STATUS cs)
{
	struct tx_dma_pkt *tx_req;

	if (!hw || !list_id) {
		BCMLOG_ERR("Invalid Arg..\n");
		return BC_STS_INV_ARG;
	}

	hw->pwr_lock--;

	tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
	if (!tx_req) {
		if (cs != BC_STS_IO_USER_ABORT)
			BCMLOG_ERR("Find and Fetch Did not find req\n");
		return BC_STS_NO_DATA;
	}

	if (tx_req->call_back) {
		tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
		tx_req->dio_req = NULL;
		tx_req->cb_event = NULL;
		tx_req->call_back = NULL;
	} else {
		BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
		       tx_req->list_tag);
	}

	/* Now put the tx_list back in the FreeQ */
	tx_req->list_tag = 0;

	return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
}

static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
	uint32_t err_mask, tmp;
	unsigned long flags = 0;

	err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
		   MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
		   MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

	if (!(err_sts & err_mask))
		return false;

	BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);

	tmp = err_mask;

	if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
		tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

	if (tmp) {
		spin_lock_irqsave(&hw->lock, flags);
		/* reset list index.*/
		hw->tx_list_post_index = 0;
		spin_unlock_irqrestore(&hw->lock, flags);
	}

	tmp = err_sts & err_mask;
	crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

	return true;
}

static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
	uint32_t err_mask, tmp;
	unsigned long flags = 0;

	err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
		   MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
		   MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

	if (!(err_sts & err_mask))
		return false;

	BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);

	tmp = err_mask;

	if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
		tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

	if (tmp) {
		spin_lock_irqsave(&hw->lock, flags);
		/* reset list index.*/
		hw->tx_list_post_index = 0;
		spin_unlock_irqrestore(&hw->lock, flags);
	}

	tmp = err_sts & err_mask;
	crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

	return true;
}

static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
{
	uint32_t err_sts;

	if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
					     BC_STS_SUCCESS);

	if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
					     BC_STS_SUCCESS);

	if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
			 INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
		/* No error mask set.. */
		return;
	}

	/* Handle Tx errors. */
	err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);

	if (crystalhd_tx_list0_handler(hw, err_sts))
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
					     BC_STS_ERROR);

	if (crystalhd_tx_list1_handler(hw, err_sts))
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
					     BC_STS_ERROR);

	hw->stats.tx_errors++;
}

static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
				   uint32_t ul_desc_index, uint32_t cnt)
{
	uint32_t ix, ll = 0;

	if (!p_dma_desc || !cnt)
		return;

	/* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
	 * setting ll (log level, I presume) to non-zero? */
	if (!ll)
		return;

	/* Index with ix (not ul_desc_index) so each descriptor in the
	 * requested range is dumped, not the first one repeatedly. */
	for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
		BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
		       ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
		       ix,
		       p_dma_desc[ix].buff_addr_high,
		       p_dma_desc[ix].buff_addr_low,
		       p_dma_desc[ix].next_desc_addr_high,
		       p_dma_desc[ix].next_desc_addr_low,
		       p_dma_desc[ix].xfer_size,
		       p_dma_desc[ix].intr_enable,
		       p_dma_desc[ix].last_rec_indicator);
	}
}

static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
					     struct dma_descriptor *desc,
					     dma_addr_t desc_paddr_base,
					     uint32_t sg_cnt, uint32_t sg_st_ix,
					     uint32_t sg_st_off, uint32_t xfr_sz)
{
	uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
	dma_addr_t desc_phy_addr = desc_paddr_base;
	union addr_64 addr_temp;

	if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
	    (!sg_cnt && !ioreq->uinfo.dir_tx)) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	for (ix = 0; ix < sg_cnt; ix++) {

		/* Setup SGLE index. */
		sg_ix = ix + sg_st_ix;

		/* Get SGLE length */
		len = crystalhd_get_sgle_len(ioreq, sg_ix);
		if (len % 4) {
			BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
			return BC_STS_NOT_IMPL;
		}
		/* Setup DMA desc with Phy addr & Length at current index. */
		addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
		if (sg_ix == sg_st_ix) {
			addr_temp.full_addr += sg_st_off;
			len -= sg_st_off;
		}
		memset(&desc[ix], 0, sizeof(desc[ix]));
		desc[ix].buff_addr_low = addr_temp.low_part;
		desc[ix].buff_addr_high = addr_temp.high_part;
		desc[ix].dma_dir = ioreq->uinfo.dir_tx;

		/* Chain DMA descriptor. */
		addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
		desc[ix].next_desc_addr_low = addr_temp.low_part;
		desc[ix].next_desc_addr_high = addr_temp.high_part;

		if ((count + len) > xfr_sz)
			len = xfr_sz - count;

		/* Debug.. */
		if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
			BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
				   len, ix, count, xfr_sz, sg_cnt);
			return BC_STS_ERROR;
		}
		/* Length expects Multiple of 4 */
		desc[ix].xfer_size = (len / 4);

		crystalhd_hw_dump_desc(desc, ix, 1);

		count += len;
		desc_phy_addr += sizeof(struct dma_descriptor);
	}

	last_desc_ix = ix - 1;

	if (ioreq->fb_size) {
		memset(&desc[ix], 0, sizeof(desc[ix]));
		addr_temp.full_addr = ioreq->fb_pa;
		desc[ix].buff_addr_low = addr_temp.low_part;
		desc[ix].buff_addr_high = addr_temp.high_part;
		desc[ix].dma_dir = ioreq->uinfo.dir_tx;
		desc[ix].xfer_size = 1;
		desc[ix].fill_bytes = 4 - ioreq->fb_size;
		count += ioreq->fb_size;
		last_desc_ix++;
	}

	/* setup last descriptor..*/
	desc[last_desc_ix].last_rec_indicator = 1;
	desc[last_desc_ix].next_desc_addr_low = 0;
	desc[last_desc_ix].next_desc_addr_high = 0;
	desc[last_desc_ix].intr_enable = 1;

	crystalhd_hw_dump_desc(desc, last_desc_ix, 1);

	if (count != xfr_sz) {
		BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
		return BC_STS_ERROR;
	}

	return BC_STS_SUCCESS;
}
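
/*
 * Descriptor-chain shape produced above, for reference: one
 * dma_descriptor per scatter/gather element, each holding the physical
 * address of the next descriptor, with the tail marked by
 * last_rec_indicator = 1, a null next pointer and intr_enable = 1 so
 * the engine raises a completion interrupt only at the end of the
 * chain. xfer_size is in 32-bit words (len / 4), and an extra fill
 * descriptor (ioreq->fb_size) pads transfers whose total length is not
 * a multiple of 4 bytes.
 */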

static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
						     struct dma_desc_mem *pdesc_mem,
						     uint32_t *uv_desc_index)
{
	struct dma_descriptor *desc = NULL;
	dma_addr_t desc_paddr_base = 0;
	uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
	uint32_t xfr_sz = 0;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	/* Check params.. */
	if (!ioreq || !pdesc_mem || !uv_desc_index) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
	    !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
		BCMLOG_ERR("UV offset for TX??\n");
		return BC_STS_INV_ARG;
	}

	desc = pdesc_mem->pdma_desc_start;
	desc_paddr_base = pdesc_mem->phy_addr;

	if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
		sg_cnt = ioreq->sg_cnt;
		xfr_sz = ioreq->uinfo.xfr_len;
	} else {
		sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
		xfr_sz = ioreq->uinfo.uv_offset;
	}

	sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
				     sg_st_ix, sg_st_off, xfr_sz);

	if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
		return sts;

	/* Prepare for UV mapping.. */
	desc = &pdesc_mem->pdma_desc_start[sg_cnt];
	desc_paddr_base = pdesc_mem->phy_addr +
			  (sg_cnt * sizeof(struct dma_descriptor));

	/* Done with desc addr.. now update sg stuff.*/
	sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
	xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
	sg_st_ix  = ioreq->uinfo.uv_sg_ix;
	sg_st_off = ioreq->uinfo.uv_sg_off;

	sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
				     sg_st_ix, sg_st_off, xfr_sz);
	if (sts != BC_STS_SUCCESS)
		return sts;

	*uv_desc_index = sg_st_ix;

	return sts;
}

static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
	if (!(dma_cntrl & DMA_START_BIT)) {
		dma_cntrl |= DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
				 dma_cntrl);
	}

	return;
}

/* _CHECK_THIS_
 *
 * Verify whether the Stop generates a completion interrupt; if it
 * does not, add polling here.
 */
static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl, cnt = 30;
	uint32_t l1 = 1, l2 = 1;
	unsigned long flags = 0;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);

	BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");

	if (!(dma_cntrl & DMA_START_BIT)) {
		BCMLOG(BCMLOG_DBG, "Already Stopped\n");
		return BC_STS_SUCCESS;
	}

	crystalhd_disable_interrupts(hw->adp);

	/* Issue stop to HW */
	/* This bit when set gave problems. Please check*/
	dma_cntrl &= ~DMA_START_BIT;
	crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);

	BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");

	/* Poll for 3 seconds (30 * 100ms) on both lists.. */
	while ((l1 || l2) && cnt) {

		if (l1) {
			l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
			l1 &= DMA_START_BIT;
		}

		if (l2) {
			l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
			l2 &= DMA_START_BIT;
		}

		msleep_interruptible(100);

		cnt--;
	}

	if (!cnt) {
		BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
		crystalhd_enable_interrupts(hw->adp);
		return BC_STS_ERROR;
	}

	spin_lock_irqsave(&hw->lock, flags);
	hw->tx_list_post_index = 0;
	spin_unlock_irqrestore(&hw->lock, flags);
	BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
	crystalhd_enable_interrupts(hw->adp);

	return BC_STS_SUCCESS;
}

static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
{
	/*
	 * The read and write pointers of the PIB delivery queue live
	 * at the 0th and 1st locations of the circular list.
	 */
	uint32_t Q_addr;
	uint32_t pib_cnt, r_offset, w_offset;

	Q_addr = hw->pib_del_Q_addr;

	/* Get the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	if (r_offset == w_offset)
		return 0;	/* Queue is empty */

	if (w_offset > r_offset)
		pib_cnt = w_offset - r_offset;
	else
		pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
			  (r_offset + MIN_PIB_Q_DEPTH);

	if (pib_cnt > MAX_PIB_Q_DEPTH) {
		BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
		return 0;
	}

	return pib_cnt;
}
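
/*
 * Example of the wrapped-count arithmetic above (values purely
 * illustrative; MIN_PIB_Q_DEPTH and MAX_PIB_Q_DEPTH are defined
 * elsewhere): assuming MIN = 4 and MAX = 256, with r_offset = 250 and
 * w_offset = 10 the write pointer has wrapped, so the pending count is
 * (10 + 256) - (250 + 4) = 12 PIBs.
 */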

static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
{
	uint32_t Q_addr;
	uint32_t addr_entry, r_offset, w_offset;

	Q_addr = hw->pib_del_Q_addr;

	/* Get the Read Pointer - the 0th location is the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer - the 1st location is the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	/* Queue is empty */
	if (r_offset == w_offset)
		return 0;

	if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
		return 0;

	/* Get the Actual Address of the PIB */
	crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
			 1, &addr_entry);

	/* Increment the Read Pointer */
	r_offset++;

	if (MAX_PIB_Q_DEPTH == r_offset)
		r_offset = MIN_PIB_Q_DEPTH;

	/* Write the read pointer back to its location */
	crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);

	return addr_entry;
}

static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
{
	uint32_t Q_addr;
	uint32_t r_offset, w_offset, n_offset;

	Q_addr = hw->pib_rel_Q_addr;

	/* Get the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	if ((r_offset < MIN_PIB_Q_DEPTH) ||
	    (r_offset >= MAX_PIB_Q_DEPTH))
		return false;

	n_offset = w_offset + 1;

	if (MAX_PIB_Q_DEPTH == n_offset)
		n_offset = MIN_PIB_Q_DEPTH;

	if (r_offset == n_offset)
		return false; /* should never happen */

	/* Write the DRAM ADDR to the Queue at Next Offset */
	crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
			 1, &addr_to_rel);

	/* Put the New value of the write pointer in Queue */
	crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);

	return true;
}
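
/*
 * Both PIB queues share this in-DRAM layout, for reference: word 0
 * holds the read offset, word 1 the write offset, and words
 * [MIN_PIB_Q_DEPTH, MAX_PIB_Q_DEPTH) hold the entries (DRAM addresses
 * of PIBs). Offsets wrap from MAX_PIB_Q_DEPTH back to MIN_PIB_Q_DEPTH;
 * a queue is empty when read == write, and the release path above
 * refuses a write that would advance the write offset onto the read
 * offset.
 */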

static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
{
	if (!src_pib || !dst_pib) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	dst_pib->timeStamp = 0;
	dst_pib->picture_number = src_pib->ppb.picture_number;
	dst_pib->width = src_pib->ppb.width;
	dst_pib->height = src_pib->ppb.height;
	dst_pib->chroma_format = src_pib->ppb.chroma_format;
	dst_pib->pulldown = src_pib->ppb.pulldown;
	dst_pib->flags = src_pib->ppb.flags;
	dst_pib->sess_num = src_pib->ptsStcOffset;
	dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
	dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
	dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
	dst_pib->frame_rate = src_pib->resolution;
	return;
}

static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
{
	unsigned int cnt;
	struct c011_pib src_pib;
	uint32_t pib_addr, pib_cnt;
	struct BC_PIC_INFO_BLOCK *AppPib;
	struct crystalhd_rx_dma_pkt *rx_pkt = NULL;

	pib_cnt = crystalhd_get_pib_avail_cnt(hw);

	if (!pib_cnt)
		return;

	for (cnt = 0; cnt < pib_cnt; cnt++) {

		pib_addr = crystalhd_get_addr_from_pib_Q(hw);
		crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
				 (uint32_t *)&src_pib);

		if (src_pib.bFormatChange) {
			rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
			if (!rx_pkt)
				return;
			rx_pkt->flags = 0;
			rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
			AppPib = &rx_pkt->pib;
			cpy_pib_to_app(&src_pib, AppPib);

			BCMLOG(BCMLOG_DBG,
			       "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
			       rx_pkt->pib.picture_number,
			       rx_pkt->pib.aspect_ratio,
			       rx_pkt->pib.chroma_format,
			       rx_pkt->pib.colour_primaries,
			       rx_pkt->pib.frame_rate,
			       rx_pkt->pib.height,
			       rx_pkt->pib.height,
			       rx_pkt->pib.n_drop,
			       rx_pkt->pib.pulldown,
			       rx_pkt->pib.ycom);

			crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);

		}

		crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
	}
}

static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if (!(dma_cntrl & DMA_START_BIT)) {
		dma_cntrl |= DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if (!(dma_cntrl & DMA_START_BIT)) {
		dma_cntrl |= DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	return;
}

static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl = 0, count = 30;
	uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if ((dma_cntrl & DMA_START_BIT)) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if ((dma_cntrl & DMA_START_BIT)) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	/* Poll for 3 seconds (30 * 100ms) on both lists.. */
	while ((l0y || l0uv || l1y || l1uv) && count) {

		if (l0y) {
			l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
			l0y &= DMA_START_BIT;
			if (!l0y)
				hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
		}

		if (l1y) {
			l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
			l1y &= DMA_START_BIT;
			if (!l1y)
				hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
		}

		if (l0uv) {
			l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
			l0uv &= DMA_START_BIT;
			if (!l0uv)
				hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
		}

		if (l1uv) {
			l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
			l1uv &= DMA_START_BIT;
			if (!l1uv)
				hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
		}
		msleep_interruptible(100);
		count--;
	}

	hw->rx_list_post_index = 0;

	BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
	       count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}

static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
{
	uint32_t y_low_addr_reg, y_high_addr_reg;
	uint32_t uv_low_addr_reg, uv_high_addr_reg;
	union addr_64 desc_addr;
	unsigned long flags;

	if (!hw || !rx_pkt) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
		BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
		return BC_STS_INV_ARG;
	}

	spin_lock_irqsave(&hw->rx_lock, flags);
	/* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
	if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
		spin_unlock_irqrestore(&hw->rx_lock, flags);
		return BC_STS_BUSY;
	}

	if (!hw->rx_list_post_index) {
		y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
		y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
		uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
		uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
	} else {
		y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
		y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
		uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
		uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
	}
	rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
	hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
	if (rx_pkt->uv_phy_addr)
		hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
	hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
	spin_unlock_irqrestore(&hw->rx_lock, flags);

	crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);

	crystalhd_start_rx_dma_engine(hw);
	/* Program the Y descriptor */
	desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
	crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
	crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);

	if (rx_pkt->uv_phy_addr) {
		/* Program the UV descriptor */
		desc_addr.full_addr = rx_pkt->uv_phy_addr;
		crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
		crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
	}

	return BC_STS_SUCCESS;
}
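
/*
 * RX posting notes, for reference: the hardware has two RX DMA lists
 * (DMA_ENGINE_CNT) with separate Y and UV descriptor chains per list.
 * Writing the low descriptor address with bit 0 set (| 0x01) is what
 * marks the list valid and arms the transfer, so all bookkeeping
 * (pkt_tag, rx_list_sts, active-queue insertion) must be complete
 * before that final register write.
 */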

static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
						 struct crystalhd_rx_dma_pkt *rx_pkt)
{
	enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);

	if (sts == BC_STS_BUSY)
		crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
				   false, rx_pkt->pkt_tag);

	return sts;
}

static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
			       uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
{
	uint32_t y_dn_sz_reg, uv_dn_sz_reg;

	if (!list_index) {
		y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
		uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
	} else {
		y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
		uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
	}

	*y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
	*uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
}

/*
 * This function should be called only after making sure that the two DMA
 * lists are free. It does not check whether DMAs are active before
 * turning off the DMA.
 */
static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl, aspm;

	hw->stop_pending = 0;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if (dma_cntrl & DMA_START_BIT) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if (dma_cntrl & DMA_START_BIT) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}
	hw->rx_list_post_index = 0;

	aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
	aspm |= ASPM_L1_ENABLE;
	/* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
	crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}

static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
					    enum BC_STATUS comp_sts)
{
	struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
	uint32_t y_dw_dnsz, uv_dw_dnsz;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	if (!hw || list_index >= DMA_ENGINE_CNT) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
					       hw->rx_pkt_tag_seed + list_index);
	if (!rx_pkt) {
		BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
			   hw->rx_list_post_index, hw->rx_list_sts[0],
			   hw->rx_list_sts[1], list_index,
			   hw->rx_pkt_tag_seed + list_index, comp_sts);
		return BC_STS_INV_ARG;
	}

	if (comp_sts == BC_STS_SUCCESS) {
		crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
		rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
		rx_pkt->flags = COMP_FLAG_DATA_VALID;
		if (rx_pkt->uv_phy_addr)
			rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
		crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
				   hw->rx_pkt_tag_seed + list_index);
		return sts;
	}

	/* Check if we can post this DIO again. */
	return crystalhd_hw_post_cap_buff(hw, rx_pkt);
}

static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
				       uint32_t y_err_sts, uint32_t uv_err_sts)
{
	uint32_t tmp;
	enum list_sts tmp_lsts;

	if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
		return false;

	tmp_lsts = hw->rx_list_sts[0];

	/* Y0 - DMA */
	tmp = y_err_sts & GET_Y0_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[0] &= ~rx_waiting_y_intr;

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
	}

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
		hw->rx_list_sts[0] &= ~rx_y_mask;
		hw->rx_list_sts[0] |= rx_y_error;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[0] &= ~rx_y_mask;
		hw->rx_list_sts[0] |= rx_y_error;
		hw->rx_list_post_index = 0;
	}

	/* UV0 - DMA */
	tmp = uv_err_sts & GET_UV0_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
	}

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
		hw->rx_list_sts[0] &= ~rx_uv_mask;
		hw->rx_list_sts[0] |= rx_uv_error;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[0] &= ~rx_uv_mask;
		hw->rx_list_sts[0] |= rx_uv_error;
		hw->rx_list_post_index = 0;
	}

	if (y_err_sts & GET_Y0_ERR_MSK) {
		tmp = y_err_sts & GET_Y0_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
	}

	if (uv_err_sts & GET_UV0_ERR_MSK) {
		tmp = uv_err_sts & GET_UV0_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
	}

	return (tmp_lsts != hw->rx_list_sts[0]);
}

static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
				       uint32_t y_err_sts, uint32_t uv_err_sts)
{
	uint32_t tmp;
	enum list_sts tmp_lsts;

	if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
		return false;

	tmp_lsts = hw->rx_list_sts[1];

	/* Y1 - DMA */
	tmp = y_err_sts & GET_Y1_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[1] &= ~rx_waiting_y_intr;

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
	}

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
		/* Add retry-support..*/
		hw->rx_list_sts[1] &= ~rx_y_mask;
		hw->rx_list_sts[1] |= rx_y_error;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[1] &= ~rx_y_mask;
		hw->rx_list_sts[1] |= rx_y_error;
		hw->rx_list_post_index = 0;
	}

	/* UV1 - DMA */
	tmp = uv_err_sts & GET_UV1_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
	}

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
		/* Add retry-support*/
		hw->rx_list_sts[1] &= ~rx_uv_mask;
		hw->rx_list_sts[1] |= rx_uv_error;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[1] &= ~rx_uv_mask;
		hw->rx_list_sts[1] |= rx_uv_error;
		hw->rx_list_post_index = 0;
	}

	if (y_err_sts & GET_Y1_ERR_MSK) {
		tmp = y_err_sts & GET_Y1_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
	}

	if (uv_err_sts & GET_UV1_ERR_MSK) {
		tmp = uv_err_sts & GET_UV1_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
	}

	return (tmp_lsts != hw->rx_list_sts[1]);
}


static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
{
	unsigned long flags;
	uint32_t i, list_avail = 0;
	enum BC_STATUS comp_sts = BC_STS_NO_DATA;
	uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
	bool ret = false;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	if (!(intr_sts & GET_RX_INTR_MASK))
		return;

	y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
	uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);

	for (i = 0; i < DMA_ENGINE_CNT; i++) {
		/* Update States..*/
		spin_lock_irqsave(&hw->rx_lock, flags);
		if (i == 0)
			ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
		else
			ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
		if (ret) {
			switch (hw->rx_list_sts[i]) {
			case sts_free:
				comp_sts = BC_STS_SUCCESS;
				list_avail = 1;
				break;
			case rx_y_error:
			case rx_uv_error:
			case rx_sts_error:
				/* We got an error on Y, UV or both. */
				hw->stats.rx_errors++;
				crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
				/* FIXME: jarod: this is where my mini pci-e card is tripping up */
				BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
				       "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
				       i, hw->stats.rx_errors, y_err_sts,
				       uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
				hw->rx_list_sts[i] = sts_free;
				comp_sts = BC_STS_ERROR;
				break;
			default:
				/* Wait for completion..*/
				comp_sts = BC_STS_NO_DATA;
				break;
			}
		}
		spin_unlock_irqrestore(&hw->rx_lock, flags);

		/* handle completion...*/
		if (comp_sts != BC_STS_NO_DATA) {
			crystalhd_rx_pkt_done(hw, i, comp_sts);
			comp_sts = BC_STS_NO_DATA;
		}
	}

	if (list_avail) {
		if (hw->stop_pending) {
			if ((hw->rx_list_sts[0] == sts_free) &&
			    (hw->rx_list_sts[1] == sts_free))
				crystalhd_hw_finalize_pause(hw);
		} else {
			crystalhd_hw_start_capture(hw);
		}
	}
}

static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
						 struct BC_FW_CMD *fw_cmd)
{
	enum BC_STATUS sts = BC_STS_SUCCESS;
	struct dec_rsp_channel_start_video *st_rsp = NULL;

	switch (fw_cmd->cmd[0]) {
	case eCMD_C011_DEC_CHAN_START_VIDEO:
		st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
		hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
		hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
		BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
		       hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
		break;
	case eCMD_C011_INIT:
		if (!(crystalhd_load_firmware_config(hw->adp))) {
			BCMLOG_ERR("Invalid Params.\n");
			sts = BC_STS_FW_AUTH_FAILED;
		}
		break;
	default:
		break;
	}
	return sts;
}

static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
{
	uint32_t reg;
	union link_misc_perst_decoder_ctrl rst_cntrl_reg;

	/* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
	rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);

	rst_cntrl_reg.bcm_7412_rst = 1;
	crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
	msleep_interruptible(50);

	rst_cntrl_reg.bcm_7412_rst = 0;
	crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);

	/* Close all banks, put DDR in idle */
	bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);

	/* Set bit 25 (drop CKE pin of DDR) */
	reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
	reg |= 0x02000000;
	bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);

	/* Reset the audio block */
	bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);

	/* Power down Raptor PLL */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);

	/* Power down all Audio PLL */
	bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);

	/* Power down video clock (75MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);

	/* Power down video clock (75MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);

	/* Power down core clock (200MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

	/* Power down core clock (200MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);

	return BC_STS_SUCCESS;
}

/************************************************
**
*************************************************/

enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
{
	uint32_t reg_data, cnt, *temp_buff;
	uint32_t fw_sig_len = 36;
	uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;

	if (!adp || !buffer || !sz) {
		BCMLOG_ERR("Invalid Params.\n");
		return BC_STS_INV_ARG;
	}

	reg_data = crystalhd_reg_rd(adp, OTP_CMD);
	if (!(reg_data & 0x02)) {
		BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
		return BC_STS_ERROR;
	}

	reg_data = 0;
	crystalhd_reg_wr(adp, DCI_CMD, 0);
	reg_data |= BC_BIT(0);
	crystalhd_reg_wr(adp, DCI_CMD, reg_data);

	reg_data = 0;
	cnt = 1000;
	msleep_interruptible(10);

	while (reg_data != BC_BIT(4)) {
		reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
		reg_data &= BC_BIT(4);
		if (--cnt == 0) {
			BCMLOG_ERR("Firmware Download RDY Timeout.\n");
			return BC_STS_TIMEOUT;
		}
	}

	msleep_interruptible(10);
	/* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
	crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
	temp_buff = (uint32_t *)buffer;
	for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
		crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
		crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
		dram_offset += 4;
		temp_buff++;
	}
	msleep_interruptible(10);

	temp_buff++;

	sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
	for (cnt = 0; cnt < 8; cnt++) {
		uint32_t swapped_data = *temp_buff;
		swapped_data = bswap_32_1(swapped_data);
		crystalhd_reg_wr(adp, sig_reg, swapped_data);
		sig_reg -= 4;
		temp_buff++;
	}
	msleep_interruptible(10);

	reg_data = 0;
	reg_data |= BC_BIT(1);
	crystalhd_reg_wr(adp, DCI_CMD, reg_data);
	msleep_interruptible(10);

	reg_data = 0;
	reg_data = crystalhd_reg_rd(adp, DCI_STATUS);

	if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
		cnt = 1000;
		while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
			reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
			reg_data &= BC_BIT(0);
			if (!(--cnt))
				break;
			msleep_interruptible(10);
		}
		reg_data = 0;
		reg_data = crystalhd_reg_rd(adp, DCI_CMD);
		reg_data |= BC_BIT(4);
		crystalhd_reg_wr(adp, DCI_CMD, reg_data);

	} else {
		BCMLOG_ERR("F/w Signature mismatch\n");
		return BC_STS_FW_AUTH_FAILED;
	}

	BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
	return BC_STS_SUCCESS;
}
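
/*
 * Firmware image layout assumed above, for reference: the payload
 * occupies sz - 36 bytes and is streamed one 32-bit word at a time
 * through DCI_FIRMWARE_DATA. Of the trailing 36-byte (9-word)
 * signature block, one word is skipped and the remaining eight words
 * are written byte-swapped into the signature registers at descending
 * addresses starting from DCI_SIGNATURE_DATA_7 (presumably down to
 * DCI_SIGNATURE_DATA_0) before the verify command (DCI_CMD bit 1) is
 * issued.
 */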

enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
				   struct BC_FW_CMD *fw_cmd)
{
	uint32_t cnt = 0, cmd_res_addr;
	uint32_t *cmd_buff, *res_buff;
	wait_queue_head_t fw_cmd_event;
	int rc = 0;
	enum BC_STATUS sts;

	crystalhd_create_event(&fw_cmd_event);

	if (!hw || !fw_cmd) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	cmd_buff = fw_cmd->cmd;
	res_buff = fw_cmd->rsp;

	if (!cmd_buff || !res_buff) {
		BCMLOG_ERR("Invalid Parameters for F/W Command\n");
		return BC_STS_INV_ARG;
	}

	hw->pwr_lock++;

	hw->fwcmd_evt_sts = 0;
	hw->pfw_cmd_event = &fw_cmd_event;

	/* Write the command to the memory */
	crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);

	/* Memory Read for memory arbitrator flush */
	crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);

	/* Write the command address to mailbox */
	bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
	msleep_interruptible(50);

	crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);

	if (!rc) {
		sts = BC_STS_SUCCESS;
	} else if (rc == -EBUSY) {
		BCMLOG_ERR("Firmware command T/O\n");
		sts = BC_STS_TIMEOUT;
	} else if (rc == -EINTR) {
		BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
		sts = BC_STS_IO_USER_ABORT;
	} else {
		BCMLOG_ERR("FwCmd IO Error.\n");
		sts = BC_STS_IO_ERROR;
	}

	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("FwCmd Failed.\n");
		hw->pwr_lock--;
		return sts;
	}

	/* Get the Response Address */
	cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);

	/* Read the Response */
	crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);

	hw->pwr_lock--;

	if (res_buff[2] != C011_RET_SUCCESS) {
		BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
		return BC_STS_FW_CMD_ERR;
	}

	sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
	if (sts != BC_STS_SUCCESS)
		BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");

	return sts;
}
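
/*
 * Mailbox handshake used above, in order: the command block is copied
 * into DRAM at TS_Host2CpuSnd, read back once to flush the memory
 * arbiter, and its address is written to the Hst2CpuMbx1 mailbox. The
 * firmware raises bit 31 of Stream2Host_Intr_Sts when done (see
 * crystalhd_hw_interrupt()), which wakes the waiter; the response
 * address then comes back in Cpu2HstMbx1, and res_buff[2] carries the
 * C011 API return code.
 */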

bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
{
	uint32_t intr_sts = 0;
	uint32_t deco_intr = 0;
	bool rc = false;

	if (!adp || !hw->dev_started)
		return rc;

	hw->stats.num_interrupts++;
	hw->pwr_lock++;

	deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
	intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

	if (intr_sts) {
		/* let system know we processed interrupt..*/
		rc = true;
		hw->stats.dev_interrupts++;
	}

	if (deco_intr && (deco_intr != 0xdeaddead)) {

		if (deco_intr & 0x80000000) {
			/* Set the Event and the status flag */
			if (hw->pfw_cmd_event) {
				hw->fwcmd_evt_sts = 1;
				crystalhd_set_event(hw->pfw_cmd_event);
			}
		}

		if (deco_intr & BC_BIT(1))
			crystalhd_hw_proc_pib(hw);

		bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
		/* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
		bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
		rc = true;
	}

	/* Rx interrupts */
	crystalhd_rx_isr(hw, intr_sts);

	/* Tx interrupts*/
	crystalhd_tx_isr(hw, intr_sts);

	/* Clear interrupts */
	if (rc) {
		if (intr_sts)
			crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);

		crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
	}

	hw->pwr_lock--;

	return rc;
}

enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
{
	if (!hw || !adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (hw->dev_started)
		return BC_STS_SUCCESS;

	memset(hw, 0, sizeof(struct crystalhd_hw));

	hw->adp = adp;
	spin_lock_init(&hw->lock);
	spin_lock_init(&hw->rx_lock);
	/* FIXME: jarod: what are these magic numbers?!? */
	hw->tx_ioq_tag_seed = 0x70023070;
	hw->rx_pkt_tag_seed = 0x70029070;

	hw->stop_pending = 0;
	crystalhd_start_device(hw->adp);
	hw->dev_started = true;

	/* set initial core clock */
	hw->core_clock_mhz = CLOCK_PRESET;
	hw->prev_n = 0;
	hw->pwr_lock = 0;
	crystalhd_hw_set_core_clock(hw);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
{
	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (!hw->dev_started)
		return BC_STS_SUCCESS;

	/* Stop and DDR sleep will happen in here */
	crystalhd_hw_suspend(hw);
	hw->dev_started = false;

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
{
	unsigned int i;
	void *mem;
	size_t mem_len;
	dma_addr_t phy_addr;
	enum BC_STATUS sts = BC_STS_SUCCESS;
	struct crystalhd_rx_dma_pkt *rpkt;

	if (!hw || !hw->adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	sts = crystalhd_hw_create_ioqs(hw);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("Failed to create IOQs..\n");
		return sts;
	}

	mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);

	for (i = 0; i < BC_TX_LIST_CNT; i++) {
		mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
		if (mem) {
			memset(mem, 0, mem_len);
		} else {
			BCMLOG_ERR("Insufficient Memory For TX\n");
			crystalhd_hw_free_dma_rings(hw);
			return BC_STS_INSUFF_RES;
		}
		/* rx_pkt_pool -- static memory allocation */
		hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
		hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
		hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
						 sizeof(struct dma_descriptor);
		hw->tx_pkt_pool[i].list_tag = 0;

		/* Add TX dma requests to Free Queue..*/
		sts = crystalhd_dioq_add(hw->tx_freeq,
					 &hw->tx_pkt_pool[i], false, 0);
		if (sts != BC_STS_SUCCESS) {
			crystalhd_hw_free_dma_rings(hw);
			return sts;
		}
	}

	for (i = 0; i < BC_RX_LIST_CNT; i++) {
		rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
		if (!rpkt) {
			BCMLOG_ERR("Insufficient Memory For RX\n");
			crystalhd_hw_free_dma_rings(hw);
			return BC_STS_INSUFF_RES;
		}

		mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
		if (mem) {
			memset(mem, 0, mem_len);
		} else {
			BCMLOG_ERR("Insufficient Memory For RX\n");
			crystalhd_hw_free_dma_rings(hw);
			kfree(rpkt);
			return BC_STS_INSUFF_RES;
		}
		rpkt->desc_mem.pdma_desc_start = mem;
		rpkt->desc_mem.phy_addr = phy_addr;
		rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
		rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
		crystalhd_hw_free_rx_pkt(hw, rpkt);
	}

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
{
	unsigned int i;
	struct crystalhd_rx_dma_pkt *rpkt = NULL;

	if (!hw || !hw->adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/* Delete all IOQs.. */
	crystalhd_hw_delete_ioqs(hw);

	for (i = 0; i < BC_TX_LIST_CNT; i++) {
		if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
			bc_kern_dma_free(hw->adp,
					 hw->tx_pkt_pool[i].desc_mem.sz,
					 hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
					 hw->tx_pkt_pool[i].desc_mem.phy_addr);

			hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
		}
	}

	BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
	do {
		rpkt = crystalhd_hw_alloc_rx_pkt(hw);
		if (!rpkt)
			break;
		bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
				 rpkt->desc_mem.pdma_desc_start,
				 rpkt->desc_mem.phy_addr);
		kfree(rpkt);
	} while (rpkt);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw,
				    struct crystalhd_dio_req *ioreq,
				    hw_comp_callback call_back,
				    wait_queue_head_t *cb_event, uint32_t *list_id,
				    uint8_t data_flags)
{
	struct tx_dma_pkt *tx_dma_packet = NULL;
	uint32_t first_desc_u_addr, first_desc_l_addr;
	uint32_t low_addr, high_addr;
	union addr_64 desc_addr;
	enum BC_STATUS sts, add_sts;
	uint32_t dummy_index = 0;
	unsigned long flags;
	bool rc;

	if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/*
	 * Since we hit the code-in-full (busy) condition very frequently,
	 * check the coded-input FIFO status first, before checking for a
	 * free queue element.
	 *
	 * This avoids the queue fetch/add in the normal case.
	 */
	rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
				    false, data_flags);
	if (rc) {
		hw->stats.cin_busy++;
		return BC_STS_BUSY;
	}

	/* Get a list from TxFreeQ */
	tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
	if (!tx_dma_packet) {
		BCMLOG_ERR("No empty elements..\n");
		return BC_STS_ERR_USAGE;
	}

	sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
					     &tx_dma_packet->desc_mem,
					     &dummy_index);
	if (sts != BC_STS_SUCCESS) {
		add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
					     false, 0);
		if (add_sts != BC_STS_SUCCESS)
			BCMLOG_ERR("double fault..\n");

		return sts;
	}

	hw->pwr_lock++;

	desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
	low_addr = desc_addr.low_part;
	high_addr = desc_addr.high_part;

	tx_dma_packet->call_back = call_back;
	tx_dma_packet->cb_event = cb_event;
	tx_dma_packet->dio_req = ioreq;

	spin_lock_irqsave(&hw->lock, flags);

	if (hw->tx_list_post_index == 0) {
		first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
		first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
	} else {
		first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
		first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
	}

	*list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
					     hw->tx_list_post_index;

	hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;

	spin_unlock_irqrestore(&hw->lock, flags);

	/* Insert in Active Q..*/
	crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
			   tx_dma_packet->list_tag);

	/*
	 * Interrupt will come as soon as you write
	 * the valid bit. So be ready for that. All
	 * the initialization should happen before that.
	 */
	crystalhd_start_tx_dma_engine(hw);
	crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);

	crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
	/* Be sure we set the valid bit ^^^^ */

	return BC_STS_SUCCESS;
}
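
/*
 * TX list bookkeeping, for reference: the two hardware TX lists are
 * used ping-pong fashion. tx_list_post_index alternates 0/1 (modulo
 * DMA_ENGINE_CNT) and the request's list_id is tx_ioq_tag_seed plus
 * that index, which is how crystalhd_tx_isr() maps a completion
 * interrupt back to the request via crystalhd_dioq_find_and_fetch().
 */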

/*
 * This is a force cancel and we are racing with the ISR.
 *
 * Try to remove the req from the ActQ before the ISR gets it.
 * If the ISR gets it first, then the completion happens on the
 * normal path and we return _STS_NO_DATA from here.
 *
 * FIXME: the actual race condition has not been tested.
 */
enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
{
	if (!hw || !list_id) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_stop_tx_dma_engine(hw);
	crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
					   struct crystalhd_dio_req *ioreq, bool en_post)
{
	struct crystalhd_rx_dma_pkt *rpkt;
	uint32_t tag, uv_desc_ix = 0;
	enum BC_STATUS sts;

	if (!hw || !ioreq) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rpkt = crystalhd_hw_alloc_rx_pkt(hw);
	if (!rpkt) {
		BCMLOG_ERR("Insufficient resources\n");
		return BC_STS_INSUFF_RES;
	}

	rpkt->dio_req = ioreq;
	tag = rpkt->pkt_tag;

	sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
	if (sts != BC_STS_SUCCESS)
		return sts;

	rpkt->uv_phy_addr = 0;

	/* Store the address of UV in the rx packet for post */
	if (uv_desc_ix)
		rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
				    (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));

	if (en_post)
		sts = crystalhd_hw_post_cap_buff(hw, rpkt);
	else
		sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);

	return sts;
}

enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
					   struct BC_PIC_INFO_BLOCK *pib,
					   struct crystalhd_dio_req **ioreq)
{
	struct crystalhd_rx_dma_pkt *rpkt;
	uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
	uint32_t sig_pending = 0;

	if (!hw || !ioreq || !pib) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
	if (!rpkt) {
		if (sig_pending) {
			BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n", sig_pending);
			return BC_STS_IO_USER_ABORT;
		} else {
			return BC_STS_TIMEOUT;
		}
	}

	rpkt->dio_req->uinfo.comp_flags = rpkt->flags;

	if (rpkt->flags & COMP_FLAG_PIB_VALID)
		memcpy(pib, &rpkt->pib, sizeof(*pib));

	*ioreq = rpkt->dio_req;

	crystalhd_hw_free_rx_pkt(hw, rpkt);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
{
	struct crystalhd_rx_dma_pkt *rx_pkt;
	enum BC_STATUS sts;
	uint32_t i;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/* This is start of capture.. Post to both the lists.. */
	for (i = 0; i < DMA_ENGINE_CNT; i++) {
		rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
		if (!rx_pkt)
			return BC_STS_NO_DATA;
		sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
		if (BC_STS_SUCCESS != sts)
			break;
	}

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
{
	void *temp = NULL;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_stop_rx_dma_engine(hw);

	do {
		temp = crystalhd_dioq_fetch(hw->rx_freeq);
		if (temp)
			crystalhd_rx_pkt_rel_call_back(hw, temp);
	} while (temp);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
{
	hw->stats.pause_cnt++;
	hw->stop_pending = 1;

	if ((hw->rx_list_sts[0] == sts_free) &&
	    (hw->rx_list_sts[1] == sts_free))
		crystalhd_hw_finalize_pause(hw);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
{
	enum BC_STATUS sts;
	uint32_t aspm;

	hw->stop_pending = 0;

	aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
	aspm &= ~ASPM_L1_ENABLE;
	/* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
	crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);

	sts = crystalhd_hw_start_capture(hw);
	return sts;
}

enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
{
	enum BC_STATUS sts;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	sts = crystalhd_put_ddr2sleep(hw);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
		return BC_STS_ERROR;
	}

	if (!crystalhd_stop_device(hw->adp)) {
		BCMLOG_ERR("Failed to Stop Device!!\n");
		return BC_STS_ERROR;
	}

	return BC_STS_SUCCESS;
}

void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
{
	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	/* if called with NULL stats, it's a request to zero out the stats */
	if (!stats) {
		memset(&hw->stats, 0, sizeof(hw->stats));
		return;
	}

	hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
	hw->stats.rdyq_count = crystalhd_dioq_count(hw->rx_rdyq);
	memcpy(stats, &hw->stats, sizeof(*stats));
}

enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
{
	uint32_t reg, n, i;
	uint32_t vco_mg, refresh_reg;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/* FIXME: jarod: wha? */
	/*n = (hw->core_clock_mhz * 3) / 20 + 1; */
	n = hw->core_clock_mhz / 5;

	if (n == hw->prev_n)
		return BC_STS_CLK_NOCHG;

	if (hw->pwr_lock > 0) {
		/* BCMLOG(BCMLOG_INFO, "pwr_lock is %u\n", hw->pwr_lock) */
		return BC_STS_CLK_NOCHG;
	}

	i = n * 27;
	if (i < 560)
		vco_mg = 0;
	else if (i < 900)
		vco_mg = 1;
	else if (i < 1030)
		vco_mg = 2;
	else
		vco_mg = 3;

	reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);

	reg &= 0xFFFFCFC0;
	reg |= n;
	reg |= vco_mg << 12;

	BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
	       hw->core_clock_mhz, n, vco_mg);

	/* Change the DRAM refresh rate to accommodate the new frequency */
	/* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up */
	refresh_reg = (7 * hw->core_clock_mhz / 16);
	bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));

	bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

	i = 0;

	for (i = 0; i < 10; i++) {
		reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);

		if (reg & 0x00020000) {
			hw->prev_n = n;
			/* FIXME: jarod: outputting a random "C" is... confusing... */
			BCMLOG(BCMLOG_INFO, "C");
			return BC_STS_SUCCESS;
		} else {
			msleep_interruptible(10);
		}
	}
	BCMLOG(BCMLOG_INFO, "clk change failed\n");
	return BC_STS_CLK_NOCHG;
}
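
/*
 * Worked example of the PLL math above (values illustrative, for a
 * hypothetical 165MHz request): n = 165 / 5 = 33 and the VCO runs at
 * i = 33 * 27 = 891, selecting vco_mg = 1 (the 560-900 band). The DRAM
 * refresh value becomes 7 * 165 / 16 = 72, and bit 17 (0x00020000) of
 * DecHt_PllACtl is polled as the lock indicator.
 */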