#define INVOKE_CB(function_p, args...)          \
    do {                                        \
        if (function_p) {                       \
            res = function_p(args);             \
            if (res)                            \
                return res;                     \
        }                                       \
    } while (0)
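/* Human-readable names for the cvmx_spi_mode_t values; used only in debug output */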
#if CVMX_ENABLE_DEBUG_PRINTS
static const char *modes[] =
    { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
#endif
void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
{
    memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
}
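/* Replace the SPI initialization callbacks with a caller-supplied set */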
88 memcpy(&cvmx_spi_callbacks, new_callbacks,
sizeof(cvmx_spi_callbacks));
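/*
 * Usage sketch (not part of this file): a caller can swap in a custom
 * stage callback. "my_reset_cb" is a hypothetical function name.
 *
 *    cvmx_spi_callbacks_t cb;
 *    cvmx_spi_get_callbacks(&cb);
 *    cb.reset_cb = my_reset_cb;
 *    cvmx_spi_set_callbacks(&cb);
 */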
    cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.runbist = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(10 * MS);
    spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
    if (spxx_bist_stat.s.stat0)
        cvmx_dprintf("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
                     interface);
    if (spxx_bist_stat.s.stat1)
        cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
                     interface);
    if (spxx_bist_stat.s.stat2)
        cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
                     interface);
    for (index = 0; index < 32; index++) {
        union cvmx_srxx_spi4_calx srxx_spi4_calx;
        union cvmx_stxx_spi4_calx stxx_spi4_calx;

        srxx_spi4_calx.u64 = 0;
        srxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), srxx_spi4_calx.u64);

        stxx_spi4_calx.u64 = 0;
        stxx_spi4_calx.s.oddpar = 1;
        cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), stxx_spi4_calx.u64);
    }
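    /* Set CLKDLY right in the middle of its range */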
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.seetrn = 0;
    spxx_clk_ctl.s.clkdly = 0x10;
    spxx_clk_ctl.s.runbist = 0;
    spxx_clk_ctl.s.statdrv = 0;
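    /* This should always be on the opposite edge as statdrv */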
    spxx_clk_ctl.s.statrcv = 1;
    spxx_clk_ctl.s.sndtrn = 0;
    spxx_clk_ctl.s.drptrn = 0;
    spxx_clk_ctl.s.rcvtrn = 0;
    spxx_clk_ctl.s.srxdlck = 0;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(100 * MS);
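    /* Reset the SRX0 DLL */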
    spxx_clk_ctl.s.srxdlck = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);

    /* Waiting for Inf0 Spi4 RX DLL to lock */
    cvmx_wait(100 * MS);
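    /* Enable dynamic alignment */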
    spxx_trn4_ctl.u64 = 0;
    spxx_trn4_ctl.s.trntest = 0;
    spxx_trn4_ctl.s.jitter = 1;
    spxx_trn4_ctl.s.clr_boot = 1;
    spxx_trn4_ctl.s.set_boot = 0;
    if (OCTEON_IS_MODEL(OCTEON_CN58XX))
        spxx_trn4_ctl.s.maxdist = 3;
    else
        spxx_trn4_ctl.s.maxdist = 8;
    spxx_trn4_ctl.s.macro_en = 1;
    spxx_trn4_ctl.s.mux_en = 1;
    cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
    spxx_dbg_deskew_ctl.u64 = 0;
    cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface), spxx_dbg_deskew_ctl.u64);
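    /* SRX0 number of ports */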
    srxx_com_ctl.u64 = 0;
    srxx_com_ctl.s.prts = num_ports - 1;
    srxx_com_ctl.s.st_en = 0;
    srxx_com_ctl.s.inf_en = 0;
    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
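    /* SRX0 calendar table: round-robin through all ports, four per entry */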
    port = 0;
    index = 0;
    while (port < num_ports) {
        union cvmx_srxx_spi4_calx srxx_spi4_calx;

        srxx_spi4_calx.u64 = 0;
        srxx_spi4_calx.s.prt0 = port++;
        srxx_spi4_calx.s.prt1 = port++;
        srxx_spi4_calx.s.prt2 = port++;
        srxx_spi4_calx.s.prt3 = port++;
        srxx_spi4_calx.s.oddpar = ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
        cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), srxx_spi4_calx.u64);
        index++;
    }
    srxx_spi4_stat.u64 = 0;
    srxx_spi4_stat.s.len = num_ports;
    srxx_spi4_stat.s.m = 1;
    cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface), srxx_spi4_stat.u64);
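    /* STX0 config */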
    stxx_arb_ctl.u64 = 0;
    stxx_arb_ctl.s.igntpa = 0;
    stxx_arb_ctl.s.mintrn = 0;
    cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
    gmxx_tx_spi_max.u64 = 0;
    gmxx_tx_spi_max.s.max1 = 8;
    gmxx_tx_spi_max.s.max2 = 4;
    gmxx_tx_spi_max.s.slice = 0;
    cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface), gmxx_tx_spi_max.u64);
    gmxx_tx_spi_thresh.u64 = 0;
    gmxx_tx_spi_thresh.s.thresh = 4;
    cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface), gmxx_tx_spi_thresh.u64);
    gmxx_tx_spi_ctl.u64 = 0;
    gmxx_tx_spi_ctl.s.tpa_clr = 0;
    gmxx_tx_spi_ctl.s.cont_pkt = 0;
    cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface), gmxx_tx_spi_ctl.u64);
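    /* STX0 training control */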
    stxx_spi4_dat.u64 = 0;
    /* Minimum needed by dynamic alignment */
    stxx_spi4_dat.s.alpha = 32;
    /* Minimum interval is 0x20 */
    stxx_spi4_dat.s.max_t = 0xFFFF;
    cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64);
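    /* STX0 calendar table: round-robin through all ports, four per entry */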
    port = 0;
    index = 0;
    while (port < num_ports) {
        union cvmx_stxx_spi4_calx stxx_spi4_calx;

        stxx_spi4_calx.u64 = 0;
        stxx_spi4_calx.s.prt0 = port++;
        stxx_spi4_calx.s.prt1 = port++;
        stxx_spi4_calx.s.prt2 = port++;
        stxx_spi4_calx.s.prt3 = port++;
        stxx_spi4_calx.s.oddpar = ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
        cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), stxx_spi4_calx.u64);
        index++;
    }
    stxx_spi4_stat.u64 = 0;
    stxx_spi4_stat.s.len = num_ports;
    stxx_spi4_stat.s.m = 1;
    cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface), stxx_spi4_stat.u64);
    int clock_transitions;
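    /*
     * Regardless of operating mode, both Tx and Rx clocks must be
     * present for the SPI interface to operate.
     */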
    cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
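    /* Require 100 clock transitions to avoid counting noise at startup */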
    clock_transitions = 100;
    do {
        stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
        if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
            /* Saw a transition on both clocks; decrement the count we still need */
            clock_transitions--;
            cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
            stat.s.s4clk0 = 0;
            stat.s.s4clk1 = 0;
        }
        if (cvmx_get_cycle() > timeout_time) {
            cvmx_dprintf("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
    cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
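    /* Again require 100 clock transitions to avoid counting noise */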
    clock_transitions = 100;
    do {
        stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
        if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
            /* Saw a transition on both clocks; decrement the count we still need */
            clock_transitions--;
            cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
            stat.s.d4clk0 = 0;
            stat.s.d4clk1 = 0;
        }
        if (cvmx_get_cycle() > timeout_time) {
            cvmx_dprintf("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
    uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
    int rx_training_needed;
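    /* SRX0 & STX0 Inf0 links are configured; begin training */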
    spxx_clk_ctl.u64 = 0;
    spxx_clk_ctl.s.seetrn = 0;
    spxx_clk_ctl.s.clkdly = 0x10;
    spxx_clk_ctl.s.runbist = 0;
    spxx_clk_ctl.s.statdrv = 0;
    /* This should always be on the opposite edge as statdrv */
    spxx_clk_ctl.s.statrcv = 1;
    spxx_clk_ctl.s.sndtrn = 1;
    spxx_clk_ctl.s.drptrn = 1;
    spxx_clk_ctl.s.rcvtrn = 1;
    spxx_clk_ctl.s.srxdlck = 1;
    cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
    cvmx_wait(1000 * MS);
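    /* SRX0: clear the boot bit */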
    spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
    spxx_trn4_ctl.s.clr_boot = 1;
    cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
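    /* Wait for the training sequence to complete */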
    cvmx_dprintf("SPI%d: Waiting for training\n", interface);
    cvmx_wait(1000 * MS);
    /* Wait a really long time here */
    timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
    /*
     * The HRM says we need to wait for 34 + 16 * MAXDIST training
     * sequences; be pessimistic and wait for a lot more.
     */
    rx_training_needed = 500;
    do {
        stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
        if (stat.s.srxtrn && rx_training_needed) {
            rx_training_needed--;
            cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
            stat.s.srxtrn = 0;
        }
        if (cvmx_get_cycle() > timeout_time) {
            cvmx_dprintf("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.srxtrn == 0);
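    /* SRX0 interface should be good; send calendar data */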
    cvmx_dprintf("SPI%d: Rx is synchronized, start sending calendar data\n",
                 interface);
    srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
    srxx_com_ctl.s.inf_en = 1;
    srxx_com_ctl.s.st_en = 1;
    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
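    /*
     * STX0 has achieved sync; the peer should be sending calendar data.
     * Enable the STX0 STAT receiver.
     */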
    stxx_com_ctl.u64 = 0;
    stxx_com_ctl.s.st_en = 1;
    cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
    cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
                 interface, interface);
    timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
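    /* SPX0_CLK_STAT[STXCAL] should read 1 once calendar sync is achieved */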
    do {
        stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
        if (cvmx_get_cycle() > timeout_time) {
            cvmx_dprintf("SPI%d: Timeout\n", interface);
            return -1;
        }
    } while (stat.s.stxcal == 0);
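    /* Enable the Rx side of the interface */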
    srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
    srxx_com_ctl.s.inf_en = 1;
    cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
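    /* Enable the Tx side of the interface */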
    stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
    stxx_com_ctl.s.inf_en = 1;
    cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
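    /* Accept frames from 64 bytes up to 64KB - 4; set the jabber limit to match */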
    gmxx_rxx_frm_min.u64 = 0;
    gmxx_rxx_frm_min.s.len = 64;
    cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface), gmxx_rxx_frm_min.u64);
    gmxx_rxx_frm_max.u64 = 0;
    gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
    cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface), gmxx_rxx_frm_max.u64);
    gmxx_rxx_jabber.u64 = 0;
    gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
    cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);