#if !defined(DEBUG_PARPORT_IP32)
# define DEBUG_PARPORT_IP32 0
#endif

#if DEBUG_PARPORT_IP32 == 1
# warning DEBUG_PARPORT_IP32 == 1
#elif DEBUG_PARPORT_IP32 == 2
# warning DEBUG_PARPORT_IP32 == 2
#elif DEBUG_PARPORT_IP32 >= 3
# warning DEBUG_PARPORT_IP32 >= 3
#endif
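/*
 * As used below: any level >= 1 defaults verbose probing to on and enables
 * pr_debug1() and CHECK_EXTRA_BITS(); level >= 2 additionally enables
 * parport_ip32_dump_state(); and any non-zero level requests the extra
 * "merr" debug interrupt handler.
 */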
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>
#if DEBUG_PARPORT_IP32 >= 1
# define DEFAULT_VERBOSE_PROBING 1
#else
# define DEFAULT_VERBOSE_PROBING 0
#endif
#define PPIP32 "parport_ip32: "

#define PARPORT_IP32_ENABLE_IRQ (1U << 0)
#define PARPORT_IP32_ENABLE_DMA (1U << 1)
#define PARPORT_IP32_ENABLE_SPP (1U << 2)
#define PARPORT_IP32_ENABLE_EPP (1U << 3)
#define PARPORT_IP32_ENABLE_ECP (1U << 4)
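/*
 * These bits are OR'ed together to select which hardware features the driver
 * may use; the bit-mask module parameter described at the end of this file
 * uses the same layout.  As a sketch (assuming the parameter is the usual
 * "features" one), enabling only IRQ and DMA support would be:
 *
 *	modprobe parport_ip32 features=0x3
 */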
#define FIFO_NFAULT_TIMEOUT 100		/* milliseconds */
#define FIFO_POLLING_INTERVAL 50	/* microseconds */
/* Device Status Register */
#define DSR_nBUSY (1U << 7)
#define DSR_nACK (1U << 6)
#define DSR_PERROR (1U << 5)
#define DSR_SELECT (1U << 4)
#define DSR_nFAULT (1U << 3)
#define DSR_nPRINT (1U << 2)
#define DSR_TIMEOUT (1U << 0)

/* Device Control Register */
#define DCR_DIR (1U << 5)
#define DCR_IRQ (1U << 4)
#define DCR_SELECT (1U << 3)
#define DCR_nINIT (1U << 2)
#define DCR_AUTOFD (1U << 1)
#define DCR_STROBE (1U << 0)

/* ECP Configuration Register A */
#define CNFGA_IRQ (1U << 7)
#define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
#define CNFGA_ID_SHIFT 4
#define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
#define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
#define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
#define CNFGA_nBYTEINTRANS (1U << 2)
#define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))

/* ECP Configuration Register B */
#define CNFGB_COMPRESS (1U << 7)
#define CNFGB_INTRVAL (1U << 6)
#define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
#define CNFGB_IRQ_SHIFT 3
#define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
#define CNFGB_DMA_SHIFT 0
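/*
 * The IRQ and DMA fields of Configuration Register B are read with the usual
 * mask-and-shift idiom, e.g.:
 *
 *	irq = (cnfgb & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT;
 *	dma = (cnfgb & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT;
 */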
/* Extended Control Register */
#define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
#define ECR_MODE_SHIFT 5
#define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
#define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
#define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
#define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
#define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
/* mode 5 is reserved */
#define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
#define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
#define ECR_nERRINTR (1U << 4)
#define ECR_DMAEN (1U << 3)
#define ECR_SERVINTR (1U << 2)
#define ECR_F_FULL (1U << 1)
#define ECR_F_EMPTY (1U << 0)
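/*
 * The operating mode lives in the three top bits of the ECR.  A minimal
 * sketch of selecting a mode while keeping the error and service interrupts
 * masked (not necessarily the exact sequence used everywhere below):
 *
 *	writeb(ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR, priv->regs.ecr);
 */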
#if DEBUG_PARPORT_IP32 >= 1
# define pr_debug1(...)	printk(KERN_DEBUG __VA_ARGS__)
#else
# define pr_debug1(...)	do { } while (0)
#endif
#define __pr_trace(pr, p, fmt, ...) \
	pr("%s: %s" fmt "\n", \
	   ({ const struct parport *__p = (p); \
	      __p ? __p->name : "parport_ip32"; }), \
	   __func__ , ##__VA_ARGS__)
#define pr_trace(p, fmt, ...)	__pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
#define pr_trace1(p, fmt, ...)	__pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
#define __pr_probe(...) \
	do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
#define pr_probe(p, fmt, ...) \
	__pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
#if DEBUG_PARPORT_IP32 >= 2
static void parport_ip32_dump_state(struct parport *p, char *str,
				    unsigned int show_ecp_config)
{
	/* ... */
	static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF", "ECP",
					     "EPP", "???", "TST", "CFG"};
	unsigned int ecr = readb(priv->regs.ecr);
	/* ... */
	if (show_ecp_config) {
		unsigned int oecr, cnfgA, cnfgB;
		/* ... */
		       ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
	}
	for (i = 0; i < 2; i++) {
		/* ... */
		       i ? "soft" : "hard", dcr);
	}
#define sep (f++ ? ',' : ' ')
	unsigned int dsr = readb(priv->regs.dsr);
	/* ... */
}
#else /* DEBUG_PARPORT_IP32 < 2 */
#define parport_ip32_dump_state(...) do { } while (0)
#endif
#if DEBUG_PARPORT_IP32 >= 1
#define CHECK_EXTRA_BITS(p, b, m) \
	do { \
		unsigned int __b = (b), __m = (m); \
		if (__b & ~__m) \
			pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
				  "0x%02x/0x%02x\n", \
				  (p)->name, __func__, #b, __b, __m); \
	} while (0)
#else /* DEBUG_PARPORT_IP32 < 1 */
#define CHECK_EXTRA_BITS(...) do { } while (0)
#endif
static void parport_ip32_dma_setup_context(unsigned int limit)
{
	/* ... */
	if (parport_ip32_dma.left > 0) {
		volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
			&mace->perif.ctrl.parport.context_a :
			&mace->perif.ctrl.parport.context_b;
		/* ... */
		if (parport_ip32_dma.left <= limit) {
			count = parport_ip32_dma.left;
			/* ... */
		}
		pr_trace(NULL,
			 "(%u): 0x%04x:0x%04x, %u -> %u%s",
			 limit,
			 (unsigned int)parport_ip32_dma.buf,
			 (unsigned int)parport_ip32_dma.next,
			 (unsigned int)count,
			 parport_ip32_dma.ctx, ctxval ? "*" : "");
		ctxval |= parport_ip32_dma.next &
			MACEPAR_CONTEXT_BASEADDR_MASK;
		/* ... */
		parport_ip32_dma.next += count;
		parport_ip32_dma.left -= count;
		parport_ip32_dma.ctx ^= 1U;
	}
	if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
		/* ... */
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
}

static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
{
	if (parport_ip32_dma.left)
		pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
	/* ... */
}
#if DEBUG_PARPORT_IP32
static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	if (!parport_ip32_dma.irq_on) {
		/* ... */
		parport_ip32_dma.irq_on = 1;
	}

	parport_ip32_dma.dir = dir;
	/* ... */
	parport_ip32_dma.len = count;
	parport_ip32_dma.next = parport_ip32_dma.buf;
	parport_ip32_dma.left = parport_ip32_dma.len;
	parport_ip32_dma.ctx = 0;
	/* ... */
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* ... */
	parport_ip32_dma_setup_context(limit);
	/* ... */
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
static void parport_ip32_dma_stop(void)
{
	/* ... */
	spin_lock_irq(&parport_ip32_dma.lock);
	if (parport_ip32_dma.irq_on) {
		/* ... */
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irq(&parport_ip32_dma.lock);
	/* ... */
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	/* ... */
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	diag = readq(&mace->perif.ctrl.parport.diagnostic);
	/* ... */
		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	/* ... */
	parport_ip32_dma.left += res[0] + res[1];
	/* ... */
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* ... */
	parport_ip32_dma.irq_on = 1;
	/* ... */
			 parport_ip32_dma.dir);
}
static inline size_t parport_ip32_dma_get_residue(void)
{
	return parport_ip32_dma.left;
}
static int parport_ip32_dma_register(void)
{
	/* ... */
	parport_ip32_dma.irq_on = 1;
	/* ... */
	err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	/* ... */
	err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	/* ... */
#if DEBUG_PARPORT_IP32
	err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
			  0, "parport_ip32", NULL);
	/* ... */
#endif
#if DEBUG_PARPORT_IP32
	/* ... */
#endif
	/* ... */
}

static void parport_ip32_dma_unregister(void)
{
#if DEBUG_PARPORT_IP32
static inline void parport_ip32_wakeup(struct parport *p)
{
	/* ... */
}

static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
{
	/* ... */
		parport_ip32_wakeup(p);
	/* ... */
}
static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
{
	/* ... */
}

static inline void parport_ip32_write_econtrol(struct parport *p,
						unsigned int c)
{
	/* ... */
}

static inline void parport_ip32_frob_econtrol(struct parport *p,
					       unsigned int mask,
					       unsigned int val)
{
	unsigned int c;
	c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
	parport_ip32_write_econtrol(p, c);
}

static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
{
	/* ... */
	parport_ip32_write_econtrol(p, ecr);
}
static inline unsigned char parport_ip32_read_data(struct parport *p)
{
	/* ... */
}

static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
{
	/* ... */
}

static inline unsigned char parport_ip32_read_status(struct parport *p)
{
	/* ... */
}

static inline unsigned int __parport_ip32_read_control(struct parport *p)
{
	/* ... */
}

static inline void __parport_ip32_write_control(struct parport *p,
						 unsigned int c)
{
	/* ... */
}

static inline void __parport_ip32_frob_control(struct parport *p,
						unsigned int mask,
						unsigned int val)
{
	unsigned int c;
	c = (__parport_ip32_read_control(p) & ~mask) ^ val;
	__parport_ip32_write_control(p, c);
}

static inline unsigned char parport_ip32_read_control(struct parport *p)
{
	const unsigned int rm =
		DCR_SELECT | DCR_nINIT | DCR_AUTOFD | DCR_STROBE;
	return __parport_ip32_read_control(p) & rm;
}

static inline void parport_ip32_write_control(struct parport *p,
					      unsigned char c)
{
	const unsigned int wm =
		DCR_SELECT | DCR_nINIT | DCR_AUTOFD | DCR_STROBE;
	/* ... */
	__parport_ip32_frob_control(p, wm, c & wm);
}

static inline unsigned char parport_ip32_frob_control(struct parport *p,
						      unsigned char mask,
						      unsigned char val)
{
	const unsigned int wm =
		DCR_SELECT | DCR_nINIT | DCR_AUTOFD | DCR_STROBE;
	/* ... */
	__parport_ip32_frob_control(p, mask & wm, val & wm);
	return parport_ip32_read_control(p);
}
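/*
 * Note the frob idiom used above: new = (old & ~mask) ^ val first clears the
 * bits in mask and then toggles in val.  So, for example,
 * __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR) sets the direction bit
 * while __parport_ip32_frob_control(p, DCR_DIR, 0) clears it, as in the
 * data_reverse()/data_forward() helpers below.
 */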
static inline void parport_ip32_disable_irq(struct parport *p)
{
	__parport_ip32_frob_control(p, DCR_IRQ, 0);
}

static inline void parport_ip32_enable_irq(struct parport *p)
{
	__parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
}

static inline void parport_ip32_data_forward(struct parport *p)
{
	__parport_ip32_frob_control(p, DCR_DIR, 0);
}

static inline void parport_ip32_data_reverse(struct parport *p)
{
	__parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
}
static void parport_ip32_init_state(struct pardevice *dev,
				    struct parport_state *s)
{
	/* ... */
}

static void parport_ip32_save_state(struct parport *p,
				    struct parport_state *s)
{
	s->u.ip32.dcr = __parport_ip32_read_control(p);
	s->u.ip32.ecr = parport_ip32_read_econtrol(p);
}

static void parport_ip32_restore_state(struct parport *p,
				       struct parport_state *s)
{
	/* ... */
	parport_ip32_write_econtrol(p, s->u.ip32.ecr);
	__parport_ip32_write_control(p, s->u.ip32.dcr);
}
static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
{
	/* ... */
	unsigned int cleared;
	/* ... */
		/* a double read is needed on some chips to clear the timeout */
		parport_ip32_read_status(p);
		r = parport_ip32_read_status(p);
		/* ... */
		r = parport_ip32_read_status(p);
	/* ... */
	pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
	/* ... */
}
static size_t parport_ip32_epp_read(void __iomem *eppreg,
				    struct parport *p, void *buf,
				    size_t len, int flags)
{
	/* ... */
	parport_ip32_data_reverse(p);
	parport_ip32_write_control(p, DCR_nINIT);
	if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
		readsb(eppreg, buf, len);
		/* ... */
			parport_ip32_clear_epp_timeout(p);
		/* ... */
	} else {
		u8 *bufp = buf;
		for (got = 0; got < len; got++) {
			*bufp++ = readb(eppreg);
			/* ... */
				parport_ip32_clear_epp_timeout(p);
			/* ... */
		}
	}
	parport_ip32_data_forward(p);
	/* ... */
}

static size_t parport_ip32_epp_write(void __iomem *eppreg,
				     struct parport *p, const void *buf,
				     size_t len, int flags)
{
	/* ... */
	parport_ip32_data_forward(p);
	parport_ip32_write_control(p, DCR_nINIT);
	if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
		/* ... */
			parport_ip32_clear_epp_timeout(p);
		/* ... */
	} else {
		const u8 *bufp = buf;
		for (written = 0; written < len; written++) {
			/* ... */
				parport_ip32_clear_epp_timeout(p);
			/* ... */
		}
	}
	/* ... */
}
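/*
 * Both EPP helpers have a fast path and a safe path: with PARPORT_EPP_FAST
 * the whole buffer is moved with the block I/O accessors (readsb() and its
 * write counterpart) and the EPP timeout is only dealt with once, otherwise
 * the transfer is done one byte at a time with a timeout check after every
 * access.
 */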
static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
					 size_t len, int flags)
{
	/* ... */
	return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
}

static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
					  size_t len, int flags)
{
	/* ... */
	return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
}

static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
					 size_t len, int flags)
{
	/* ... */
	return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
}

static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
					  size_t len, int flags)
{
	/* ... */
	return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
}
static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
						 unsigned long expire)
{
	/* ... */
	if (signal_pending(current)) {
		/* ... */
	}
	if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
		/* ... */
	}
	/* ... */
}

static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
{
	/* ... */
	unsigned long expire;
	/* ... */
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		/* ... */
		ecr = parport_ip32_read_econtrol(p);
		/* ... */
}

static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
{
	static unsigned int lost_interrupt = 0;
	/* ... */
	unsigned long nfault_timeout;
	unsigned long expire;
	/* ... */
	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	/* ... */
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		/* ... */
		ecr = parport_ip32_read_econtrol(p);
		/* ... */
			ecr = parport_ip32_read_econtrol(p);
			/* ... */
			    && !lost_interrupt) {
				printk(KERN_WARNING PPIP32
				       "%s: lost interrupt in %s\n",
				       p->name, __func__);
				/* ... */
			}
		/* ... */
		if (ecr & ECR_F_EMPTY) {
			/* ... */
		}
	/* ... */
}
static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
						const void *buf, size_t len)
{
	const u8 *bufp = buf;
	/* ... */
		unsigned int count = (p->irq == PARPORT_IRQ_NONE) ?
			parport_ip32_fwp_wait_polling(p) :
			parport_ip32_fwp_wait_interrupt(p);
	/* ... */
}

static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
						const void *buf, size_t len)
{
	/* ... */
	unsigned long nfault_timeout;
	unsigned long expire;
	/* ... */
	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	/* ... */
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		/* ... */
		ecr = parport_ip32_read_econtrol(p);
	/* ... */
	parport_ip32_dma_stop();
	written = len - parport_ip32_dma_get_residue();
	/* ... */
}

static size_t parport_ip32_fifo_write_block(struct parport *p,
					    const void *buf, size_t len)
{
	/* ... */
	return (p->modes & PARPORT_MODE_DMA) ?
		parport_ip32_fifo_write_block_dma(p, buf, len) :
		parport_ip32_fifo_write_block_pio(p, buf, len);
}
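/*
 * parport_ip32_fifo_write_block() simply dispatches: DMA when the port has
 * DMA support, programmed I/O otherwise.  The PIO variant in turn waits for
 * FIFO room either by polling or by waiting for a serviceIntr interrupt,
 * depending on whether an IRQ was assigned to the port.
 */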
static unsigned int parport_ip32_drain_fifo(struct parport *p,
					    unsigned long timeout)
{
	unsigned long expire = jiffies + timeout;
	unsigned int polling_interval;
	/* ... */
	for (counter = 0; counter < 40; counter++) {
		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
			break;
		/* ... */
	}
	/* ... */
	polling_interval = 1;
	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
		/* ... */
		if (polling_interval < 128)
			polling_interval *= 2;
	}

	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
}
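/*
 * Draining strategy: a short busy-wait loop (40 iterations) first, then a
 * slower loop whose polling interval starts at 1 and doubles up to 128
 * (exponential back-off).  The function returns non-zero only if the FIFO
 * ended up empty before the timeout expired.
 */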
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
						  unsigned int mode)
{
	/* ... */
	unsigned int residue;
	/* ... */
	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
		residue = 0;
	else {
		/* ... */
		for (residue = priv->fifo_depth; residue > 0; residue--) {
			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
				break;
			/* ... */
		}
	}
	/* ... */
			  (residue == 1) ? " was" : "s were");
	/* ... */
	parport_ip32_data_reverse(p);
	parport_ip32_frob_control(p, DCR_nINIT, 0);
	/* ... */
	parport_ip32_data_forward(p);
	/* ... */
}
static size_t parport_ip32_compat_write_data(struct parport *p,
					     const void *buf, size_t len,
					     int flags)
{
	static unsigned int ready_before = 1;
	/* ... */
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	/* ... */
	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Account for data still stuck in the FIFO */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);
	/* ... */
}

static size_t parport_ip32_ecp_write_data(struct parport *p,
					  const void *buf, size_t len,
					  int flags)
{
	static unsigned int ready_before = 1;
	/* ... */
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	/* ... */
	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Account for data still stuck in the FIFO */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
	/* ... */
}
	.write_data = parport_ip32_write_data,
	.read_data = parport_ip32_read_data,

	.write_control = parport_ip32_write_control,
	.read_control = parport_ip32_read_control,
	.frob_control = parport_ip32_frob_control,

	.read_status = parport_ip32_read_status,

	.enable_irq = parport_ip32_enable_irq,
	.disable_irq = parport_ip32_disable_irq,

	.data_forward = parport_ip32_data_forward,
	.data_reverse = parport_ip32_data_reverse,

	.init_state = parport_ip32_init_state,
	.save_state = parport_ip32_save_state,
	.restore_state = parport_ip32_restore_state,
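/*
 * Only the basic SPP-style accessors are wired up here;
 * parport_ip32_probe_port() below patches in the compat/EPP/ECP methods when
 * the corresponding hardware support is detected and enabled, and whatever is
 * left presumably falls back to the generic software implementations provided
 * by the parport core.
 */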
static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
{
	/* ... */
	if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
		goto fail;

	pr_probe(p, "Found working ECR register\n");
	/* ... */

fail:
	pr_probe(p, "ECR register not found\n");
	return 0;
}

static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
{
	/* ... */
	unsigned int configa, configb;
	/* ... */
		pr_probe(p, "Unknown implementation ID: 0x%0x\n",
			 /* ... */);
	/* ... */
		pr_probe(p, "Unsupported PWord size: %u\n", pword);
	/* ... */
		pr_probe(p, "Hardware compression detected (unsupported)\n");
	/* ... */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		/* ... */
	}
	/* ... */
	for (i = 0; i < 1024; i++) {
		/* ... */
	}
	/* ... */
		pr_probe(p, "Can't get FIFO depth\n");
	/* ... */
			pr_probe(p, "Invalid data in FIFO\n");
	/* ... */
		    && readb(priv->regs.ecr) & ECR_F_EMPTY) {
			pr_probe(p, "Data lost in FIFO\n");
			/* ... */
		}
	/* ... */
		pr_probe(p, "Can't get writeIntrThreshold\n");
	/* ... */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		/* ... */
	}
	/* ... */
	parport_ip32_data_reverse(p);
	/* ... */
		pr_probe(p, "Can't get readIntrThreshold\n");
	/* ... */
	parport_ip32_data_forward(p);
	/* ... */
}
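/*
 * The FIFO probe above measures the FIFO: its depth and the write/read
 * interrupt thresholds (the writeIntrThreshold/readIntrThreshold names appear
 * in the probe messages).  The FIFO transfer routines rely on these values to
 * decide how many PWords can safely be pushed or pulled after a serviceIntr
 * interrupt fires.
 */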
				     unsigned int regshift)
{
#define r_base(offset) ((u8 __iomem *)base + ((offset) << regshift))
#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
	/* ... */
}

	/* ... */
	parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
					&mace->isa.ecp1284, 8 /* regshift */);
	*ops = parport_ip32_ops;
	/* ... */
	if (!parport_ip32_ecp_supported(p)) {
		/* ... */
	}
	/* ... */
	if (!parport_ip32_fifo_supported(p)) {
		printk(KERN_WARNING PPIP32
		       "%s: error: FIFO disabled\n", p->name);
		/* ... */
	}

	/* ... */
		printk(KERN_WARNING PPIP32
		       "%s: error: IRQ disabled\n", p->name);
		/* ... */
		pr_probe(p, "Interrupt support enabled\n");
		/* ... */

	/* ... */
	if (parport_ip32_dma_register())
		printk(KERN_WARNING PPIP32
		       "%s: error: DMA disabled\n", p->name);
	else {
		pr_probe(p, "DMA support enabled\n");
		/* ... */
	}

	/* ... */
	p->ops->compat_write_data = parport_ip32_compat_write_data;
	/* ... */
	pr_probe(p, "Hardware support for SPP mode enabled\n");

	/* ... */
	p->ops->epp_read_data = parport_ip32_epp_read_data;
	p->ops->epp_write_data = parport_ip32_epp_write_data;
	p->ops->epp_read_addr = parport_ip32_epp_read_addr;
	p->ops->epp_write_addr = parport_ip32_epp_write_addr;
	/* ... */
	pr_probe(p, "Hardware support for EPP mode enabled\n");

	/* ... */
	p->ops->ecp_write_data = parport_ip32_ecp_write_data;
	/* ... */
	pr_probe(p, "Hardware support for ECP mode enabled\n");

	/* Initialize the port with sane values */
	/* ... */
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_write_data(p, 0x00);
	/* ... */

#define printmode(x) if (p->modes & PARPORT_MODE_##x) \
		printk("%s%s", f++ ? "," : "", #x)
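/*
 * printmode() is only used right below to pretty-print the supported modes:
 * printmode(EPP), for instance, prints "EPP" when PARPORT_MODE_EPP is set in
 * p->modes, with a comma separator after the first mode printed (f is a
 * counter presumably declared in the surrounding block).
 */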
	return ERR_PTR(err);
static __exit void parport_ip32_unregister_port(struct parport *p)
{
	/* ... */
		parport_ip32_dma_unregister();
	/* ... */
}
static int __init parport_ip32_init(void)
{
	pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
	this_port = parport_ip32_probe_port();
	return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
}

static void __exit parport_ip32_exit(void)
{
	parport_ip32_unregister_port(this_port);
}
2233 "Bit mask of features to enable"
2234 ", bit 0: IRQ support"
2235 ", bit 1: DMA support"
2236 ", bit 2: hardware SPP mode"
2237 ", bit 3: hardware EPP mode"
2238 ", bit 4: hardware ECP mode");