#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>

#include <asm/byteorder.h>

#ifdef CONFIG_PPC_PMAC
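/*
 * Control-word flags OR'ed into the DMA program descriptors that this
 * driver builds for the controller's AR/AT and isochronous contexts.
 */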
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
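/*
 * Sizing of the asynchronous receive (AR) buffer ring: AR_BUFFER_SIZE bytes
 * split into page-sized buffers, with the first AR_WRAPAROUND_PAGES pages
 * mapped a second time after the end of the ring so that a packet which
 * wraps around can still be parsed as one contiguous chunk.
 */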
#define AR_BUFFER_SIZE		(32*1024)
#define AR_BUFFERS_MIN		DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
#define AR_BUFFERS		(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

#define CONFIG_ROM_SIZE 1024
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd

#define QUIRK_CYCLE_TIMER	1
#define QUIRK_RESET_PACKET	2
#define QUIRK_BE_HEADERS	4
#define QUIRK_NO_1394A		8
#define QUIRK_NO_MSI		16
#define QUIRK_TI_SLLZ059	32
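/*
 * Per-device quirk flags, looked up by PCI vendor/device/revision in the
 * ohci_quirks[] table below and overridable at load time through the
 * "quirks" module parameter (param_quirks).
 */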
static const struct {

static int param_quirks;

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8

static int param_debug;

	", or a combination, or all = -1)");
	if (likely(!(param_debug &

	dev_notice(ohci->card.device,
	    "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
	static const char *speed[] = {
		[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",

	static const char *power[] = {
		[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
		[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",

	static const char port[] = { '.', '-', 'p', 'c', };
	static char _p(u32 *s, int shift)
		return port[*s >> shift & 3];
	dev_notice(ohci->card.device,
		   "%d selfIDs, generation %d, local node ID %04x\n",
		   self_id_count, generation, ohci->node_id);

		if ((*s & 1 << 23) == 0)
			dev_notice(ohci->card.device,
			    "selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
			dev_notice(ohci->card.device,
			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",

static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
static void log_ar_at_event(struct fw_ohci *ohci,
			    char dir, int speed, u32 *header, int evt)
	int tcode = header[0] >> 4 & 0xf;

			dev_notice(ohci->card.device,
				   "A%c evt_bus_reset, generation %d\n",
				   dir, (header[2] >> 16) & 0xff);

	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);

		dev_notice(ohci->card.device,
			   dir, evts[evt], tcodes[tcode]);
		dev_notice(ohci->card.device,
			   "A%c %s, PHY %08x %08x\n",
			   dir, evts[evt], header[1], header[2]);
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		dev_notice(ohci->card.device,
			   "A%c spd %x tl %02x, "
			   dir, speed, header[0] >> 10 & 0x3f,
			   header[1] >> 16, header[0] >> 16, evts[evt],
			   tcodes[tcode], header[1] & 0xffff, header[2], specific);
		dev_notice(ohci->card.device,
			   "A%c spd %x tl %02x, "
			   dir, speed, header[0] >> 10 & 0x3f,
			   header[1] >> 16, header[0] >> 16, evts[evt],
			   tcodes[tcode], specific);
static inline void flush_writes(const struct fw_ohci *ohci)

static int read_phy_reg(struct fw_ohci *ohci, int addr)
	for (i = 0; i < 3 + 100; i++) {
	dev_err(ohci->card.device, "failed to read phy reg\n");

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
	for (i = 0; i < 3 + 100; i++) {
	dev_err(ohci->card.device, "failed to write phy reg\n");

static int update_phy_reg(struct fw_ohci *ohci, int addr,
	int ret = read_phy_reg(ohci, addr);
	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
	return read_phy_reg(ohci, addr);

static int ohci_read_phy_reg(struct fw_card *card, int addr)
	ret = read_phy_reg(ohci, addr);

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
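/*
 * Asynchronous receive (AR) DMA.  The controller fills a circular list of
 * page-sized buffers; ar_context_tasklet() finds the last buffer the
 * hardware has touched, syncs it for the CPU, hands complete packets to
 * the core, and recycles the consumed buffers back into the ring.
 */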
	return page_private(ctx->pages[i]);

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)

static void ar_context_release(struct ar_context *ctx)
				  ar_buffer_bus(ctx, i),

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
		dev_err(ohci->card.device, "AR error: %s; DMA stopped\n",

static inline unsigned int ar_next_buffer_index(unsigned int index)

static inline unsigned int ar_prev_buffer_index(unsigned int index)

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)

static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
	i = ar_first_buffer_index(ctx);
	while (i != last && res_count == 0) {
		next_i = ar_next_buffer_index(i);
			next_i = ar_next_buffer_index(next_i);
				goto next_buffer_is_active;

next_buffer_is_active:
			res_count = next_res_count;
		ar_context_abort(ctx, "corrupted descriptor");

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
					ar_buffer_bus(ctx, i),
		i = ar_next_buffer_index(i);
	if (end_buffer_offset > 0)
					ar_buffer_bus(ctx, i),
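/*
 * cond_le32_to_cpu() converts a received header quadlet from the
 * controller's byte order: normally little-endian, but left untouched
 * when QUIRK_BE_HEADERS says the controller already stored it in host
 * (big-endian) order.
 */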
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
	tcode = (p.header[0] >> 4) & 0x0f;
		ar_context_abort(ctx, "invalid packet length");
		ar_context_abort(ctx, "invalid tcode");
	evt = (status >> 16) & 0x1f;
	p.speed = (status >> 21) & 0x7;
	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
	return buffer + length + 1;
static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
		next = handle_ar_packet(ctx, p);

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
					  ar_buffer_bus(ctx, i),
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
static void ar_context_tasklet(unsigned long data)
	unsigned int end_buffer_index, end_buffer_offset;

	end_buffer_index = ar_search_last_active_buffer(ctx,
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		p = handle_ar_packets(ctx, p, buffer_end);
	p = handle_ar_packets(ctx, p, end);
		ar_context_abort(ctx, "inconsistent descriptor");
	ar_recycle_buffers(ctx, end_buffer_index);

			   unsigned int descriptors_offset, u32 regs)
		set_page_private(ctx->pages[i], dma_addr);
		pages[i]              = ctx->pages[i];
		pages[AR_BUFFERS + i] = ctx->pages[i];
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	ar_context_release(ctx);

static void ar_context_run(struct ar_context *ctx)
		ar_context_link_page(ctx, i);
static void context_tasklet(unsigned long data)
		last = find_branch_descriptor(d, z);
		if (old_desc != desc) {
			unsigned long flags;
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);

static int context_add_buffer(struct context *ctx)
	offset = (void *)&desc->buffer - (void *)desc;

static int context_init(struct context *ctx, struct fw_ohci *ohci,
	if (context_add_buffer(ctx) < 0)

static void context_release(struct context *ctx)
			  ((void *)&desc->buffer - (void *)desc));

	if (desc->list.next == &ctx->buffer_list) {
		if (context_add_buffer(ctx) < 0)
		ctx->buffer_tail = desc;
	memset(d, 0, z * sizeof(*d));

static void context_append(struct context *ctx,
	ctx->prev = find_branch_descriptor(d, z);

static void context_stop(struct context *ctx)
	for (i = 0; i < 1000; i++) {
	dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg);
static int at_context_queue_packet(struct context *ctx,
	d = context_get_descriptors(ctx, 4, &d_bus);
	tcode = (packet->header[0] >> 4) & 0x0f;
	header = (__le32 *) &d[1];
					(packet->speed << 16));
					(packet->header[0] & 0xffff0000));
					(packet->speed << 16));
		if (is_ping_packet(&packet->header[1]))
					(packet->speed << 16));
	driver_data = (struct driver_data *) &d[3];
		payload_bus = d_bus + 3 * sizeof(*d);
	context_append(ctx, d, z, 4 - z);
		context_run(ctx, 0);

static void at_context_flush(struct context *ctx)
	tasklet_disable(&ctx->tasklet);
	context_tasklet((unsigned long)ctx);
	tasklet_enable(&ctx->tasklet);

	struct driver_data *driver_data;
	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
		packet->ack = evt - 0x10;
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
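/*
 * The HEADER_GET_*() macros pick fields out of IEEE 1394 request header
 * quadlets; the local request handlers below use them to serve requests
 * aimed at this node itself (config ROM reads and CSR lock operations)
 * without putting anything on the bus.
 */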
static void handle_local_rom(struct fw_ohci *ohci,

static void handle_local_lock(struct fw_ohci *ohci,
	u32 lock_arg, lock_data;
	for (try = 0; try < 20; try++)
				  &lock_old, sizeof(lock_old));
	dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n");

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
	if (ctx == &ctx->ohci->at_request_ctx) {
			((unsigned long long)
		handle_local_rom(ctx->ohci, packet, csr);
		handle_local_lock(ctx->ohci, packet, csr);
	if (ctx == &ctx->ohci->at_request_ctx)
	if (ctx == &ctx->ohci->at_response_ctx) {

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
	unsigned long flags;
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);
static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
			"DMA context %s has stopped, error code: %s\n",
			name, evts[ctl & 0x1f]);

static void handle_dead_contexts(struct fw_ohci *ohci)
	for (i = 0; i < 32; ++i) {
	for (i = 0; i < 32; ++i) {
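/*
 * The 32-bit isochronous cycle timer register packs three fields:
 * bits 31-25 seconds, bits 24-12 the cycle count (8000 cycles per second),
 * bits 11-0 the offset within the current cycle (3072 ticks of the
 * 24.576 MHz clock per cycle).  cycle_timer_ticks() flattens that into a
 * single tick count.
 */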
static u32 cycle_timer_ticks(u32 cycle_timer)
	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);
static u32 get_cycle_time(struct fw_ohci *ohci)
		t0 = cycle_timer_ticks(c0);
		t1 = cycle_timer_ticks(c1);
		t2 = cycle_timer_ticks(c2);
	} while ((diff01 <= 0 || diff12 <= 0 ||
		  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)

static u32 update_bus_time(struct fw_ohci *ohci)
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
				 (cycle_time_seconds & 0x40);
	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
	return ohci->bus_time | cycle_time_seconds;
static int get_status_for_port(struct fw_ohci *ohci, int port_index)
	reg = write_phy_reg(ohci, 7, port_index);
	reg = read_phy_reg(ohci, 8);
	switch (reg & 0x0f) {

static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
	for (i = 0; i < self_id_count; i++) {
		if ((self_id & 0xff000000) == (entry & 0xff000000))
		if ((self_id & 0xff000000) < (entry & 0xff000000))

static int initiated_reset(struct fw_ohci *ohci)
	reg = write_phy_reg(ohci, 7, 0xe0);
	reg = read_phy_reg(ohci, 8);
	reg = write_phy_reg(ohci, 8, reg);
	reg = read_phy_reg(ohci, 12);
	if ((reg & 0x08) == 0x08) {
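/*
 * When the hardware's self ID buffer is missing the local node's entry
 * (seen with the QUIRK_TI_SLLZ059 PHY combination), find_and_insert_self_id()
 * rebuilds that self ID from PHY registers (phy ID, gap count, power class,
 * port status) and splices it into the sorted list of received self IDs.
 */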
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
	u32 self_id = 0x8040c800;

		dev_notice(ohci->card.device,
			   "node ID not valid, new bus reset in progress\n");
	self_id |= ((reg & 0x3f) << 24);

	reg = ohci_read_phy_reg(&ohci->card, 4);
	self_id |= ((reg & 0x07) << 8);

	reg = ohci_read_phy_reg(&ohci->card, 1);
	self_id |= ((reg & 0x3f) << 16);

	for (i = 0; i < 3; i++) {
		status = get_status_for_port(ohci, i);
			self_id |= ((status & 0x3) << (6 - (i * 2)));
	self_id |= initiated_reset(ohci);
	pos = get_self_id_pos(ohci, self_id, self_id_count);
	return self_id_count;
	void *free_rom = NULL;

	if (!(reg & OHCI1394_NodeID_idValid)) {
		dev_notice(ohci->card.device,
			   "node ID not valid, new bus reset in progress\n");
		dev_notice(ohci->card.device, "malconfigured bus\n");
	if (!(ohci->is_root && is_new_root))
		dev_notice(ohci->card.device, "inconsistent self IDs\n");

	self_id_count = (reg >> 3) & 0xff;
	if (self_id_count > 252) {
		dev_notice(ohci->card.device, "inconsistent self IDs\n");
	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
				dev_notice(ohci->card.device,
					   "ignoring spurious self IDs\n");
			dev_notice(ohci->card.device,
				   "inconsistent self IDs\n");
		self_id_count = find_and_insert_self_id(ohci, self_id_count);
		if (self_id_count < 0) {
			dev_notice(ohci->card.device,
				   "could not construct local self ID\n");
	if (self_id_count == 0) {
		dev_notice(ohci->card.device, "inconsistent self IDs\n");
	if (new_generation != generation) {
		dev_notice(ohci->card.device,
			   "new bus reset, discarding self ids\n");

	spin_lock_irq(&ohci->lock);
	spin_unlock_irq(&ohci->lock);
	spin_lock_irq(&ohci->lock);
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	spin_unlock_irq(&ohci->lock);
				  free_rom, free_rom_bus);
	log_selfids(ohci, generation, self_id_count);
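/*
 * Interrupt handling: the handler bails out on an all-zero or all-ones
 * event word (not our interrupt, or the card has been removed), logs the
 * events, and then dispatches the isochronous, error, and bus-time sources
 * seen below.
 */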
	if (!event || !~event)
	log_irqs(ohci, event);

		i = ffs(iso_event) - 1;
		iso_event &= ~(1 << i);
		i = ffs(iso_event) - 1;
		iso_event &= ~(1 << i);

		dev_err(ohci->card.device, "register access failure\n");
			  OHCI1394_postedWriteErr);
		if (printk_ratelimit())
			dev_err(ohci->card.device, "PCI posted write error\n");
		if (printk_ratelimit())
			dev_notice(ohci->card.device,
				   "isochronous cycle too long\n");
		if (printk_ratelimit())
			dev_notice(ohci->card.device,
				   "isochronous cycle inconsistent\n");
		handle_dead_contexts(ohci);

		spin_lock(&ohci->lock);
		update_bus_time(ohci);
		spin_unlock(&ohci->lock);
	for (i = 0; i < 500; i++) {

	size_t size = length * 4;

static int configure_1394a_enhancements(struct fw_ohci *ohci)
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
		ret = read_paged_phy_reg(ohci, 1, 8);
			enable_1394a = true;
		enable_1394a = false;
	ret = update_phy_reg(ohci, 5, clear, set);
			    OHCI1394_HCControl_programPhyEnable);

static int probe_tsb41ba3d(struct fw_ohci *ohci)
	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
	reg = read_phy_reg(ohci, 2);
	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
		reg = read_paged_phy_reg(ohci, 1, i + 10);
static int ohci_enable(struct fw_card *card,
	for (lps = 0, i = 0; !lps && i < 3; i++) {
		ret = probe_tsb41ba3d(ohci);
			dev_notice(card->device, "local TSB41BA3D phy\n");
	for (i = 0; i < 32; i++)
	ret = configure_1394a_enhancements(ohci);
		pci_enable_msi(dev);
			ohci_driver_name, ohci)) {

	irqs =	OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		OHCI1394_RQPkt | OHCI1394_RSPkt |
		OHCI1394_isochTx | OHCI1394_isochRx |
		OHCI1394_postedWriteErr |
		OHCI1394_selfIDComplete |
		OHCI1394_regAccessFail |
		OHCI1394_cycleInconsistent |
		OHCI1394_unrecoverableError |
		OHCI1394_cycleTooLong |
static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
	if (next_config_rom == NULL)
	spin_lock_irq(&ohci->lock);
		next_config_rom = NULL;
	spin_unlock_irq(&ohci->lock);
	if (next_config_rom != NULL)

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
	struct driver_data *driver_data = packet->driver_data;
	tasklet_disable(&ctx->tasklet);
	if (packet->ack != 0)
	log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
	tasklet_enable(&ctx->tasklet);
static int ohci_enable_phys_dma(struct fw_card *card,
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	unsigned long flags;
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	spin_unlock_irqrestore(&ohci->lock, flags);

static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
	unsigned long flags;

	switch (csr_offset) {
			return get_cycle_time(ohci);
		value = update_bus_time(ohci);
		spin_unlock_irqrestore(&ohci->lock, flags);
		return (value >> 4) & 0x0ffff00f;

static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
	unsigned long flags;

	switch (csr_offset) {
				  OHCI1394_LinkControl_cycleMaster);
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
				  OHCI1394_LinkControl_cycleMaster);
		if (value & CSR_STATE_BIT_ABDICATE)
			  OHCI1394_cycleInconsistent);
		ohci->bus_time = (update_bus_time(ohci) & 0x40) |
		spin_unlock_irqrestore(&ohci->lock, flags);
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
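/*
 * Isochronous completion path: copy_iso_headers() reorders and byte-swaps
 * the header quadlets that the controller wrote into the receive buffer,
 * and flush_iso_completions() hands the accumulated headers to the
 * context's completion callback.
 */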
static void flush_iso_completions(struct iso_context *ctx)
			   ctx->base.callback_data);

static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
		flush_iso_completions(ctx);
	if (ctx->base.header_size > 0)
		ctx_hdr[0] = swab32(dma_hdr[1]);
	if (ctx->base.header_size > 4)
		ctx_hdr[1] = swab32(dma_hdr[0]);
	if (ctx->base.header_size > 8)
		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);

static int handle_ir_packet_per_buffer(struct context *context,
	for (pd = d; pd <= last; pd++)
				      buffer_dma & ~PAGE_MASK,
	copy_iso_headers(ctx, (u32 *) (last + 1));
		flush_iso_completions(ctx);

static int handle_ir_buffer_fill(struct context *context,
	if (completed > 0) {
					      buffer_dma & ~PAGE_MASK,
				   buffer_dma + completed,
				   ctx->base.callback_data);

static void flush_ir_buffer_fill(struct iso_context *ctx)
			     ctx->base.callback_data);

static inline void sync_it_packet_for_cpu(struct context *context,
					      buffer_dma & ~PAGE_MASK,

static int handle_it_packet(struct context *context,
	for (pd = d; pd <= last; pd++)
	sync_it_packet_for_cpu(context, d);
		flush_iso_completions(ctx);
		flush_iso_completions(ctx);
	spin_lock_irq(&ohci->lock);
		callback = handle_it_packet;
		callback = handle_ir_packet_per_buffer;
		callback = handle_ir_buffer_fill;
	spin_unlock_irq(&ohci->lock);
		return ERR_PTR(ret);

	memset(ctx, 0, sizeof(*ctx));
	ret = context_init(&ctx->context, ohci, regs, callback);
		goto out_with_header;
		set_multichannel_mask(ohci, 0);

	spin_lock_irq(&ohci->lock);
	spin_unlock_irq(&ohci->lock);

	return ERR_PTR(ret);

	if (ctx->context.last->branch_address == 0)
	switch (ctx->base.type) {
				(cycle & 0x7fff) << 16;
		context_run(&ctx->context, match);
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
			match |= (cycle & 0x07fff) << 12;
		context_run(&ctx->context, control);

	switch (ctx->base.type) {

	unsigned long flags;

	ohci_stop_iso(base);
	context_release(&ctx->context);

	switch (base->type) {
	spin_unlock_irqrestore(&ohci->lock, flags);

	unsigned long flags;

	switch (base->type) {
		set_multichannel_mask(ohci, *channels);
	spin_unlock_irqrestore(&ohci->lock, flags);

static void ohci_resume_iso_dma(struct fw_ohci *ohci)
	for (i = 0 ; i < ohci->n_ir ; i++) {
	for (i = 0 ; i < ohci->n_it ; i++) {
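/*
 * The three queue_iso_*() variants below build descriptor chains for the
 * three isochronous context types (transmit, packet-per-buffer receive,
 * buffer-fill receive); ohci_queue_iso() picks one based on the context
 * type while holding ohci->lock.
 */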
static int queue_iso_transmit(struct iso_context *ctx,
			      unsigned long payload)
	u32 z, header_z, payload_z, irq;
	payload_z = end_page - (payload_index >> PAGE_SHIFT);
	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	header = (__le32 *) &d[1];
	pd = d + z - payload_z;
	for (i = 0; i < payload_z; i++) {
		next_page_index = (page + 1) << PAGE_SHIFT;
			min(next_page_index, payload_end_index) - payload_index;
		page_bus = page_private(buffer->pages[page]);
					 page_bus, offset, length,
	last = z == 2 ? d : d + z - 1;
	context_append(&ctx->context, d, z, header_z);

static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       unsigned long payload)
	struct device *device = ctx->context.ohci->card.device;
	int page, offset, packet_count, header_size, payload_per_buffer;
	header_size = max(ctx->base.header_size, (size_t)8);
	for (i = 0; i < packet_count; i++) {
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (packet->skip && i == 0)
		rest = payload_per_buffer;
		for (j = 1; j < z; j++) {
			if (offset + rest < PAGE_SIZE)
				length = PAGE_SIZE - offset;
			page_bus = page_private(buffer->pages[page]);
		if (packet->interrupt && i == packet_count - 1)
		context_append(&ctx->context, d, z, header_z);

static int queue_iso_buffer_fill(struct iso_context *ctx,
				 unsigned long payload)
	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (packet->skip && i == 0)
		if (offset + rest < PAGE_SIZE)
			length = PAGE_SIZE - offset;
		page_bus = page_private(buffer->pages[page]);
					 page_bus, offset, length,
		context_append(&ctx->context, d, 1, 0);

			  unsigned long payload)
	unsigned long flags;
	switch (base->type) {
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	struct context *ctx =

static int ohci_flush_iso_completions(struct fw_iso_context *base)
	tasklet_disable(&ctx->context.tasklet);
	context_tasklet((unsigned long)&ctx->context);
	switch (base->type) {
		flush_iso_completions(ctx);
		flush_ir_buffer_fill(ctx);
	tasklet_enable(&ctx->context.tasklet);
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.flush_iso_completions	= ohci_flush_iso_completions,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
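/*
 * On PowerMac hardware the FireWire cell and its cable power are switched
 * through platform feature calls; pmac_ohci_on()/pmac_ohci_off() wrap those
 * calls and compile away on other platforms.
 */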
#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);

static void pmac_ohci_off(struct pci_dev *dev)
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);

#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
	pci_set_drvdata(dev, ohci);
		dev_err(&dev->dev, "invalid MMIO resource\n");
		dev_err(&dev->dev, "MMIO resource unavailable\n");
		dev_err(&dev->dev, "failed to remap registers\n");

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		     ohci_quirks[i].device == dev->device) &&
			ohci->quirks = ohci_quirks[i].flags;
		ohci->quirks = param_quirks;

		goto fail_arreq_ctx;
		goto fail_arrsp_ctx;
		goto fail_atreq_ctx;

	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;

	dev_notice(&dev->dev,
		   "added OHCI v%x.%x device as card %d, "
		   "%d IR + %d IT contexts, quirks 0x%x\n",
		   version >> 16, version & 0xff, ohci->card.index,
static void pci_remove(struct pci_dev *dev)
	ohci = pci_get_drvdata(dev);
	dev_notice(&dev->dev, "removed fw-ohci device\n");

	struct fw_ohci *ohci = pci_get_drvdata(dev);
		dev_err(&dev->dev, "pci_save_state failed\n");
		dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err);

static int pci_resume(struct pci_dev *dev)
	struct fw_ohci *ohci = pci_get_drvdata(dev);
		dev_err(&dev->dev, "pci_enable_device failed\n");
	err = ohci_enable(&ohci->card, NULL, 0);
	ohci_resume_iso_dma(ohci);
static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.remove		= pci_remove,
	.resume		= pci_resume,
	.suspend	= pci_suspend,

#ifndef CONFIG_IEEE1394_OHCI1394_MODULE