#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
/* rx limits */
#define FWNET_MAX_FRAGMENTS 30
#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)

/* tx limits */
#define FWNET_MAX_QUEUED_DATAGRAMS 20
#define FWNET_MIN_QUEUED_DATAGRAMS 10
#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS

#define IEEE1394_BROADCAST_CHANNEL 31
#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
#define IEEE1394_MAX_PAYLOAD_S100 512
#define FWNET_NO_FIFO_ADDR (~0ULL)

#define IANA_SPECIFIER_ID 0x00005eU
#define RFC2734_SW_VERSION 0x000001U

#define IEEE1394_GASP_HDR_SIZE 8
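/* An 8-byte GASP header precedes the RFC 2374 encapsulation header in
 * broadcast packets: the source node ID followed by the 24-bit specifier ID
 * and the 24-bit version, which are checked against IANA_SPECIFIER_ID and
 * RFC2734_SW_VERSION on reception. */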
#define RFC2374_UNFRAG_HDR_SIZE 4
#define RFC2374_FRAG_HDR_SIZE 8
#define RFC2374_FRAG_OVERHEAD 4
#define RFC2374_HDR_UNFRAG 0	/* unfragmented */
#define RFC2374_HDR_FIRSTFRAG 1	/* first fragment */
#define RFC2374_HDR_LASTFRAG 2	/* last fragment */
#define RFC2374_HDR_INTFRAG 3	/* interior fragment */

#define RFC2734_HW_ADDR_LEN 16
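/* The macros below decode and compose the RFC 2374 encapsulation header,
 * carried in up to two big-endian quadlets w0/w1: lf occupies bits 31-30 of
 * w0, dg_size bits 27-16, and the low bits hold either the ether_type
 * (unfragmented and first fragments) or the fragment offset fg_off
 * (interior and last fragments); the datagram label dgl sits in the upper
 * half of w1. */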
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)

#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
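/*
 * Illustrative sketch, not part of the driver as excerpted here: assuming the
 * header structure referenced by these macros has host-order 32-bit members
 * w0 and w1, an unfragmented header would be built and decoded with the
 * helpers above roughly as follows.
 *
 *	hdr.w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
 *		 | fwnet_set_hdr_ether_type(ether_type);
 *
 *	if (fwnet_get_hdr_lf(&hdr) == RFC2374_HDR_UNFRAG)
 *		ether_type = fwnet_get_hdr_ether_type(&hdr);
 */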
unsigned ether_type, unsigned dg_size, unsigned dgl)
unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
const void *saddr, unsigned len)
put_unaligned_be16(type, &h->h_proto);
static int fwnet_header_rebuild(struct sk_buff *skb)
dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n",

static int fwnet_header_cache(const struct neighbour *neigh,

static void fwnet_header_cache_update(struct hh_cache *hh,
const struct net_device *net, const unsigned char *haddr)

static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
static const struct header_ops fwnet_header_ops = {
.create = fwnet_header_create,
.rebuild = fwnet_header_rebuild,
.cache = fwnet_header_cache,
.cache_update = fwnet_header_cache_update,
.parse = fwnet_header_parse,
unsigned offset, unsigned len)
unsigned end = offset + len;
if (offset < fi->offset + fi->len && end > fi->offset)
fi->len += len + fi2->len;
if (offset + len == fi->offset) {
fi2->len += fi->len + len;
if (offset + len < fi->offset) {
dev_err(&pd->skb->dev->dev, "out of memory\n");
list_add(&new->fi_link, list);
void *frag_buf, unsigned frag_off, unsigned frag_len)
INIT_LIST_HEAD(&new->fi_list);
fi = fwnet_frag_new(new, frag_off, frag_len);
new->datagram_label = datagram_label;
new->datagram_size = dg_size;
if (new->skb == NULL)
new->pbuf = skb_put(new->skb, dg_size);
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
if (pd->datagram_label == datagram_label)
unsigned frag_off, unsigned frag_len)
if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
list_move_tail(&pd->pd_link, &peer->pd_list);
if (peer->guid == guid)
if (peer->node_id == node_id && peer->generation == generation)
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
max_rec = min(max_rec, speed + 8);
max_rec = clamp(max_rec, 8U, 11U); /* asynchronous payloads of 512 ... 4096 octets */
static int fwnet_finish_incoming_packet(struct net_device *net,
dev = netdev_priv(net);
unsigned char *arp_ptr;
arp_ptr = (unsigned char *)(arp + 1);
fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
sspd = arp1394->sspd;
dev_notice(&net->dev, "sspd %x out of range\n", sspd);
max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
peer = fwnet_peer_find_by_guid(dev, peer_guid);
peer->fifo = fifo_addr;
if (peer->speed > sspd)
peer->ip = arp1394->sip;
spin_unlock_irqrestore(&dev->lock, flags);
dev_notice(&net->dev, "no peer for ARP packet from %016llx\n", (unsigned long long)peer_guid);
if (dev_hard_header(skb, net, ether_type, is_broadcast ? &broadcast_hw : &guid, NULL, skb->len) >= 0) {
skb_reset_mac_header(skb);
net->stats.rx_errors++;
net->stats.rx_dropped++;
net->stats.rx_packets++;
net->stats.rx_errors++;
net->stats.rx_dropped++;
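/*
 * fwnet_incoming_packet(): unfragmented RFC 2374 datagrams are handed
 * straight to fwnet_finish_incoming_packet(); fragments are collected in the
 * sending peer's partial-datagram (pd) list and only a completed datagram is
 * passed on.
 */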
int source_node_id, int generation,
net->stats.rx_dropped++;
return fwnet_finish_incoming_packet(net, skb, source_node_id, is_broadcast, ether_type);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
pd = fwnet_pd_find(peer, datagram_label);
pd = fwnet_pd_new(net, peer, datagram_label, dg_size, buf, fg_off, len);
if (fwnet_frag_overlap(pd, fg_off, len) ||
pd = fwnet_pd_new(net, peer, datagram_label, dg_size, buf, fg_off, len);
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
if (fwnet_pd_is_complete(pd)) {
skb = skb_get(pd->skb);
spin_unlock_irqrestore(&dev->lock, flags);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
spin_unlock_irqrestore(&dev->lock, flags);
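/*
 * fwnet_receive_packet() is the address handler installed at dev->handler
 * for the unicast FIFO: after checking the request's offset and tcode it
 * feeds the payload to fwnet_incoming_packet().
 */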
int tcode, int destination, int source, int generation,
if (offset != dev->handler.offset)
else if (fwnet_incoming_packet(dev, payload, length, source, generation, false) != 0) {
spin_unlock_irqrestore(&dev->lock, flags);
specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, -1, true);
spin_unlock_irqrestore(&dev->lock, flags);
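/*
 * Transmit path: each outgoing datagram travels as a fwnet_packet_task
 * allocated from the slab cache below, and the count of queued datagrams
 * throttles the netdev queue between FWNET_MIN_QUEUED_DATAGRAMS and
 * FWNET_MAX_QUEUED_DATAGRAMS.
 */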
static struct kmem_cache *fwnet_packet_task_cache;

static void dec_queued_datagrams(struct fwnet_device *dev)
netif_wake_queue(dev->netdev);
dec_queued_datagrams(dev);
dev->netdev->stats.tx_packets++;
spin_unlock_irqrestore(&dev->lock, flags);
"outstanding packet %x lf %x, header %x,%x\n",
dg_size, fg_off, datagram_label);
dg_size, fg_off, datagram_label);
fwnet_send_packet(ptask);
fwnet_free_ptask(ptask);
dec_queued_datagrams(dev);
dev->netdev->stats.tx_dropped++;
dev->netdev->stats.tx_errors++;
spin_unlock_irqrestore(&dev->lock, flags);
fwnet_free_ptask(ptask);
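/*
 * fwnet_write_complete() is the completion callback of the outgoing write
 * transactions: a successful rcode finishes the packet task, anything else
 * fails it, and repeated failures are reported rate-limited.
 */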
static void fwnet_write_complete(struct fw_card *card, int rcode, void *payload, size_t length, void *data)
static unsigned long j;
static int last_rcode, errors_skipped;
fwnet_transmit_packet_done(ptask);
fwnet_transmit_packet_failed(ptask);
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
"fwnet_write_complete failed: %x (skipped %d)\n", rcode, errors_skipped);
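/*
 * fwnet_send_packet() puts one packet task on the wire: broadcast datagrams
 * go out as GASP stream packets on the broadcast channel (tx_len plus the
 * 8-byte GASP header), unicast datagrams as block writes to the peer's FIFO.
 */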
unsigned long flags;
generation = dev->card->generation;
node_id = dev->card->node_id;
fw_stream_packet_destination_id(3,
tx_len + 8, fwnet_write_complete, ptask);
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
fwnet_free_ptask(ptask);
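/*
 * fwnet_broadcast_start() sets up reception: it registers the unicast FIFO
 * address handler and allocates the isochronous context, page buffer and
 * packet queue used to receive GASP broadcasts on the broadcast channel.
 */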
static int fwnet_broadcast_start(struct fwnet_device *dev)
unsigned num_packets;
unsigned max_receive;
dev->handler.address_callback = fwnet_receive_packet;
goto failed_initial;
max_receive = 1U << (dev->card->max_receive + 1);
dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
if (IS_ERR(context)) {
retval = PTR_ERR(context);
goto failed_context_create;
goto failed_buffer_init;
goto failed_ptrs_alloc;
*ptrptr++ = (void *)((char *)ptr + v * max_receive);
packet.payload_length = max_receive;
for (u = 0; u < num_packets; u++) {
goto failed_rcv_queue;
offset += max_receive;
goto failed_rcv_queue;
failed_context_create:
static void set_carrier_state(struct fwnet_device *dev)

static int fwnet_open(struct net_device *net)
ret = fwnet_broadcast_start(dev);
netif_start_queue(net);
spin_lock_irq(&dev->lock);
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);

static int fwnet_stop(struct net_device *net)
netif_stop_queue(net);
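/*
 * The transmit routine below (fwnet_tx) copies the pseudo hardware header out
 * of the skb, looks up the destination peer by GUID for unicast frames,
 * builds an unfragmented or first-fragment RFC 2374 header depending on
 * dg_size versus max_payload, and hands the packet task to fwnet_send_packet().
 */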
u16 *datagram_label_ptr;
unsigned long flags;
if (netif_queue_stopped(dev->netdev)) {
spin_unlock_irqrestore(&dev->lock, flags);
memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
proto = hdr_buf.h_proto;
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
unsigned char *arp_ptr = (unsigned char *)(arp + 1);
arp1394->sspd = dev->card->link_speed;
if (dg_size <= max_payload) {
fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
datagram_label = (*datagram_label_ptr)++;
fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
netif_stop_queue(dev->netdev);
spin_unlock_irqrestore(&dev->lock, flags);
fwnet_send_packet(ptask);
spin_unlock_irqrestore(&dev->lock, flags);
net->stats.tx_dropped++;
net->stats.tx_errors++;
static int fwnet_change_mtu(struct net_device *net, int new_mtu)
static const struct ethtool_ops fwnet_ethtool_ops = {

static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
.ndo_start_xmit = fwnet_tx,
.ndo_change_mtu = fwnet_change_mtu,
static void fwnet_init_dev(struct net_device *net)
if (dev->card == card)
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
INIT_LIST_HEAD(&peer->pd_list);
peer->speed = device->max_speed;
peer->node_id = device->node_id;
spin_lock_irq(&dev->lock);
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
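/*
 * fwnet_probe() runs once per RFC 2734 unit. The first unit on a card
 * allocates and registers the "firewire%d" net_device; later units on the
 * same card reuse it, and every unit contributes one peer entry.
 */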
static int fwnet_probe(struct device *_dev)
struct fw_device *device = fw_parent_device(unit);
bool allocated_netdev = false;
dev = fwnet_dev_find(card);
net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
allocated_netdev = true;
dev = netdev_priv(net);
net->mtu = min(1500U, max_mtu);
dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n",
ret = fwnet_add_peer(dev, unit, device);
if (ret && allocated_netdev) {
if (ret && allocated_netdev)
spin_lock_irq(&dev->lock);
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
fwnet_pd_delete(pd);
static int fwnet_remove(struct device *_dev)
if (net && peer->ip)
fwnet_remove_peer(peer, dev);

static void fwnet_update(struct fw_unit *unit)
struct fw_device *device = fw_parent_device(unit);
spin_lock_irq(&peer->dev->lock);
spin_unlock_irq(&peer->dev->lock);
static struct fw_driver fwnet_driver = {
.name = KBUILD_MODNAME,
.probe = fwnet_probe,
.remove = fwnet_remove,
.update = fwnet_update,
.id_table = fwnet_id_table,

static const u32 rfc2374_unit_directory_data[] = {
.length = ARRAY_SIZE(rfc2374_unit_directory_data),
.data = rfc2374_unit_directory_data

static int __init fwnet_init(void)
if (!fwnet_packet_task_cache) {

static void __exit fwnet_cleanup(void)