#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
			 (((pow)-((x)&((pow)-1)))))
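/*
 * Illustration (added): PAD_POW2(5, 4) == 3, the bytes needed to round a
 * 5-byte frame up to the next 4-byte boundary, while PAD_POW2(8, 4) == 0.
 * "pow" is assumed to be a power of two.
 */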
	.inactivity_timeout = HZ,
	.aggregation_timeout = 1,
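/*
 * Note (added): inactivity_timeout is kept in jiffies (HZ is one second);
 * aggregation_timeout appears to be used directly as a jiffies delta when
 * the aggregation timer is armed in cfhsi_tx_done().
 */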
static void cfhsi_inactivity_tout(unsigned long arg)
static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
	len = skb->len + hpad + tpad;
	else if (direction < 0)
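/*
 * Note (added): the +1/-1 direction convention accounts a frame entering
 * the aggregation queue (+1) or leaving it, sent or dropped (-1); the
 * accounted length includes the head and tail padding.
 */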
static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
	if (cfhsi->cfg.aggregation_timeout == 0)
	if (cfhsi->qhead[i].qlen)
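/*
 * Note (added): a zero aggregation timeout disables aggregation entirely
 * (send at once), and a pending frame on a higher-priority queue likewise
 * forces an immediate send of the aggregate.
 */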
static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
	len += skb_queue_len(&cfhsi->qhead[i]);
static void cfhsi_abort_tx(struct cfhsi *cfhsi)
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
			  jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
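/*
 * Note (added): cfhsi->lock is retaken per dropped frame so the error/drop
 * counters and the aggregation accounting stay consistent with concurrent
 * enqueues; the inactivity timer is then re-armed under the same lock.
 */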
static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
	size_t fifo_occupancy;
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
			netdev_warn(cfhsi->ndev,
				    "%s: can't get FIFO occupancy: %d.\n",
		} else if (!fifo_occupancy)
		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
			netdev_warn(cfhsi->ndev,
				    "%s: can't read data: %d.\n",
			netdev_warn(cfhsi->ndev,
				    "%s: can't wait for flush complete: %d.\n",
			netdev_warn(cfhsi->ndev,
				    "%s: timeout waiting for flush complete.\n",
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
	skb = cfhsi_dequeue(cfhsi);
		*pemb = (u8)(hpad - 1);
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		*pfrm = (u8)(hpad - 1);
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		pfrm += skb->len + tpad;
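		/*
		 * Note (added): step past this frame plus its tail padding
		 * so the next payload frame starts on the configured
		 * alignment boundary.
		 */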
	if (cfhsi_can_send_aggregate(cfhsi))
static void cfhsi_start_tx(struct cfhsi *cfhsi)
		len = cfhsi_tx_frm(desc, cfhsi);
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				  jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
		netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
static void cfhsi_tx_done(struct cfhsi *cfhsi)
	spin_lock_bh(&cfhsi->lock);
	    cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
	    cfhsi->cfdev.flowctrl) {
		if (cfhsi_can_send_aggregate(cfhsi)) {
			spin_unlock_bh(&cfhsi->lock);
			cfhsi_start_tx(cfhsi);
				  jiffies + cfhsi->cfg.aggregation_timeout);
			spin_unlock_bh(&cfhsi->lock);
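/*
 * Note (added): once the TX backlog drains below q_low_mark the CAIF
 * flow-control callback re-opens the device; this mirrors the q_high_mark
 * check on the transmit path in cfhsi_xmit() below.
 */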
static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
	cfhsi_tx_done(cfhsi);
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
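		/*
		 * Note (added): the length above is decoded little-endian
		 * from the first two frame bytes; e.g. bytes 0x34, 0x12
		 * yield len == 0x1234.
		 */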
		netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
		netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
	skb_reset_mac_header(skb);
	cfhsi->ndev->stats.rx_packets++;
	cfhsi->ndev->stats.rx_bytes += len;
		netdev_err(cfhsi->ndev,
			   "%s: Invalid payload len: %d, ignored.\n",
static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
	while (nfrms < cfhsi->rx_state.nfrms) {
		pcffrm = pfrm + *pfrm + 1;
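		/*
		 * Note (added): the first byte of each payload frame holds
		 * its head padding minus one, so "pfrm + *pfrm + 1" lands
		 * directly on the CAIF frame itself.
		 */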
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
			netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
		skb_reset_mac_header(skb);
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
static void cfhsi_rx_done(struct cfhsi *cfhsi)
	int desc_pld_len = 0, rx_len, rx_state;
	spin_lock_bh(&cfhsi->lock);
		  jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
		desc_pld_len = cfhsi_rx_desc_len(desc);
		if (desc_pld_len < 0)
		rx_len = desc_pld_len;
		if (desc_pld_len == 0)
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
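			/*
			 * Note (added): a descriptor piggy-backed on the
			 * tail of the current payload supplies the next
			 * transfer length without waiting for a separate
			 * descriptor read.
			 */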
	res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
		netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
		cfhsi->ndev->stats.rx_errors++;
		cfhsi->ndev->stats.rx_dropped++;
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
		if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
	cfhsi->rx_state.pld_len = desc_pld_len;
	if (rx_buf != cfhsi->rx_buf)
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
static void cfhsi_rx_slowpath(unsigned long arg)
	struct cfhsi *cfhsi = (struct cfhsi *)arg;
	cfhsi_rx_done(cfhsi);
static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
	cfhsi_rx_done(cfhsi);
	struct cfhsi *cfhsi = NULL;
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		bool ca_wake = false;
		size_t fifo_occupancy = 0;
			    __func__, (unsigned)fifo_occupancy);
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
	spin_lock_bh(&cfhsi->lock);
	if (!cfhsi_tx_queue_len(cfhsi)) {
			  jiffies + cfhsi->cfg.inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
	spin_unlock_bh(&cfhsi->lock);
	res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
		cfhsi_abort_tx(cfhsi);
		netdev_err(cfhsi->ndev,
			   "%s: Failed to create HSI frame: %d.\n",
static void cfhsi_wake_down(struct work_struct *work)
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
		netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
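/*
 * Note (added): wake-up and wake-down form a symmetric handshake with the
 * remote end; both paths poll the FIFO occupancy so no data is lost while
 * the link power state changes.
 */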
static void cfhsi_out_of_sync(struct work_struct *work)
	struct cfhsi *cfhsi = NULL;
static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
	struct cfhsi *cfhsi = NULL;
static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
	struct cfhsi *cfhsi = NULL;
static void cfhsi_aggregation_tout(unsigned long arg)
	struct cfhsi *cfhsi = (struct cfhsi *)arg;
	cfhsi_start_tx(cfhsi);
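/*
 * Note (added): on aggregation timer expiry, whatever has been collected
 * so far is flushed to the link even if no high-priority frame forced an
 * earlier send.
 */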
	struct cfhsi *cfhsi = NULL;
	cfhsi = netdev_priv(dev);
	spin_lock_bh(&cfhsi->lock);
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
	    cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
	    cfhsi->cfdev.flowctrl) {
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
	spin_unlock_bh(&cfhsi->lock);
	len = cfhsi_tx_frm(desc, cfhsi);
	res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
		cfhsi_abort_tx(cfhsi);
static void cfhsi_setup(struct net_device *dev)
	struct cfhsi *cfhsi = netdev_priv(dev);
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->cfg = hsi_default_config;
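/*
 * Note (added): defaults from hsi_default_config are applied at netdev
 * setup time and may later be overridden per attribute via
 * cfhsi_netlink_parms() below.
 */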
	struct cfhsi *cfhsi = netdev_priv(ndev);
		goto err_alloc_rx_flip;
	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
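	/*
	 * Note (added): these callbacks are how the lower HSI layer signals
	 * TX/RX completion and wake-line transitions back into this driver.
	 */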
		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
		netdev_err(cfhsi->ndev,
			   "%s: can't activate HSI interface: %d.\n",
	res = cfhsi_flush_fifo(cfhsi);
		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
	cfhsi->ops->cfhsi_down(cfhsi->ops);
static int cfhsi_close(struct net_device *ndev)
	struct cfhsi *cfhsi = netdev_priv(ndev);
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
	cfhsi_abort_tx(cfhsi);
	cfhsi->ops->cfhsi_down(cfhsi->ops);
static void cfhsi_uninit(struct net_device *dev)
	struct cfhsi *cfhsi = netdev_priv(dev);
	.ndo_uninit = cfhsi_uninit,
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit,
static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
		pr_debug("no params data found\n");
		u32 inactivity_timeout = nla_get_u32(data[i]);
		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
		if (cfhsi->cfg.inactivity_timeout == 0)
			cfhsi->cfg.inactivity_timeout = 1;
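		/*
		 * Note (added): the attribute is in milliseconds, converted
		 * to jiffies above; e.g. with HZ == 100 a 5 ms request
		 * truncates to 0 and is clamped to 1 jiffy.
		 * (msecs_to_jiffies() would be the rounding-up alternative.)
		 */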
		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
		cfhsi->cfg.head_align = nla_get_u32(data[i]);
		cfhsi->cfg.tail_align = nla_get_u32(data[i]);
		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
	cfhsi_netlink_parms(data, netdev_priv(dev));
static size_t caif_hsi_get_size(const struct net_device *dev)
		s += nla_total_size(caif_hsi_policy[i].len);
static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
	struct cfhsi *cfhsi = netdev_priv(dev);
			cfhsi->cfg.inactivity_timeout) ||
			cfhsi->cfg.aggregation_timeout) ||
			cfhsi->cfg.head_align) ||
			cfhsi->cfg.tail_align) ||
			cfhsi->cfg.q_high_mark) ||
			cfhsi->cfg.q_low_mark))
static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
	struct cfhsi *cfhsi = NULL;
	cfhsi = netdev_priv(dev);
	cfhsi_netlink_parms(data, cfhsi);
	dev_net_set(cfhsi->ndev, src_net);
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
	cfhsi->ops = (*get_ops)();
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		pr_warn("%s: caif_hsi device registration failed\n", __func__);
	.priv_size = sizeof(struct cfhsi),
	.setup = cfhsi_setup,
	.policy = caif_hsi_policy,
	.newlink = caif_hsi_newlink,
	.changelink = caif_hsi_changelink,
	.get_size = caif_hsi_get_size,
	.fill_info = caif_hsi_fill_info,
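/*
 * Note (added): registering these rtnl_link_ops is what lets userspace
 * create and reconfigure the device over netlink: caif_hsi_get_size()
 * reserves room for every policy attribute and caif_hsi_fill_info()
 * emits them.
 */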
static void __exit cfhsi_exit_module(void)
	struct cfhsi *cfhsi;
static int __init cfhsi_init_module(void)