#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/caif/caif_socket.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
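/*
 * Flow control is handled per direction: one bit each for transmit and
 * receive flow, kept in the socket's flow_state word and flipped
 * atomically with set_bit()/clear_bit()/test_bit().
 */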
/* CAIF socket states are stored in sk_state and reuse the TCP values. */
enum caif_states {
	CAIF_CONNECTED		= TCP_ESTABLISHED,
	CAIF_CONNECTING		= TCP_SYN_SENT,
	CAIF_DISCONNECTED	= TCP_CLOSE
};

#define TX_FLOW_ON_BIT	1
#define RX_FLOW_ON_BIT	2

/*
 * Per-socket state. struct sock must be the first member so that a
 * struct sock pointer can be converted with container_of().
 */
struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;
	u32 flow_state;
	struct caif_connect_request conn_req;
	struct mutex readlock;
	int maxframe;
};
static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static int tx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_rx_flow_on(struct caifsock *cf_sk)
{
	set_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_tx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_tx_flow_on(struct caifsock *cf_sk)
{
	set_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
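/* Serialize readers of the stream receive queue on a per-socket mutex. */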
static void caif_read_lock(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	mutex_lock(&cf_sk->readlock);
}

static void caif_read_unlock(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	mutex_unlock(&cf_sk->readlock);
}
static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
	/* Low-water mark: one quarter of the socket receive buffer. */
	return cf_sk->sk.sk_rcvbuf / 4;
}
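/*
 * Pass a flow command (CAIF_MODEMCMD_FLOW_ON_REQ/FLOW_OFF_REQ) down the
 * CAIF stack, if the layer below provides a modemcmd operation.
 */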
static void caif_flow_ctrl(struct sock *sk, int mode)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}
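/*
 * Queue a received skb on sk->sk_receive_queue. If the queued memory
 * would exceed sk_rcvbuf, or sk_rmem_schedule() fails, receive flow is
 * switched off so the modem stops sending until the queue drains.
 */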
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	bool queued = false;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/* Receive buffer full: switch receive flow off. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		pr_debug("sending flow OFF (queue len = %d %d)\n",
			 atomic_read(&cf_sk->sk.sk_rmem_alloc),
			 sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;
	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	spin_lock_irqsave(&list->lock, flags);
	queued = !sock_flag(sk, SOCK_DEAD);
	if (queued)
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
	if (queued)
		sk->sk_data_ready(sk);
	else
		kfree_skb(skb);
	return 0;
}
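/*
 * Receive callback from the CAIF stack: convert the CAIF packet to a
 * native skb and queue it on the owning socket.
 */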
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	struct sk_buff *skb = cfpkt_tonative(pkt);

	caif_queue_rcv_skb(&cf_sk->sk, skb);
	return 0;
}
static void cfsk_hold(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	sock_hold(&cf_sk->sk);
}

static void cfsk_put(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	sock_put(&cf_sk->sk);
}
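/*
 * Control callback from the CAIF stack. Flow-on/off indications gate
 * transmit; connect/disconnect responses and remote shutdown update the
 * socket state and wake up anyone blocked in poll, connect or sendmsg.
 */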
static void caif_ctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to stop sending */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
					    cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed; sockets are expected to be
		 * writeable on connect failure.
		 */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
static void caif_check_flow_release(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (rx_flow_is_on(cf_sk))
		return;

	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
		set_rx_flow_on(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
	}
}
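/* Datagram (SOCK_SEQPACKET) receive: one skb maps to one message. */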
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *m, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int ret;
	int copylen;

	skb = skb_recv_datagram(sk, flags, 0, &ret);
	if (!skb)
		return ret;
	copylen = min_t(size_t, len, skb->len);
	ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
	skb_free_datagram(sk, skb);
	/* The queue just shrank; receive flow may be re-enabled. */
	caif_check_flow_release(sk);
	return ret ? ret : copylen;
}
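/*
 * Sleep until data arrives on the stream receive queue or the timeout
 * expires; returns the remaining timeout.
 */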
static long caif_stream_data_wait(struct sock *sk, long timeo)
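/*
 * Stream (SOCK_STREAM) receive: copy out of queued skbs until the
 * low-water target is met, waiting in caif_stream_data_wait() whenever
 * the queue is empty.
 */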
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int copied = 0, target, err = 0;
	long timeo;

	caif_read_lock(sk);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		int chunk;
		struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);

		caif_check_flow_release(sk);
		if (skb == NULL) {
			if (copied >= target)
				break;
			/* POSIX 1003.1g: report pending errors first. */
			err = sock_error(sk);
			if (err)
				break;
			err = -EAGAIN;
			if (!timeo)
				break;
			caif_read_unlock(sk);
			timeo = caif_stream_data_wait(sk, timeo);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			caif_read_lock(sk);
			continue;
		}
		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;
		/* Put back any unread remainder of the skb. */
		skb_pull(skb, chunk);
		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
		kfree_skb(skb);
	} while (size);
	caif_read_unlock(sk);
out:
	return copied ? : err;
}
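/*
 * Block the sender until transmit flow is on (and, if wait_writeable is
 * set, until the socket has send buffer space). The error code is
 * passed back through *err; the remaining timeout is returned.
 */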
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				  int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);

	for (;;) {
		*err = 0;
		if (tx_flow_is_on(cf_sk) &&
		    (!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}
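/*
 * Hand one skb to the CAIF stack: wrap it as a CAIF packet and call the
 * transmit operation of the layer below.
 */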
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;

	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);

	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
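/*
 * Datagram (SOCK_SEQPACKET) transmit: the whole message is copied into
 * a single skb and sent as one CAIF frame.
 */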
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	int ret;
	long timeo;

	ret = sock_error(sk);

	timeo = sock_sndtimeo(sk, noblock);

	if (!skb || skb_tailroom(skb) < buffer_size)

	ret = transmit_skb(skb, cf_sk, noblock, timeo);
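/*
 * Stream (SOCK_STREAM) transmit: the payload is split into skb-sized
 * chunks (bounded by the available tailroom), blocking in
 * caif_wait_for_flow_on() while transmit flow is off.
 */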
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	long timeo;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

		size = min_t(int, size, skb_tailroom(skb));

		err = transmit_skb(skb, cf_sk,
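/*
 * SOL_CAIF socket options: select the CAIF link and, for utility
 * channels, pass an opaque request parameter used at connect time.
 */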
static int setsockopt(struct socket *sock, int lvl, int opt,
		      char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int linksel;

	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
		return -ENOPROTOOPT;

	switch (opt) {
	case CAIFSO_LINK_SELECT:
		if (ol < sizeof(int) || lvl != SOL_CAIF)
			return -EINVAL;
		if (copy_from_user(&linksel, ov, sizeof(int)))
			return -EINVAL;
		lock_sock(&(cf_sk->sk));
		cf_sk->conn_req.link_selector = linksel;
		release_sock(&cf_sk->sk);
		return 0;

	case CAIFSO_REQ_PARAM:
		if (lvl != SOL_CAIF)
			return -ENOPROTOOPT;
		lock_sock(&(cf_sk->sk));
		if (ol > sizeof(cf_sk->conn_req.param.data) ||
		    copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
			release_sock(&cf_sk->sk);
			return -EINVAL;
		}
		cf_sk->conn_req.param.size = ol;
		release_sock(&cf_sk->sk);
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
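/*
 * Connect to a CAIF service: fill in the connect request from socket
 * state, register the receive/control callbacks and ask the CAIF core
 * to set up the channel. The usable frame size (maxframe) is derived
 * from the device MTU minus the stack's headroom and tailroom.
 */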
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	switch (sock->state) {

	switch (sk->sk_state) {

	cf_sk->conn_req.priority = cf_sk->sk.sk_priority;

	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;

	cf_sk->layer.receive = caif_sktrecv_cb;

	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
				  &cf_sk->layer, &ifindex, &headroom, &tailroom);

	cf_sk->maxframe = mtu - (headroom + tailroom);
	if (cf_sk->maxframe < 1) {
		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	err = sock_error(sk);
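/* Release the socket: stop transmit flow and tear down the channel. */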
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	set_tx_flow_off(cf_sk);

	lock_sock(&(cf_sk->sk));
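/*
 * Poll: in addition to the usual receive checks, the socket is only
 * reported writeable while transmit flow is on.
 */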
static unsigned int caif_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
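/* Seqpacket and stream sockets share everything except send/receive. */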
static const struct proto_ops caif_seqpacket_ops = {
	.release = caif_release,
	.connect = caif_connect,
	.poll = caif_poll,
	.setsockopt = setsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
};
static const struct proto_ops caif_stream_ops = {
	.release = caif_release,
	.connect = caif_connect,
	.poll = caif_poll,
	.setsockopt = setsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
};
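/* Last-resort check that the socket really is dead before it is freed. */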
static void caif_sock_destructor(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
		return;
	}
}
static int caif_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk = NULL;
	struct caifsock *cf_sk = NULL;
	static struct proto prot = {
		.name = "PF_CAIF",
		.owner = THIS_MODULE,
		.obj_size = sizeof(struct caifsock),
	};

	if (sock->type == SOCK_SEQPACKET)
		sock->ops = &caif_seqpacket_ops;
	else if (sock->type == SOCK_STREAM)
		sock->ops = &caif_stream_ops;
	else
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
	if (!sk)
		return -ENOMEM;

	cf_sk = container_of(sk, struct caifsock, sk);

	lock_sock(&(cf_sk->sk));

	cf_sk->layer.ctrlcmd = caif_ctrl_cb;

	set_tx_flow_off(cf_sk);
	set_rx_flow_on(cf_sk);
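/* Socket-family glue: how the kernel creates PF_CAIF sockets. */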
static const struct net_proto_family caif_family_ops = {
	.family = PF_CAIF,
	.create = caif_create,
	.owner = THIS_MODULE,
};
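/* Module init/exit: register and unregister the PF_CAIF family. */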
static int __init caif_sktinit_module(void)
{
	return sock_register(&caif_family_ops);
}

static void __exit caif_sktexit_module(void)
{
	sock_unregister(PF_CAIF);
}

module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);