#define MAX_PKT_DEFAULT_MCAST 1500
#define BCLINK_WIN_DEFAULT 20

static struct tipc_link *bcl = &bcast_link.link;
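/*
 * The helpers below track, per broadcast packet, how many peers still
 * have to acknowledge it. The count is stashed in the skb control block
 * ("handle" field), so no extra allocation is needed.
 */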
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
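/*
 * Every mutation of broadcast link state in this file is serialized by
 * bc_lock; the bare lock/unlock pairs below bracket such updates.
 */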
        spin_lock_bh(&bc_lock);
        spin_unlock_bh(&bc_lock);

        spin_lock_bh(&bc_lock);
        spin_unlock_bh(&bc_lock);
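/*
 * Sequence bookkeeping: last_sent only ever moves forward. Comparisons
 * use less()/less_eq(), which (assuming TIPC's usual mod-2^16 sequence
 * arithmetic) tolerate wraparound: e.g. less_eq(0xfffe, 0x0001) holds
 * because (0x0001 - 0xfffe) mod 0x10000 == 3 < 0x8000.
 */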
static void bclink_set_last_sent(void)
        node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
                                 seqno : node->bclink.last_sent;
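/*
 * Retransmit buffered broadcast packets in the range (after, to]; the
 * loop below first skips everything already acknowledged up to "after".
 */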
static void bclink_retransmit_pkt(u32 after, u32 to)

        while (buf && less_eq(buf_seqno(buf), after))
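/*
 * Acknowledge walk: skip packets this node had already acked, then
 * decrement the outstanding-ack count of each newly acked packet and
 * release those whose count drops to zero.
 */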
        unsigned int released = 0;

        spin_lock_bh(&bc_lock);
        if (less(acked, buf_seqno(crs)) ||
            less_eq(acked, n_ptr->bclink.acked))

        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
        while (crs && less_eq(buf_seqno(crs), acked)) {
                bcbuf_decr_acks(crs);
                bcbuf_set_acks(crs, 0);
                bclink_set_last_sent();

                if (bcbuf_acks(crs) == 0) {

        n_ptr->bclink.acked = acked;

        bclink_set_last_sent();

        spin_unlock_bh(&bc_lock);
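/*
 * Gap detection: oos_state counts update cycles spent out of sync. A
 * NACK is suppressed while the count is odd (the "& 0x1" test below);
 * when one is sent it names the missing range: from last_in + 1 up to
 * just before the first deferred packet, or up to last_sent if nothing
 * is deferred.
 */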
        if (less_eq(last_sent, n_ptr->bclink.last_in))

        bclink_update_last_sent(n_ptr, last_sent);
        if ((++n_ptr->bclink.oos_state) == 1) {
                n_ptr->bclink.oos_state++;

        if (n_ptr->bclink.oos_state & 0x1)
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
                                 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
                                 : n_ptr->bclink.last_sent);
                spin_lock_bh(&bc_lock);
                tipc_bearer_send(&bcbearer->bearer, buf, NULL);
                bcl->stats.sent_nacks++;
                spin_unlock_bh(&bc_lock);

                n_ptr->bclink.oos_state++;
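/*
 * A NACK addressed to another node is still worth peeking at: if that
 * node is asking for the same packet we are missing, setting oos_state
 * here defers our own NACK by one cycle, which avoids duplicate
 * requests for the same gap.
 */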
        tipc_node_lock(n_ptr);

        if (n_ptr->bclink.supported &&
            (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
                n_ptr->bclink.oos_state = 2;

        tipc_node_unlock(n_ptr);
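/*
 * Send side: the broadcast queue and its depth statistic are updated
 * under bc_lock; res records the payload size for the caller.
 */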
        spin_lock_bh(&bc_lock);

        res = msg_data_sz(buf_msg(buf));

        bclink_set_last_sent();
        bcl->stats.queue_sz_counts++;

        spin_unlock_bh(&bc_lock);
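/*
 * Accepting an in-sequence packet advances the last-seen bookkeeping
 * and clears the out-of-sync state; an explicit ACK, when one is sent
 * back, is counted in sent_acks.
 */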
        bclink_update_last_sent(node, seqno);
        node->bclink.oos_state = 0;
        bcl->stats.recv_info++;

        bcl->stats.sent_acks++;
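/*
 * Receive path. Packets from a foreign TIPC network (mismatched netid)
 * are ignored. A NACK whose destination is this node triggers
 * retransmission of the advertised gap; a NACK for another node is
 * merely peeked at (see bclink_peek_nack above).
 */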
        struct tipc_msg *msg = buf_msg(buf);

        if (msg_mc_netid(msg) != tipc_net_id)

        tipc_node_lock(node);

        if (msg_destnode(msg) == tipc_own_addr) {
                tipc_node_unlock(node);
                spin_lock_bh(&bc_lock);
                bcl->stats.recv_nacks++;
                bclink_retransmit_pkt(msg_bcgap_after(msg),
                                      msg_bcgap_to(msg));
                spin_unlock_bh(&bc_lock);
        } else {
                tipc_node_unlock(node);
                bclink_peek_nack(msg);
        }
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {

                if (likely(msg_isdata(msg))) {
                        spin_lock_bh(&bc_lock);
                        bclink_accept_pkt(node, seqno);
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                        spin_lock_bh(&bc_lock);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);

                        spin_lock_bh(&bc_lock);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
                        bcl->stats.recv_fragmented++;
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);

                        spin_lock_bh(&bc_lock);
                        bclink_accept_pkt(node, seqno);
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);

                        spin_lock_bh(&bc_lock);
                        bclink_accept_pkt(node, seqno);
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);
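/*
 * After consuming an in-sequence packet, re-take the node lock and
 * check whether the head of the deferred queue has become the next
 * expected packet; if so, pull it off so it can be processed next.
 */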
                tipc_node_lock(node);

                if (!node->bclink.deferred_head) {
                        node->bclink.oos_state = 1;

                msg = buf_msg(node->bclink.deferred_head);
                seqno = msg_seqno(msg);
                next_in = mod(next_in + 1);
                if (seqno != next_in)

                buf = node->bclink.deferred_head;
                node->bclink.deferred_size--;
        if (less(next_in, seqno)) {
                deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
                                               &node->bclink.deferred_tail,
                                               buf);
                node->bclink.deferred_size += deferred;
                bclink_update_last_sent(node, seqno);
                spin_lock_bh(&bc_lock);

                if (deferred)
                        bcl->stats.deferred_recv++;
                else
                        bcl->stats.duplicates++;

                spin_unlock_bh(&bc_lock);

        tipc_node_unlock(node);
        return (n_ptr->bclink.supported &&
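/*
 * tipc_bcbearer_send() is the send routine of the broadcast
 * pseudo-bearer: each first transmission is tagged as non-sequenced and
 * stamped with the network id, then tried on each primary/secondary
 * bearer pair in turn.
 */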
static int tipc_bcbearer_send(struct sk_buff *buf,

        if (likely(!msg_non_seq(buf_msg(buf)))) {
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {

                    p->media->send_msg(buf, p, &p->media->bcast_addr)) {

                            s->media->send_msg(buf, s,
                                               &s->media->bcast_addr)) {

                                bcbearer->bpairs[bp_index].primary = s;
                                bcbearer->bpairs[bp_index].secondary = p;
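/*
 * When only the secondary bearer of a pair got the packet out, the pair
 * is swapped above so the working bearer is tried first next time.
 * tipc_bcbearer_sort() below rebuilds the pair table under bc_lock, and
 * the tipc_nmap_equal() check compares the node sets reached by the two
 * bearers of a pair.
 */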
        spin_lock_bh(&bc_lock);

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {

        bp_curr = bcbearer->bpairs;

        if (bp_temp[pri].secondary) {
                if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,

        spin_unlock_bh(&bc_lock);
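/*
 * Statistics are printed, reset, and the queue limit updated while
 * holding bc_lock, so the counters stay consistent with one another.
 */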
        spin_lock_bh(&bc_lock);

        "  Window:%u packets\n",
        "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
        "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
        "  RX naks:%u defs:%u dups:%u\n",
        "  TX naks:%u acks:%u dups:%u\n",
        "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",

        spin_unlock_bh(&bc_lock);

        spin_lock_bh(&bc_lock);
        spin_unlock_bh(&bc_lock);

        spin_lock_bh(&bc_lock);
        spin_unlock_bh(&bc_lock);
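/*
 * Initialization wires the pseudo-bearer into the generic code: its
 * media send hook is pointed at tipc_bcbearer_send() above.
 */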
        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->media.send_msg = tipc_bcbearer_send;
        spin_lock_bh(&bc_lock);
        spin_unlock_bh(&bc_lock);

        memset(bclink, 0, sizeof(*bclink));
        memset(bcbearer, 0, sizeof(*bcbearer));
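/*
 * Node map operations: each node number maps to one bit (word index w,
 * bit mask) in a fixed-size bitmap. Add sets the bit, remove clears it,
 * and the diff below compares the two maps word by word, recording the
 * differing bits in nm_diff.
 */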
        if ((nm_ptr->map[w] & mask) == 0) {

        if ((nm_ptr->map[w] & mask) != 0) {
                nm_ptr->map[w] &= ~mask;
        memset(nm_diff, 0, sizeof(*nm_diff));

        for (w = 0; w < stop; w++) {

                for (b = 0; b < WSIZE; b++) {
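/*
 * Destination port lists are kept as chained blocks of item_sz port
 * numbers. A port already present is not added twice, a full block
 * chains a freshly allocated successor, and an allocation failure is
 * reported as incomplete multicast delivery.
 */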
        for (; ; cnt -= item_sz, item = item->next) {
                for (i = 0; i < item_sz; i++)
                        if (item->ports[i] == port)

                        item->ports[i] = port;

                        pr_warn("Incomplete multicast delivery, no memory\n");

                        item->next->next = NULL;

        for (item = pl_ptr->next; item; item = next) {