/*
 * hwchannel.c - mISDN hardware channel helper functions
 * (Linux kernel 3.7.1, drivers/isdn/mISDN/hwchannel.c)
 */
/*
 *
 * Author Karsten Keil <[email protected]>
 *
 * Copyright 2008 by Karsten Keil <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
17 
18 #include <linux/gfp.h>
19 #include <linux/module.h>
20 #include <linux/mISDNhw.h>
21 
22 static void
23 dchannel_bh(struct work_struct *ws)
24 {
25  struct dchannel *dch = container_of(ws, struct dchannel, workq);
26  struct sk_buff *skb;
27  int err;
28 
30  while ((skb = skb_dequeue(&dch->rqueue))) {
31  if (likely(dch->dev.D.peer)) {
32  err = dch->dev.D.recv(dch->dev.D.peer, skb);
33  if (err)
34  dev_kfree_skb(skb);
35  } else
36  dev_kfree_skb(skb);
37  }
38  }
40  if (dch->phfunc)
41  dch->phfunc(dch);
42  }
43 }
44 
45 static void
46 bchannel_bh(struct work_struct *ws)
47 {
48  struct bchannel *bch = container_of(ws, struct bchannel, workq);
49  struct sk_buff *skb;
50  int err;
51 
53  while ((skb = skb_dequeue(&bch->rqueue))) {
54  bch->rcount--;
55  if (likely(bch->ch.peer)) {
56  err = bch->ch.recv(bch->ch.peer, skb);
57  if (err)
58  dev_kfree_skb(skb);
59  } else
60  dev_kfree_skb(skb);
61  }
62  }
63 }
64 
65 int
66 mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
67 {
69  ch->maxlen = maxlen;
70  ch->hw = NULL;
71  ch->rx_skb = NULL;
72  ch->tx_skb = NULL;
73  ch->tx_idx = 0;
74  ch->phfunc = phf;
75  skb_queue_head_init(&ch->squeue);
76  skb_queue_head_init(&ch->rqueue);
77  INIT_LIST_HEAD(&ch->dev.bchannels);
78  INIT_WORK(&ch->workq, dchannel_bh);
79  return 0;
80 }
82 
83 int
84 mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
85  unsigned short minlen)
86 {
87  ch->Flags = 0;
88  ch->minlen = minlen;
89  ch->next_minlen = minlen;
90  ch->init_minlen = minlen;
91  ch->maxlen = maxlen;
92  ch->next_maxlen = maxlen;
93  ch->init_maxlen = maxlen;
94  ch->hw = NULL;
95  ch->rx_skb = NULL;
96  ch->tx_skb = NULL;
97  ch->tx_idx = 0;
98  skb_queue_head_init(&ch->rqueue);
99  ch->rcount = 0;
100  ch->next_skb = NULL;
101  INIT_WORK(&ch->workq, bchannel_bh);
102  return 0;
103 }
105 
106 int
108 {
109  if (ch->tx_skb) {
110  dev_kfree_skb(ch->tx_skb);
111  ch->tx_skb = NULL;
112  }
113  if (ch->rx_skb) {
114  dev_kfree_skb(ch->rx_skb);
115  ch->rx_skb = NULL;
116  }
117  skb_queue_purge(&ch->squeue);
118  skb_queue_purge(&ch->rqueue);
119  flush_work(&ch->workq);
120  return 0;
121 }
123 
124 void
126 {
127  if (ch->tx_skb) {
128  dev_kfree_skb(ch->tx_skb);
129  ch->tx_skb = NULL;
130  }
131  ch->tx_idx = 0;
132  if (ch->rx_skb) {
133  dev_kfree_skb(ch->rx_skb);
134  ch->rx_skb = NULL;
135  }
136  if (ch->next_skb) {
137  dev_kfree_skb(ch->next_skb);
138  ch->next_skb = NULL;
139  }
146  ch->dropcnt = 0;
147  ch->minlen = ch->init_minlen;
148  ch->next_minlen = ch->init_minlen;
149  ch->maxlen = ch->init_maxlen;
150  ch->next_maxlen = ch->init_maxlen;
151  skb_queue_purge(&ch->rqueue);
152  ch->rcount = 0;
153 }
155 
156 void
158 {
159  cancel_work_sync(&ch->workq);
161 }
163 
164 int
166 {
167  int ret = 0;
168 
169  switch (cq->op) {
170  case MISDN_CTRL_GETOP:
173  break;
175  if (cq->p1) {
176  memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
178  } else {
180  }
181  break;
182  case MISDN_CTRL_RX_OFF:
183  /* read back dropped byte count */
184  cq->p2 = bch->dropcnt;
185  if (cq->p1)
187  else
189  bch->dropcnt = 0;
190  break;
192  if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
193  bch->next_maxlen = cq->p2;
194  if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
195  bch->next_minlen = cq->p1;
196  /* we return the old values */
197  cq->p1 = bch->minlen;
198  cq->p2 = bch->maxlen;
199  break;
200  default:
201  pr_info("mISDN unhandled control %x operation\n", cq->op);
202  ret = -EINVAL;
203  break;
204  }
205  return ret;
206 }
208 
209 static inline u_int
210 get_sapi_tei(u_char *p)
211 {
212  u_int sapi, tei;
213 
214  sapi = *p >> 2;
215  tei = p[1] >> 1;
216  return sapi | (tei << 8);
217 }
218 
219 void
221 {
222  struct mISDNhead *hh;
223 
224  if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
225  dev_kfree_skb(dch->rx_skb);
226  dch->rx_skb = NULL;
227  return;
228  }
229  hh = mISDN_HEAD_P(dch->rx_skb);
230  hh->prim = PH_DATA_IND;
231  hh->id = get_sapi_tei(dch->rx_skb->data);
232  skb_queue_tail(&dch->rqueue, dch->rx_skb);
233  dch->rx_skb = NULL;
235 }
237 
238 void
239 recv_Echannel(struct dchannel *ech, struct dchannel *dch)
240 {
241  struct mISDNhead *hh;
242 
243  if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
244  dev_kfree_skb(ech->rx_skb);
245  ech->rx_skb = NULL;
246  return;
247  }
248  hh = mISDN_HEAD_P(ech->rx_skb);
249  hh->prim = PH_DATA_E_IND;
250  hh->id = get_sapi_tei(ech->rx_skb->data);
251  skb_queue_tail(&dch->rqueue, ech->rx_skb);
252  ech->rx_skb = NULL;
254 }
256 
257 void
258 recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
259 {
260  struct mISDNhead *hh;
261 
262  /* if allocation did fail upper functions still may call us */
263  if (unlikely(!bch->rx_skb))
264  return;
265  if (unlikely(!bch->rx_skb->len)) {
266  /* we have no data to send - this may happen after recovery
267  * from overflow or too small allocation.
268  * We need to free the buffer here */
269  dev_kfree_skb(bch->rx_skb);
270  bch->rx_skb = NULL;
271  } else {
272  if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
273  (bch->rx_skb->len < bch->minlen) && !force)
274  return;
275  hh = mISDN_HEAD_P(bch->rx_skb);
276  hh->prim = PH_DATA_IND;
277  hh->id = id;
278  if (bch->rcount >= 64) {
280  "B%d receive queue overflow - flushing!\n",
281  bch->nr);
282  skb_queue_purge(&bch->rqueue);
283  }
284  bch->rcount++;
285  skb_queue_tail(&bch->rqueue, bch->rx_skb);
286  bch->rx_skb = NULL;
288  }
289 }
291 
292 void
293 recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
294 {
295  skb_queue_tail(&dch->rqueue, skb);
297 }
299 
300 void
301 recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
302 {
303  if (bch->rcount >= 64) {
304  printk(KERN_WARNING "B-channel %p receive queue overflow, "
305  "flushing!\n", bch);
306  skb_queue_purge(&bch->rqueue);
307  bch->rcount = 0;
308  }
309  bch->rcount++;
310  skb_queue_tail(&bch->rqueue, skb);
312 }
314 
315 static void
316 confirm_Dsend(struct dchannel *dch)
317 {
318  struct sk_buff *skb;
319 
320  skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
321  0, NULL, GFP_ATOMIC);
322  if (!skb) {
323  printk(KERN_ERR "%s: no skb id %x\n", __func__,
324  mISDN_HEAD_ID(dch->tx_skb));
325  return;
326  }
327  skb_queue_tail(&dch->rqueue, skb);
329 }
330 
331 int
333 {
334  dch->tx_idx = 0;
335  dch->tx_skb = skb_dequeue(&dch->squeue);
336  if (dch->tx_skb) {
337  confirm_Dsend(dch);
338  return 1;
339  }
340  dch->tx_skb = NULL;
342  return 0;
343 }
345 
346 static void
347 confirm_Bsend(struct bchannel *bch)
348 {
349  struct sk_buff *skb;
350 
351  if (bch->rcount >= 64) {
352  printk(KERN_WARNING "B-channel %p receive queue overflow, "
353  "flushing!\n", bch);
354  skb_queue_purge(&bch->rqueue);
355  bch->rcount = 0;
356  }
357  skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
358  0, NULL, GFP_ATOMIC);
359  if (!skb) {
360  printk(KERN_ERR "%s: no skb id %x\n", __func__,
361  mISDN_HEAD_ID(bch->tx_skb));
362  return;
363  }
364  bch->rcount++;
365  skb_queue_tail(&bch->rqueue, skb);
367 }
368 
369 int
371 {
372  bch->tx_idx = 0;
373  if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
374  bch->tx_skb = bch->next_skb;
375  if (bch->tx_skb) {
376  bch->next_skb = NULL;
378  /* confirm imediately to allow next data */
379  confirm_Bsend(bch);
380  return 1;
381  } else {
383  printk(KERN_WARNING "B TX_NEXT without skb\n");
384  }
385  }
386  bch->tx_skb = NULL;
388  return 0;
389 }
391 
392 void
393 queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
394 {
395  struct mISDNhead *hh;
396 
397  if (!skb) {
398  _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
399  } else {
400  if (ch->peer) {
401  hh = mISDN_HEAD_P(skb);
402  hh->prim = pr;
403  hh->id = id;
404  if (!ch->recv(ch->peer, skb))
405  return;
406  }
407  dev_kfree_skb(skb);
408  }
409 }
411 
412 int
413 dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
414 {
415  /* check oversize */
416  if (skb->len <= 0) {
417  printk(KERN_WARNING "%s: skb too small\n", __func__);
418  return -EINVAL;
419  }
420  if (skb->len > ch->maxlen) {
421  printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
422  __func__, skb->len, ch->maxlen);
423  return -EINVAL;
424  }
425  /* HW lock must be obtained */
426  if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
427  skb_queue_tail(&ch->squeue, skb);
428  return 0;
429  } else {
430  /* write to fifo */
431  ch->tx_skb = skb;
432  ch->tx_idx = 0;
433  return 1;
434  }
435 }
437 
438 int
439 bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
440 {
441 
442  /* check oversize */
443  if (skb->len <= 0) {
444  printk(KERN_WARNING "%s: skb too small\n", __func__);
445  return -EINVAL;
446  }
447  if (skb->len > ch->maxlen) {
448  printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
449  __func__, skb->len, ch->maxlen);
450  return -EINVAL;
451  }
452  /* HW lock must be obtained */
453  /* check for pending next_skb */
454  if (ch->next_skb) {
456  "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
457  __func__, skb->len, ch->next_skb->len);
458  return -EBUSY;
459  }
460  if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
462  ch->next_skb = skb;
463  return 0;
464  } else {
465  /* write to fifo */
466  ch->tx_skb = skb;
467  ch->tx_idx = 0;
468  confirm_Bsend(ch);
469  return 1;
470  }
471 }
473 
474 /* The function allocates a new receive skb on demand with a size for the
475  * requirements of the current protocol. It returns the tailroom of the
476  * receive skb or an error.
477  */
478 int
479 bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
480 {
481  int len;
482 
483  if (bch->rx_skb) {
484  len = skb_tailroom(bch->rx_skb);
485  if (len < reqlen) {
486  pr_warning("B%d no space for %d (only %d) bytes\n",
487  bch->nr, reqlen, len);
488  if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
489  /* send what we have now and try a new buffer */
490  recv_Bchannel(bch, 0, true);
491  } else {
492  /* on HDLC we have to drop too big frames */
493  return -EMSGSIZE;
494  }
495  } else {
496  return len;
497  }
498  }
499  /* update current min/max length first */
500  if (unlikely(bch->maxlen != bch->next_maxlen))
501  bch->maxlen = bch->next_maxlen;
502  if (unlikely(bch->minlen != bch->next_minlen))
503  bch->minlen = bch->next_minlen;
504  if (unlikely(reqlen > bch->maxlen))
505  return -EMSGSIZE;
506  if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
507  if (reqlen >= bch->minlen) {
508  len = reqlen;
509  } else {
510  len = 2 * bch->minlen;
511  if (len > bch->maxlen)
512  len = bch->maxlen;
513  }
514  } else {
515  /* with HDLC we do not know the length yet */
516  len = bch->maxlen;
517  }
518  bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
519  if (!bch->rx_skb) {
520  pr_warning("B%d receive no memory for %d bytes\n",
521  bch->nr, len);
522  len = -ENOMEM;
523  }
524  return len;
525 }