Linux Kernel  3.7.1
ozpd.c
1 /* -----------------------------------------------------------------------------
2  * Copyright (c) 2011 Ozmo Inc
3  * Released under the GNU General Public License Version 2 (GPLv2).
4  * -----------------------------------------------------------------------------
5  */
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
12 #include "ozconfig.h"
13 #include "ozprotocol.h"
14 #include "ozeltbuf.h"
15 #include "ozpd.h"
16 #include "ozproto.h"
17 #include "oztrace.h"
18 #include "ozevent.h"
19 #include "ozcdev.h"
20 #include "ozusbsvc.h"
21 #include <asm/unaligned.h>
22 #include <linux/uaccess.h>
23 #include <net/psnap.h>
24 /*------------------------------------------------------------------------------
25  */
26 #define OZ_MAX_TX_POOL_SIZE 6
27 /*------------------------------------------------------------------------------
28  */
29 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
30 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
31 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
32 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
33 static int oz_send_isoc_frame(struct oz_pd *pd);
34 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
35 static void oz_isoc_stream_free(struct oz_isoc_stream *st);
36 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
37 static void oz_isoc_destructor(struct sk_buff *skb);
38 static int oz_def_app_init(void);
39 static void oz_def_app_term(void);
40 static int oz_def_app_start(struct oz_pd *pd, int resume);
41 static void oz_def_app_stop(struct oz_pd *pd, int pause);
42 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
43 /*------------------------------------------------------------------------------
44  * Counts the uncompleted isoc frames submitted to netcard.
45  */
46 static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
47 /* Application handler functions.
48  */
49 static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
50  {oz_usb_init,
51  oz_usb_term,
52  oz_usb_start,
53  oz_usb_stop,
54  oz_usb_rx,
55  oz_usb_heartbeat,
56  oz_usb_farewell,
57  OZ_APPID_USB},
58 
59  {oz_def_app_init,
60  oz_def_app_term,
61  oz_def_app_start,
62  oz_def_app_stop,
63  oz_def_app_rx,
64  0,
65  0,
66  OZ_APPID_UNUSED1},
67 
68  {oz_def_app_init,
69  oz_def_app_term,
70  oz_def_app_start,
71  oz_def_app_stop,
72  oz_def_app_rx,
73  0,
74  0,
75  OZ_APPID_UNUSED2},
76 
77  {oz_cdev_init,
78  oz_cdev_term,
79  oz_cdev_start,
80  oz_cdev_stop,
81  oz_cdev_rx,
82  0,
83  0,
84  OZ_APPID_SERIAL},
85 };
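/* Editor's note (not part of the original listing): each entry above is a
 * struct oz_app_if initialised in the field order this file relies on:
 * init, term, start, stop, rx, heartbeat, farewell and app_id (field names
 * assumed from ozpd.h).  oz_handle_app_elt() below dispatches an element to
 * g_app_if[app_id - 1].rx(), so entry order must follow the OZ_APPID_* values.
 */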
86 /*------------------------------------------------------------------------------
87  * Context: process
88  */
89 static int oz_def_app_init(void)
90 {
91  return 0;
92 }
93 /*------------------------------------------------------------------------------
94  * Context: process
95  */
96 static void oz_def_app_term(void)
97 {
98 }
99 /*------------------------------------------------------------------------------
100  * Context: softirq
101  */
102 static int oz_def_app_start(struct oz_pd *pd, int resume)
103 {
104  return 0;
105 }
106 /*------------------------------------------------------------------------------
107  * Context: softirq
108  */
109 static void oz_def_app_stop(struct oz_pd *pd, int pause)
110 {
111 }
112 /*------------------------------------------------------------------------------
113  * Context: softirq
114  */
115 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
116 {
117 }
118 /*------------------------------------------------------------------------------
119  * Context: softirq or process
120  */
121 void oz_pd_set_state(struct oz_pd *pd, unsigned state)
122 {
123  pd->state = state;
124  oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
125 #ifdef WANT_TRACE
126  switch (state) {
127  case OZ_PD_S_IDLE:
128  oz_trace("PD State: OZ_PD_S_IDLE\n");
129  break;
130  case OZ_PD_S_CONNECTED:
131  oz_trace("PD State: OZ_PD_S_CONNECTED\n");
132  break;
133  case OZ_PD_S_STOPPED:
134  oz_trace("PD State: OZ_PD_S_STOPPED\n");
135  break;
136  case OZ_PD_S_SLEEP:
137  oz_trace("PD State: OZ_PD_S_SLEEP\n");
138  break;
139  }
140 #endif /* WANT_TRACE */
141 }
142 /*------------------------------------------------------------------------------
143  * Context: softirq or process
144  */
145 void oz_pd_get(struct oz_pd *pd)
146 {
147  atomic_inc(&pd->ref_count);
148 }
149 /*------------------------------------------------------------------------------
150  * Context: softirq or process
151  */
152 void oz_pd_put(struct oz_pd *pd)
153 {
154  if (atomic_dec_and_test(&pd->ref_count))
155  oz_pd_destroy(pd);
156 }
157 /*------------------------------------------------------------------------------
158  * Context: softirq-serialized
159  */
160 struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
161 {
162  struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
163  if (pd) {
164  int i;
165  atomic_set(&pd->ref_count, 2);
166  for (i = 0; i < OZ_APPID_MAX; i++)
167  spin_lock_init(&pd->app_lock[i]);
168  pd->last_rx_pkt_num = 0xffffffff;
169  oz_pd_set_state(pd, OZ_PD_S_IDLE);
170  pd->max_tx_size = OZ_MAX_TX_SIZE;
171  memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
172  if (0 != oz_elt_buf_init(&pd->elt_buff)) {
173  kfree(pd);
174  pd = 0;
175  }
176  spin_lock_init(&pd->tx_frame_lock);
177  INIT_LIST_HEAD(&pd->tx_queue);
178  INIT_LIST_HEAD(&pd->farewell_list);
179  pd->last_sent_frame = &pd->tx_queue;
180  spin_lock_init(&pd->stream_lock);
181  INIT_LIST_HEAD(&pd->stream_list);
182  }
183  return pd;
184 }
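/* Editor's note (not part of the original listing): ref_count starts at 2
 * above, presumably one reference for the caller and one for the protocol's
 * PD list; the PD is only destroyed once both oz_pd_stop() and the last
 * other user have dropped their references via oz_pd_put().
 */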
185 /*------------------------------------------------------------------------------
186  * Context: softirq or process
187  */
188 void oz_pd_destroy(struct oz_pd *pd)
189 {
190  struct list_head *e;
191  struct oz_tx_frame *f;
192  struct oz_isoc_stream *st;
193  struct oz_farewell *fwell;
194  oz_trace("Destroying PD\n");
195  /* Delete any streams.
196  */
197  e = pd->stream_list.next;
198  while (e != &pd->stream_list) {
199  st = container_of(e, struct oz_isoc_stream, link);
200  e = e->next;
201  oz_isoc_stream_free(st);
202  }
203  /* Free any queued tx frames.
204  */
205  e = pd->tx_queue.next;
206  while (e != &pd->tx_queue) {
207  f = container_of(e, struct oz_tx_frame, link);
208  e = e->next;
209  if (f->skb != NULL)
210  kfree_skb(f->skb);
211  oz_retire_frame(pd, f);
212  }
213  oz_elt_buf_term(&pd->elt_buff);
214  /* Free any farewells.
215  */
216  e = pd->farewell_list.next;
217  while (e != &pd->farewell_list) {
218  fwell = container_of(e, struct oz_farewell, link);
219  e = e->next;
220  kfree(fwell);
221  }
222  /* Deallocate all frames in tx pool.
223  */
224  while (pd->tx_pool) {
225  e = pd->tx_pool;
226  pd->tx_pool = e->next;
227  kfree(container_of(e, struct oz_tx_frame, link));
228  }
229  if (pd->net_dev)
230  dev_put(pd->net_dev);
231  kfree(pd);
232 }
233 /*------------------------------------------------------------------------------
234  * Context: softirq-serialized
235  */
236 int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
237 {
238  struct oz_app_if *ai;
239  int rc = 0;
240  oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
241  for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
242  if (apps & (1<<ai->app_id)) {
243  if (ai->start(pd, resume)) {
244  rc = -1;
245  oz_trace("Unable to start service %d\n",
246  ai->app_id);
247  break;
248  }
249  oz_polling_lock_bh();
250  pd->total_apps |= (1<<ai->app_id);
251  if (resume)
252  pd->paused_apps &= ~(1<<ai->app_id);
253  oz_polling_unlock_bh();
254  }
255  }
256  return rc;
257 }
258 /*------------------------------------------------------------------------------
259  * Context: softirq or process
260  */
261 void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
262 {
263  struct oz_app_if *ai;
264  oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
265  for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
266  if (apps & (1<<ai->app_id)) {
267  oz_polling_lock_bh();
268  if (pause) {
269  pd->paused_apps |= (1<<ai->app_id);
270  } else {
271  pd->total_apps &= ~(1<<ai->app_id);
272  pd->paused_apps &= ~(1<<ai->app_id);
273  }
274  oz_polling_unlock_bh();
275  ai->stop(pd, pause);
276  }
277  }
278 }
279 /*------------------------------------------------------------------------------
280  * Context: softirq
281  */
282 void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
283 {
284  struct oz_app_if *ai;
285  int more = 0;
286  for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
287  if (ai->heartbeat && (apps & (1<<ai->app_id))) {
288  if (ai->heartbeat(pd))
289  more = 1;
290  }
291  }
292  if (more)
293  oz_pd_request_heartbeat(pd);
294  if (pd->mode & OZ_F_ISOC_ANYTIME) {
295  int count = 8;
296  while (count-- && (oz_send_isoc_frame(pd) >= 0))
297  ;
298  }
299 }
300 /*------------------------------------------------------------------------------
301  * Context: softirq or process
302  */
303 void oz_pd_stop(struct oz_pd *pd)
304 {
305  u16 stop_apps = 0;
306  oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
307  oz_pd_indicate_farewells(pd);
308  oz_polling_lock_bh();
309  stop_apps = pd->total_apps;
310  pd->total_apps = 0;
311  pd->paused_apps = 0;
312  oz_polling_unlock_bh();
313  oz_services_stop(pd, stop_apps, 0);
314  oz_polling_lock_bh();
315  oz_pd_set_state(pd, OZ_PD_S_STOPPED);
316  /* Remove from PD list.*/
317  list_del(&pd->link);
318  oz_polling_unlock_bh();
319  oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
320  oz_timer_delete(pd, 0);
321  oz_pd_put(pd);
322 }
323 /*------------------------------------------------------------------------------
324  * Context: softirq
325  */
326 int oz_pd_sleep(struct oz_pd *pd)
327 {
328  int do_stop = 0;
329  u16 stop_apps = 0;
330  oz_polling_lock_bh();
331  if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
332  oz_polling_unlock_bh();
333  return 0;
334  }
335  if (pd->keep_alive_j && pd->session_id) {
336  oz_pd_set_state(pd, OZ_PD_S_SLEEP);
337  pd->pulse_time_j = jiffies + pd->keep_alive_j;
338  oz_trace("Sleep Now %lu until %lu\n",
339  jiffies, pd->pulse_time_j);
340  } else {
341  do_stop = 1;
342  }
343  stop_apps = pd->total_apps;
344  oz_polling_unlock_bh();
345  if (do_stop) {
346  oz_pd_stop(pd);
347  } else {
348  oz_services_stop(pd, stop_apps, 1);
349  oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
350  }
351  return do_stop;
352 }
353 /*------------------------------------------------------------------------------
354  * Context: softirq
355  */
356 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
357 {
358  struct oz_tx_frame *f = 0;
359  spin_lock_bh(&pd->tx_frame_lock);
360  if (pd->tx_pool) {
361  f = container_of(pd->tx_pool, struct oz_tx_frame, link);
362  pd->tx_pool = pd->tx_pool->next;
363  pd->tx_pool_count--;
364  }
365  spin_unlock_bh(&pd->tx_frame_lock);
366  if (f == 0)
367  f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
368  if (f) {
369  f->total_size = sizeof(struct oz_hdr);
370  INIT_LIST_HEAD(&f->link);
371  INIT_LIST_HEAD(&f->elt_list);
372  }
373  return f;
374 }
375 /*------------------------------------------------------------------------------
376  * Context: softirq or process
377  */
378 static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
379 {
380  pd->nb_queued_isoc_frames--;
381  list_del_init(&f->link);
382  if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
383  f->link.next = pd->tx_pool;
384  pd->tx_pool = &f->link;
385  pd->tx_pool_count++;
386  } else {
387  kfree(f);
388  }
389  oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
390  pd->nb_queued_isoc_frames);
391 }
392 /*------------------------------------------------------------------------------
393  * Context: softirq or process
394  */
395 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
396 {
397  spin_lock_bh(&pd->tx_frame_lock);
398  if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
399  f->link.next = pd->tx_pool;
400  pd->tx_pool = &f->link;
401  pd->tx_pool_count++;
402  f = 0;
403  }
404  spin_unlock_bh(&pd->tx_frame_lock);
405  if (f)
406  kfree(f);
407 }
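/* Editor's note (not part of the original listing): oz_tx_frame_alloc() and
 * oz_tx_frame_free() keep up to OZ_MAX_TX_POOL_SIZE retired frames on
 * pd->tx_pool, a singly linked list threaded through link.next, so the
 * common case avoids kmalloc()/kfree() while running in softirq context.
 */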
408 /*------------------------------------------------------------------------------
409  * Context: softirq-serialized
410  */
411 static void oz_set_more_bit(struct sk_buff *skb)
412 {
413  struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
414  oz_hdr->control |= OZ_F_MORE_DATA;
415 }
416 /*------------------------------------------------------------------------------
417  * Context: softirq-serialized
418  */
419 void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
420 {
421  struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
422  oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
423 }
424 /*------------------------------------------------------------------------------
425  * Context: softirq
426  */
427 int oz_prepare_frame(struct oz_pd *pd, int empty)
428 {
429  struct oz_tx_frame *f;
430  if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
431  return -1;
432  if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
433  return -1;
434  if (!empty && !oz_are_elts_available(&pd->elt_buff))
435  return -1;
436  f = oz_tx_frame_alloc(pd);
437  if (f == 0)
438  return -1;
439  f->skb = NULL;
440  f->hdr.control =
441  (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
442  ++pd->last_tx_pkt_num;
443  put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
444  if (empty == 0) {
445  oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
446  pd->max_tx_size, &f->elt_list);
447  }
448  spin_lock(&pd->tx_frame_lock);
449  list_add_tail(&f->link, &pd->tx_queue);
450  pd->nb_queued_frames++;
451  spin_unlock(&pd->tx_frame_lock);
452  return 0;
453 }
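/* Editor's note (not part of the original listing): each queued frame is
 * therefore stamped with the protocol version plus OZ_F_ACK_REQUESTED in
 * hdr.control and a monotonically increasing little-endian packet number;
 * oz_retire_tx_frames() below releases frames once that number has been
 * acknowledged by the peer.
 */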
454 /*------------------------------------------------------------------------------
455  * Context: softirq-serialized
456  */
457 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
458 {
459  struct sk_buff *skb = 0;
460  struct net_device *dev = pd->net_dev;
461  struct oz_hdr *oz_hdr;
462  struct oz_elt *elt;
463  struct list_head *e;
464  /* Allocate skb with enough space for the lower layers as well
465  * as the space we need.
466  */
467  skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
468  if (skb == 0)
469  return 0;
470  /* Reserve the head room for lower layers.
471  */
472  skb_reserve(skb, LL_RESERVED_SPACE(dev));
473  skb_reset_network_header(skb);
474  skb->dev = dev;
475  skb->protocol = htons(OZ_ETHERTYPE);
476  if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
477  dev->dev_addr, skb->len) < 0)
478  goto fail;
479  /* Push the tail to the end of the area we are going to copy to.
480  */
481  oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
482  f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
483  memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
484  /* Copy the elements into the frame body.
485  */
486  elt = (struct oz_elt *)(oz_hdr+1);
487  for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
488  struct oz_elt_info *ei;
489  ei = container_of(e, struct oz_elt_info, link);
490  memcpy(elt, ei->data, ei->length);
491  elt = oz_next_elt(elt);
492  }
493  return skb;
494 fail:
495  kfree_skb(skb);
496  return 0;
497 }
498 /*------------------------------------------------------------------------------
499  * Context: softirq or process
500  */
501 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
502 {
503  struct list_head *e;
504  struct oz_elt_info *ei;
505  e = f->elt_list.next;
506  while (e != &f->elt_list) {
507  ei = container_of(e, struct oz_elt_info, link);
508  e = e->next;
509  list_del_init(&ei->link);
510  if (ei->callback)
511  ei->callback(pd, ei->context);
512  spin_lock_bh(&pd->elt_buff.lock);
513  oz_elt_info_free(&pd->elt_buff, ei);
514  spin_unlock_bh(&pd->elt_buff.lock);
515  }
516  oz_tx_frame_free(pd, f);
517  if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
518  oz_trim_elt_pool(&pd->elt_buff);
519 }
520 /*------------------------------------------------------------------------------
521  * Context: softirq-serialized
522  */
523 static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
524 {
525  struct sk_buff *skb;
526  struct oz_tx_frame *f;
527  struct list_head *e;
528  spin_lock(&pd->tx_frame_lock);
529  e = pd->last_sent_frame->next;
530  if (e == &pd->tx_queue) {
531  spin_unlock(&pd->tx_frame_lock);
532  return -1;
533  }
534  f = container_of(e, struct oz_tx_frame, link);
535 
536  if (f->skb != NULL) {
537  skb = f->skb;
538  oz_tx_isoc_free(pd, f);
539  spin_unlock(&pd->tx_frame_lock);
540  if (more_data)
541  oz_set_more_bit(skb);
542  oz_set_last_pkt_nb(pd, skb);
543  if ((int)atomic_read(&g_submitted_isoc) <
544  OZ_MAX_SUBMITTED_ISOC) {
545  if (dev_queue_xmit(skb) < 0) {
546  oz_trace2(OZ_TRACE_TX_FRAMES,
547  "Dropping ISOC Frame\n");
548  oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
549  return -1;
550  }
551  atomic_inc(&g_submitted_isoc);
552  oz_trace2(OZ_TRACE_TX_FRAMES,
553  "Sending ISOC Frame, nb_isoc= %d\n",
554  pd->nb_queued_isoc_frames);
555  return 0;
556  } else {
557  kfree_skb(skb);
558  oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
559  oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
560  return -1;
561  }
562  }
563 
564  pd->last_sent_frame = e;
565  skb = oz_build_frame(pd, f);
566  spin_unlock(&pd->tx_frame_lock);
567  if (more_data)
568  oz_set_more_bit(skb);
569  oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
570  if (skb) {
571  oz_event_log(OZ_EVT_TX_FRAME,
572  0,
573  (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
574  0, f->hdr.pkt_num);
575  if (dev_queue_xmit(skb) < 0)
576  return -1;
577 
578  }
579  return 0;
580 }
581 /*------------------------------------------------------------------------------
582  * Context: softirq-serialized
583  */
584 void oz_send_queued_frames(struct oz_pd *pd, int backlog)
585 {
586  while (oz_prepare_frame(pd, 0) >= 0)
587  backlog++;
588 
589  switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
590 
591  case OZ_F_ISOC_NO_ELTS: {
592  backlog += pd->nb_queued_isoc_frames;
593  if (backlog <= 0)
594  goto out;
595  if (backlog > OZ_MAX_SUBMITTED_ISOC)
596  backlog = OZ_MAX_SUBMITTED_ISOC;
597  break;
598  }
599  case OZ_NO_ELTS_ANYTIME: {
600  if ((backlog <= 0) && (pd->isoc_sent == 0))
601  goto out;
602  break;
603  }
604  default: {
605  if (backlog <= 0)
606  goto out;
607  break;
608  }
609  }
610  while (backlog--) {
611  if (oz_send_next_queued_frame(pd, backlog) < 0)
612  break;
613  }
614  return;
615 
616 out: oz_prepare_frame(pd, 1);
617  oz_send_next_queued_frame(pd, 0);
618 }
619 /*------------------------------------------------------------------------------
620  * Context: softirq
621  */
622 static int oz_send_isoc_frame(struct oz_pd *pd)
623 {
624  struct sk_buff *skb = 0;
625  struct net_device *dev = pd->net_dev;
626  struct oz_hdr *oz_hdr;
627  struct oz_elt *elt;
628  struct list_head *e;
629  struct list_head list;
630  int total_size = sizeof(struct oz_hdr);
631  INIT_LIST_HEAD(&list);
632 
633  oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
634  pd->max_tx_size, &list);
635  if (list.next == &list)
636  return 0;
637  skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
638  if (skb == 0) {
639  oz_trace("Cannot alloc skb\n");
640  oz_elt_info_free_chain(&pd->elt_buff, &list);
641  return -1;
642  }
643  skb_reserve(skb, LL_RESERVED_SPACE(dev));
644  skb_reset_network_header(skb);
645  skb->dev = dev;
646  skb->protocol = htons(OZ_ETHERTYPE);
647  if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
648  dev->dev_addr, skb->len) < 0) {
649  kfree_skb(skb);
650  return -1;
651  }
652  oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
653  oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
654  oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
655  elt = (struct oz_elt *)(oz_hdr+1);
656 
657  for (e = list.next; e != &list; e = e->next) {
658  struct oz_elt_info *ei;
659  ei = container_of(e, struct oz_elt_info, link);
660  memcpy(elt, ei->data, ei->length);
661  elt = oz_next_elt(elt);
662  }
663  oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
664  dev_queue_xmit(skb);
665  oz_elt_info_free_chain(&pd->elt_buff, &list);
666  return 0;
667 }
668 /*------------------------------------------------------------------------------
669  * Context: softirq-serialized
670  */
671 void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
672 {
673  struct list_head *e;
674  struct oz_tx_frame *f;
675  struct list_head *first = 0;
676  struct list_head *last = 0;
677  u8 diff;
678  u32 pkt_num;
679 
680  spin_lock(&pd->tx_frame_lock);
681  e = pd->tx_queue.next;
682  while (e != &pd->tx_queue) {
683  f = container_of(e, struct oz_tx_frame, link);
684  pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
685  diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
686  if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
687  break;
688  oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
689  pkt_num, pd->nb_queued_frames);
690  if (first == 0)
691  first = e;
692  last = e;
693  e = e->next;
694  pd->nb_queued_frames--;
695  }
696  if (first) {
697  last->next->prev = &pd->tx_queue;
698  pd->tx_queue.next = last->next;
699  last->next = 0;
700  }
701  pd->last_sent_frame = &pd->tx_queue;
702  spin_unlock(&pd->tx_frame_lock);
703  while (first) {
704  f = container_of(first, struct oz_tx_frame, link);
705  first = first->next;
706  oz_retire_frame(pd, f);
707  }
708 }
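/* Editor's note (not part of the original listing): the retirement test in
 * oz_retire_tx_frames() works modulo the width of the last-packet-number
 * field.  A worked example, assuming OZ_LAST_PN_MASK == 0x1f and
 * OZ_LAST_PN_HALF_CYCLE == 16: with lpn == 3, a queued frame whose
 * (pkt_num & mask) is 30 gives diff = (3 - 30) & 0x1f = 5, i.e. it lies at
 * most half a cycle behind the acknowledgement and is retired, whereas a
 * frame 20 behind gives diff = 20 > 16 and stops the scan.
 */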
709 /*------------------------------------------------------------------------------
710  * Precondition: stream_lock must be held.
711  * Context: softirq
712  */
713 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
714 {
715  struct list_head *e;
716  struct oz_isoc_stream *st;
717  list_for_each(e, &pd->stream_list) {
718  st = container_of(e, struct oz_isoc_stream, link);
719  if (st->ep_num == ep_num)
720  return st;
721  }
722  return 0;
723 }
724 /*------------------------------------------------------------------------------
725  * Context: softirq
726  */
727 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
728 {
729  struct oz_isoc_stream *st =
730  kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
731  if (!st)
732  return -ENOMEM;
733  st->ep_num = ep_num;
734  spin_lock_bh(&pd->stream_lock);
735  if (!pd_stream_find(pd, ep_num)) {
736  list_add(&st->link, &pd->stream_list);
737  st = 0;
738  }
739  spin_unlock_bh(&pd->stream_lock);
740  if (st)
741  kfree(st);
742  return 0;
743 }
744 /*------------------------------------------------------------------------------
745  * Context: softirq or process
746  */
747 static void oz_isoc_stream_free(struct oz_isoc_stream *st)
748 {
749  kfree_skb(st->skb);
750  kfree(st);
751 }
752 /*------------------------------------------------------------------------------
753  * Context: softirq
754  */
755 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
756 {
757  struct oz_isoc_stream *st;
758  spin_lock_bh(&pd->stream_lock);
759  st = pd_stream_find(pd, ep_num);
760  if (st)
761  list_del(&st->link);
762  spin_unlock_bh(&pd->stream_lock);
763  if (st)
764  oz_isoc_stream_free(st);
765  return 0;
766 }
767 /*------------------------------------------------------------------------------
768  * Context: any
769  */
770 static void oz_isoc_destructor(struct sk_buff *skb)
771 {
772  atomic_dec(&g_submitted_isoc);
773  oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
774  0, skb, 0);
775 }
776 /*------------------------------------------------------------------------------
777  * Context: softirq
778  */
779 int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
780 {
781  struct net_device *dev = pd->net_dev;
782  struct oz_isoc_stream *st;
783  u8 nb_units = 0;
784  struct sk_buff *skb = 0;
785  struct oz_hdr *oz_hdr = 0;
786  int size = 0;
787  spin_lock_bh(&pd->stream_lock);
788  st = pd_stream_find(pd, ep_num);
789  if (st) {
790  skb = st->skb;
791  st->skb = 0;
792  nb_units = st->nb_units;
793  st->nb_units = 0;
794  oz_hdr = st->oz_hdr;
795  size = st->size;
796  }
797  spin_unlock_bh(&pd->stream_lock);
798  if (!st)
799  return 0;
800  if (!skb) {
801  /* Allocate enough space for max size frame. */
802  skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
803  GFP_ATOMIC);
804  if (skb == 0)
805  return 0;
806  /* Reserve the head room for lower layers. */
807  skb_reserve(skb, LL_RESERVED_SPACE(dev));
808  skb_reset_network_header(skb);
809  skb->dev = dev;
810  skb->protocol = htons(OZ_ETHERTYPE);
811  /* For audio packet set priority to AC_VO */
812  skb->priority = 0x7;
813  size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
814  oz_hdr = (struct oz_hdr *)skb_put(skb, size);
815  }
816  memcpy(skb_put(skb, len), data, len);
817  size += len;
818  if (++nb_units < pd->ms_per_isoc) {
819  spin_lock_bh(&pd->stream_lock);
820  st->skb = skb;
821  st->nb_units = nb_units;
822  st->oz_hdr = oz_hdr;
823  st->size = size;
824  spin_unlock_bh(&pd->stream_lock);
825  } else {
826  struct oz_hdr oz;
827  struct oz_isoc_large iso;
828  spin_lock_bh(&pd->stream_lock);
829  iso.frame_number = st->frame_num;
830  st->frame_num += nb_units;
831  spin_unlock_bh(&pd->stream_lock);
832  oz.control =
833  (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
834  oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
835  oz.pkt_num = 0;
836  iso.endpoint = ep_num;
837  iso.format = OZ_DATA_F_ISOC_LARGE;
838  iso.ms_data = nb_units;
839  memcpy(oz_hdr, &oz, sizeof(oz));
840  memcpy(oz_hdr+1, &iso, sizeof(iso));
841  if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
842  dev->dev_addr, skb->len) < 0)
843  goto out;
844 
845  skb->destructor = oz_isoc_destructor;
846  /*Queue for Xmit if mode is not ANYTIME*/
847  if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
848  struct oz_tx_frame *isoc_unit = NULL;
849  int nb = pd->nb_queued_isoc_frames;
850  if (nb >= pd->isoc_latency) {
851  oz_trace2(OZ_TRACE_TX_FRAMES,
852  "Dropping ISOC Unit nb= %d\n",
853  nb);
854  goto out;
855  }
856  isoc_unit = oz_tx_frame_alloc(pd);
857  if (isoc_unit == NULL)
858  goto out;
859  isoc_unit->hdr = oz;
860  isoc_unit->skb = skb;
861  spin_lock_bh(&pd->tx_frame_lock);
862  list_add_tail(&isoc_unit->link, &pd->tx_queue);
863  pd->nb_queued_isoc_frames++;
864  spin_unlock_bh(&pd->tx_frame_lock);
865  oz_trace2(OZ_TRACE_TX_FRAMES,
866  "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
867  pd->nb_queued_isoc_frames, pd->nb_queued_frames);
868  oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
869  skb, atomic_read(&g_submitted_isoc));
870  return 0;
871  }
872 
873  /*In ANYTIME mode Xmit unit immediately*/
874  if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
875  atomic_inc(&g_submitted_isoc);
876  oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
877  skb, atomic_read(&g_submitted_isoc));
878  if (dev_queue_xmit(skb) < 0) {
879  oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
880  return -1;
881  } else
882  return 0;
883  }
884 
885 out: oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
886  kfree_skb(skb);
887  return -1;
888 
889  }
890  return 0;
891 }
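/* Editor's note (not part of the original listing): an accumulated
 * isochronous frame built by oz_send_isoc_unit() ends up laid out as
 * [oz_hdr][oz_isoc_large][unit 0][unit 1]...[unit N-1]; one audio unit is
 * appended per call until nb_units reaches pd->ms_per_isoc, at which point
 * the header and descriptor are filled in and the frame is queued or sent.
 */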
892 /*------------------------------------------------------------------------------
893  * Context: process
894  */
895 void oz_apps_init(void)
896 {
897  int i;
898  for (i = 0; i < OZ_APPID_MAX; i++)
899  if (g_app_if[i].init)
900  g_app_if[i].init();
901 }
902 /*------------------------------------------------------------------------------
903  * Context: process
904  */
905 void oz_apps_term(void)
906 {
907  int i;
908  /* Terminate all the apps. */
909  for (i = 0; i < OZ_APPID_MAX; i++)
910  if (g_app_if[i].term)
911  g_app_if[i].term();
912 }
913 /*------------------------------------------------------------------------------
914  * Context: softirq-serialized
915  */
916 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
917 {
918  struct oz_app_if *ai;
919  if (app_id == 0 || app_id > OZ_APPID_MAX)
920  return;
921  ai = &g_app_if[app_id-1];
922  ai->rx(pd, elt);
923 }
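/* Editor's note (not part of the original listing): a minimal sketch of the
 * dispatch path, assuming OZ_APPID_USB == 1 as defined in ozprotocol.h:
 *
 *      oz_handle_app_elt(pd, OZ_APPID_USB, elt);
 *              --> g_app_if[OZ_APPID_USB - 1].rx == oz_usb_rx
 */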
924 /*------------------------------------------------------------------------------
925  * Context: softirq or process
926  */
927 void oz_pd_indicate_farewells(struct oz_pd *pd)
928 {
929  struct oz_farewell *f;
930  struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
931  while (1) {
932  oz_polling_lock_bh();
933  if (list_empty(&pd->farewell_list)) {
934  oz_polling_unlock_bh();
935  break;
936  }
937  f = list_first_entry(&pd->farewell_list,
938  struct oz_farewell, link);
939  list_del(&f->link);
940  oz_polling_unlock_bh();
941  if (ai->farewell)
942  ai->farewell(pd, f->ep_num, f->report, f->len);
943  kfree(f);
944  }
945 }