Linux Kernel  3.7.1
core.c
1 /*
2  * The NFC Controller Interface is the communication protocol between an
3  * NFC Controller (NFCC) and a Device Host (DH).
4  *
5  * Copyright (C) 2011 Texas Instruments, Inc.
6  *
7  * Written by Ilan Elias <[email protected]>
8  *
9  * Acknowledgements:
10  * This file is based on hci_core.c, which was written
11  * by Maxim Krasnyansky.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2
15  * as published by the Free Software Foundation
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25  *
26  */
27 
28 #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
29 
30 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/workqueue.h>
33 #include <linux/completion.h>
34 #include <linux/export.h>
35 #include <linux/sched.h>
36 #include <linux/bitops.h>
37 #include <linux/skbuff.h>
38 
39 #include "../nfc.h"
40 #include <net/nfc/nci.h>
41 #include <net/nfc/nci_core.h>
42 #include <linux/nfc.h>
43 
44 static void nci_cmd_work(struct work_struct *work);
45 static void nci_rx_work(struct work_struct *work);
46 static void nci_tx_work(struct work_struct *work);
47 
48 /* ---- NCI requests ---- */
49 
50 void nci_req_complete(struct nci_dev *ndev, int result)
51 {
52  if (ndev->req_status == NCI_REQ_PEND) {
53  ndev->req_result = result;
54  ndev->req_status = NCI_REQ_DONE;
55  complete(&ndev->req_completion);
56  }
57 }
58 
59 static void nci_req_cancel(struct nci_dev *ndev, int err)
60 {
61  if (ndev->req_status == NCI_REQ_PEND) {
62  ndev->req_result = err;
63  ndev->req_status = NCI_REQ_CANCELED;
64  complete(&ndev->req_completion);
65  }
66 }
67 
68 /* Execute request and wait for completion. */
69 static int __nci_request(struct nci_dev *ndev,
70  void (*req)(struct nci_dev *ndev, unsigned long opt),
71  unsigned long opt, __u32 timeout)
72 {
73  int rc = 0;
74  long completion_rc;
75 
76  ndev->req_status = NCI_REQ_PEND;
77 
78  init_completion(&ndev->req_completion);
79  req(ndev, opt);
80  completion_rc =
81  wait_for_completion_interruptible_timeout(&ndev->req_completion,
82  timeout);
83 
84  pr_debug("wait_for_completion return %ld\n", completion_rc);
85 
86  if (completion_rc > 0) {
87  switch (ndev->req_status) {
88  case NCI_REQ_DONE:
89  rc = nci_to_errno(ndev->req_result);
90  break;
91 
92  case NCI_REQ_CANCELED:
93  rc = -ndev->req_result;
94  break;
95 
96  default:
97  rc = -ETIMEDOUT;
98  break;
99  }
100  } else {
101  pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
102  completion_rc);
103 
104  rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
105  }
106 
107  ndev->req_status = ndev->req_result = 0;
108 
109  return rc;
110 }
111 
112 static inline int nci_request(struct nci_dev *ndev,
113  void (*req)(struct nci_dev *ndev,
114  unsigned long opt),
115  unsigned long opt, __u32 timeout)
116 {
117  int rc;
118 
119  if (!test_bit(NCI_UP, &ndev->flags))
120  return -ENETDOWN;
121 
122  /* Serialize all requests */
123  mutex_lock(&ndev->req_lock);
124  rc = __nci_request(ndev, req, opt, timeout);
125  mutex_unlock(&ndev->req_lock);
126 
127  return rc;
128 }
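A new operation added to this file would follow the request pattern above: a builder callback queues a command with nci_send_cmd(), __nci_request() sleeps on req_completion, and the response handler wakes it via nci_req_complete(). A minimal sketch, assuming an invented opcode NCI_OP_VENDOR_EXAMPLE_CMD (both functions below are illustrative, not part of core.c):

/* builder: only queues the command, never blocks */
static void nci_example_req(struct nci_dev *ndev, unsigned long opt)
{
	__u8 param = (__u8)opt;

	nci_send_cmd(ndev, NCI_OP_VENDOR_EXAMPLE_CMD, 1, &param);
}

static int nci_example_request(struct nci_dev *ndev, __u8 param)
{
	/* serialized on req_lock; fails with -ENETDOWN unless NCI_UP is set */
	return nci_request(ndev, nci_example_req, param,
			   msecs_to_jiffies(NCI_CMD_TIMEOUT));
}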
129 
130 static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
131 {
132  struct nci_core_reset_cmd cmd;
133 
134  cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
135  nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
136 }
137 
138 static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
139 {
140  nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
141 }
142 
143 static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
144 {
145  struct nci_rf_disc_map_cmd cmd;
146  struct disc_map_config *cfg = cmd.mapping_configs;
147  __u8 *num = &cmd.num_mapping_configs;
148  int i;
149 
150  /* set rf mapping configurations */
151  *num = 0;
152 
153  /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
154  for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
155  if (ndev->supported_rf_interfaces[i] ==
156  NCI_RF_INTERFACE_ISO_DEP) {
157  cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
158  cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
159  NCI_DISC_MAP_MODE_LISTEN;
160  cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
161  (*num)++;
162  } else if (ndev->supported_rf_interfaces[i] ==
163  NCI_RF_INTERFACE_NFC_DEP) {
164  cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
165  cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
166  NCI_DISC_MAP_MODE_LISTEN;
167  cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
168  (*num)++;
169  }
170 
171  if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
172  break;
173  }
174 
175  nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
176  (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
177 }
178 
179 struct nci_set_config_param {
180  __u8 id;
181  size_t len;
182  __u8 *val;
183 };
184 
185 static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
186 {
187  struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
188  struct nci_core_set_config_cmd cmd;
189 
190  BUG_ON(param->len > NCI_MAX_PARAM_LEN);
191 
192  cmd.num_params = 1;
193  cmd.param.id = param->id;
194  cmd.param.len = param->len;
195  memcpy(cmd.param.val, param->val, param->len);
196 
197  nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
198 }
199 
200 static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
201 {
202  struct nci_rf_disc_cmd cmd;
203  __u32 protocols = opt;
204 
205  cmd.num_disc_configs = 0;
206 
207  if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
208  (protocols & NFC_PROTO_JEWEL_MASK
209  || protocols & NFC_PROTO_MIFARE_MASK
210  || protocols & NFC_PROTO_ISO14443_MASK
211  || protocols & NFC_PROTO_NFC_DEP_MASK)) {
212  cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
213  NCI_NFC_A_PASSIVE_POLL_MODE;
214  cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
215  cmd.num_disc_configs++;
216  }
217 
218  if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
219  (protocols & NFC_PROTO_ISO14443_B_MASK)) {
220  cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
221  NCI_NFC_B_PASSIVE_POLL_MODE;
222  cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
223  cmd.num_disc_configs++;
224  }
225 
226  if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
227  (protocols & NFC_PROTO_FELICA_MASK
228  || protocols & NFC_PROTO_NFC_DEP_MASK)) {
229  cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
230  NCI_NFC_F_PASSIVE_POLL_MODE;
231  cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
232  cmd.num_disc_configs++;
233  }
234 
235  nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
236  (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
237  &cmd);
238 }
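/* Note: the discovery configuration above maps the generic NFC_PROTO_* masks
 * onto NCI RF technologies: Jewel/MIFARE/ISO14443/NFC-DEP enable NFC-A
 * passive polling, ISO14443-B enables NFC-B, and FeliCa/NFC-DEP enable NFC-F.
 * At most NCI_MAX_NUM_RF_CONFIGS entries are sent in one RF_DISCOVER_CMD.
 */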
239 
240 struct nci_rf_discover_select_param {
241  __u8 rf_discovery_id;
242  __u8 rf_protocol;
243 };
244 
245 static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
246 {
247  struct nci_rf_discover_select_param *param =
248  (struct nci_rf_discover_select_param *)opt;
249  struct nci_rf_discover_select_cmd cmd;
250 
251  cmd.rf_discovery_id = param->rf_discovery_id;
252  cmd.rf_protocol = param->rf_protocol;
253 
254  switch (cmd.rf_protocol) {
255  case NCI_RF_PROTOCOL_ISO_DEP:
256  cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
257  break;
258 
259  case NCI_RF_PROTOCOL_NFC_DEP:
260  cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
261  break;
262 
263  default:
264  cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
265  break;
266  }
267 
268  nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
269  sizeof(struct nci_rf_discover_select_cmd), &cmd);
270 }
271 
272 static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
273 {
274  struct nci_rf_deactivate_cmd cmd;
275 
276  cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
277 
278  nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
279  sizeof(struct nci_rf_deactivate_cmd), &cmd);
280 }
281 
282 static int nci_open_device(struct nci_dev *ndev)
283 {
284  int rc = 0;
285 
286  mutex_lock(&ndev->req_lock);
287 
288  if (test_bit(NCI_UP, &ndev->flags)) {
289  rc = -EALREADY;
290  goto done;
291  }
292 
293  if (ndev->ops->open(ndev)) {
294  rc = -EIO;
295  goto done;
296  }
297 
298  atomic_set(&ndev->cmd_cnt, 1);
299 
300  set_bit(NCI_INIT, &ndev->flags);
301 
302  rc = __nci_request(ndev, nci_reset_req, 0,
303  msecs_to_jiffies(NCI_RESET_TIMEOUT));
304 
305  if (!rc) {
306  rc = __nci_request(ndev, nci_init_req, 0,
307  msecs_to_jiffies(NCI_INIT_TIMEOUT));
308  }
309 
310  if (!rc) {
311  rc = __nci_request(ndev, nci_init_complete_req, 0,
312  msecs_to_jiffies(NCI_INIT_TIMEOUT));
313  }
314 
315  clear_bit(NCI_INIT, &ndev->flags);
316 
317  if (!rc) {
318  set_bit(NCI_UP, &ndev->flags);
319  nci_clear_target_list(ndev);
320  atomic_set(&ndev->state, NCI_IDLE);
321  } else {
322  /* Init failed, cleanup */
323  skb_queue_purge(&ndev->cmd_q);
324  skb_queue_purge(&ndev->rx_q);
325  skb_queue_purge(&ndev->tx_q);
326 
327  ndev->ops->close(ndev);
328  ndev->flags = 0;
329  }
330 
331 done:
332  mutex_unlock(&ndev->req_lock);
333  return rc;
334 }
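/* Note: bringing the device up is a three-step NCI handshake, each step
 * bounded by its own timeout: CORE_RESET_CMD (nci_reset_req), CORE_INIT_CMD
 * (nci_init_req), then RF_DISCOVER_MAP_CMD (nci_init_complete_req).  Only
 * after all three succeed is NCI_UP set and the target list cleared.
 */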
335 
336 static int nci_close_device(struct nci_dev *ndev)
337 {
338  nci_req_cancel(ndev, ENODEV);
339  mutex_lock(&ndev->req_lock);
340 
341  if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
342  del_timer_sync(&ndev->cmd_timer);
343  del_timer_sync(&ndev->data_timer);
344  mutex_unlock(&ndev->req_lock);
345  return 0;
346  }
347 
348  /* Drop RX and TX queues */
349  skb_queue_purge(&ndev->rx_q);
350  skb_queue_purge(&ndev->tx_q);
351 
352  /* Flush RX and TX wq */
353  flush_workqueue(ndev->rx_wq);
354  flush_workqueue(ndev->tx_wq);
355 
356  /* Reset device */
357  skb_queue_purge(&ndev->cmd_q);
358  atomic_set(&ndev->cmd_cnt, 1);
359 
360  set_bit(NCI_INIT, &ndev->flags);
361  __nci_request(ndev, nci_reset_req, 0,
362  msecs_to_jiffies(NCI_RESET_TIMEOUT));
363  clear_bit(NCI_INIT, &ndev->flags);
364 
365  /* Flush cmd wq */
366  flush_workqueue(ndev->cmd_wq);
367 
368  /* After this point our queues are empty
369  * and no works are scheduled. */
370  ndev->ops->close(ndev);
371 
372  /* Clear flags */
373  ndev->flags = 0;
374 
375  mutex_unlock(&ndev->req_lock);
376 
377  return 0;
378 }
379 
380 /* NCI command timer function */
381 static void nci_cmd_timer(unsigned long arg)
382 {
383  struct nci_dev *ndev = (void *) arg;
384 
385  atomic_set(&ndev->cmd_cnt, 1);
386  queue_work(ndev->cmd_wq, &ndev->cmd_work);
387 }
388 
389 /* NCI data exchange timer function */
390 static void nci_data_timer(unsigned long arg)
391 {
392  struct nci_dev *ndev = (void *) arg;
393 
394  set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
395  queue_work(ndev->rx_wq, &ndev->rx_work);
396 }
397 
398 static int nci_dev_up(struct nfc_dev *nfc_dev)
399 {
400  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
401 
402  return nci_open_device(ndev);
403 }
404 
405 static int nci_dev_down(struct nfc_dev *nfc_dev)
406 {
407  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
408 
409  return nci_close_device(ndev);
410 }
411 
412 static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
413 {
414  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
415  struct nci_set_config_param param;
416  __u8 local_gb[NFC_MAX_GT_LEN];
417  int i, rc = 0;
418 
419  param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
420  if ((param.val == NULL) || (param.len == 0))
421  return rc;
422 
423  if (param.len > NCI_MAX_PARAM_LEN)
424  return -EINVAL;
425 
426  for (i = 0; i < param.len; i++)
427  local_gb[param.len-1-i] = param.val[i];
428 
429  param.id = NCI_PN_ATR_REQ_GEN_BYTES;
430  param.val = local_gb;
431 
432  rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param,
433  msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
434 
435  return rc;
436 }
437 
438 static int nci_start_poll(struct nfc_dev *nfc_dev,
439  __u32 im_protocols, __u32 tm_protocols)
440 {
441  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
442  int rc;
443 
444  if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
445  (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
446  pr_err("unable to start poll, since poll is already active\n");
447  return -EBUSY;
448  }
449 
450  if (ndev->target_active_prot) {
451  pr_err("there is an active target\n");
452  return -EBUSY;
453  }
454 
455  if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
456  (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
457  pr_debug("target active or w4 select, implicitly deactivate\n");
458 
459  rc = nci_request(ndev, nci_rf_deactivate_req, 0,
460  msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
461  if (rc)
462  return -EBUSY;
463  }
464 
465  if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
466  rc = nci_set_local_general_bytes(nfc_dev);
467  if (rc) {
468  pr_err("failed to set local general bytes\n");
469  return rc;
470  }
471  }
472 
473  rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
474  msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
475 
476  if (!rc)
477  ndev->poll_prots = im_protocols;
478 
479  return rc;
480 }
481 
482 static void nci_stop_poll(struct nfc_dev *nfc_dev)
483 {
484  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
485 
486  if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
487  (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
488  pr_err("unable to stop poll, since poll is not active\n");
489  return;
490  }
491 
492  nci_request(ndev, nci_rf_deactivate_req, 0,
493  msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
494 }
495 
496 static int nci_activate_target(struct nfc_dev *nfc_dev,
497  struct nfc_target *target, __u32 protocol)
498 {
499  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
500  struct nci_rf_discover_select_param param;
501  struct nfc_target *nci_target = NULL;
502  int i;
503  int rc = 0;
504 
505  pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
506 
507  if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
508  (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
509  pr_err("there is no available target to activate\n");
510  return -EINVAL;
511  }
512 
513  if (ndev->target_active_prot) {
514  pr_err("there is already an active target\n");
515  return -EBUSY;
516  }
517 
518  for (i = 0; i < ndev->n_targets; i++) {
519  if (ndev->targets[i].idx == target->idx) {
520  nci_target = &ndev->targets[i];
521  break;
522  }
523  }
524 
525  if (!nci_target) {
526  pr_err("unable to find the selected target\n");
527  return -EINVAL;
528  }
529 
530  if (!(nci_target->supported_protocols & (1 << protocol))) {
531  pr_err("target does not support the requested protocol 0x%x\n",
532  protocol);
533  return -EINVAL;
534  }
535 
536  if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
537  param.rf_discovery_id = nci_target->logical_idx;
538 
539  if (protocol == NFC_PROTO_JEWEL)
540  param.rf_protocol = NCI_RF_PROTOCOL_T1T;
541  else if (protocol == NFC_PROTO_MIFARE)
542  param.rf_protocol = NCI_RF_PROTOCOL_T2T;
543  else if (protocol == NFC_PROTO_FELICA)
544  param.rf_protocol = NCI_RF_PROTOCOL_T3T;
545  else if (protocol == NFC_PROTO_ISO14443 ||
546  protocol == NFC_PROTO_ISO14443_B)
547  param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
548  else
549  param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
550 
551  rc = nci_request(ndev, nci_rf_discover_select_req,
552  (unsigned long)&param,
553  msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
554  }
555 
556  if (!rc)
557  ndev->target_active_prot = protocol;
558 
559  return rc;
560 }
561 
562 static void nci_deactivate_target(struct nfc_dev *nfc_dev,
563  struct nfc_target *target)
564 {
565  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
566 
567  pr_debug("entry\n");
568 
569  if (!ndev->target_active_prot) {
570  pr_err("unable to deactivate target, no active target\n");
571  return;
572  }
573 
574  ndev->target_active_prot = 0;
575 
576  if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
577  nci_request(ndev, nci_rf_deactivate_req, 0,
578  msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
579  }
580 }
581 
582 
583 static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
584  __u8 comm_mode, __u8 *gb, size_t gb_len)
585 {
586  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
587  int rc;
588 
589  pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);
590 
591  rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
592  if (rc)
593  return rc;
594 
595  rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
596  ndev->remote_gb_len);
597  if (!rc)
598  rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
599  NFC_RF_INITIATOR);
600 
601  return rc;
602 }
603 
604 static int nci_dep_link_down(struct nfc_dev *nfc_dev)
605 {
606  pr_debug("entry\n");
607 
608  nci_deactivate_target(nfc_dev, NULL);
609 
610  return 0;
611 }
612 
613 
614 static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
615  struct sk_buff *skb,
616  data_exchange_cb_t cb, void *cb_context)
617 {
618  struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
619  int rc;
620 
621  pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
622 
623  if (!ndev->target_active_prot) {
624  pr_err("unable to exchange data, no active target\n");
625  return -EINVAL;
626  }
627 
628  if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
629  return -EBUSY;
630 
631  /* store cb and context to be used on receiving data */
632  ndev->data_exchange_cb = cb;
633  ndev->data_exchange_cb_context = cb_context;
634 
635  rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
636  if (rc)
637  clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
638 
639  return rc;
640 }
641 
642 static struct nfc_ops nci_nfc_ops = {
643  .dev_up = nci_dev_up,
644  .dev_down = nci_dev_down,
645  .start_poll = nci_start_poll,
646  .stop_poll = nci_stop_poll,
647  .dep_link_up = nci_dep_link_up,
648  .dep_link_down = nci_dep_link_down,
649  .activate_target = nci_activate_target,
650  .deactivate_target = nci_deactivate_target,
651  .im_transceive = nci_transceive,
652 };
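/* Note: these callbacks are not called directly by drivers; they are invoked
 * by the NFC core (net/nfc/core.c) on behalf of user-space requests, and each
 * handler above recovers the nci_dev through nfc_get_drvdata().
 */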
653 
654 /* ---- Interface to NCI drivers ---- */
655 
656 /**
657  * nci_allocate_device - allocate a new nci device
658  *
659  * @ops: device operations
660  * @supported_protocols: NFC protocols supported by the device
661  */
662 struct nci_dev *nci_allocate_device(struct nci_ops *ops,
663  __u32 supported_protocols,
664  int tx_headroom, int tx_tailroom)
665 {
666  struct nci_dev *ndev;
667 
668  pr_debug("supported_protocols 0x%x\n", supported_protocols);
669 
670  if (!ops->open || !ops->close || !ops->send)
671  return NULL;
672 
673  if (!supported_protocols)
674  return NULL;
675 
676  ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
677  if (!ndev)
678  return NULL;
679 
680  ndev->ops = ops;
681  ndev->tx_headroom = tx_headroom;
682  ndev->tx_tailroom = tx_tailroom;
683 
684  ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
685  supported_protocols,
686  tx_headroom + NCI_DATA_HDR_SIZE,
687  tx_tailroom);
688  if (!ndev->nfc_dev)
689  goto free_exit;
690 
691  nfc_set_drvdata(ndev->nfc_dev, ndev);
692 
693  return ndev;
694 
695 free_exit:
696  kfree(ndev);
697  return NULL;
698 }
699 EXPORT_SYMBOL(nci_allocate_device);
700 
701 /**
702  * nci_free_device - deallocate nci device
703  *
704  * @ndev: The nci device to deallocate
705  */
706 void nci_free_device(struct nci_dev *ndev)
707 {
708  nfc_free_device(ndev->nfc_dev);
709  kfree(ndev);
710 }
711 EXPORT_SYMBOL(nci_free_device);
712 
713 /**
714  * nci_register_device - register a nci device in the nfc subsystem
715  *
716  * @dev: The nci device to register
717  */
718 int nci_register_device(struct nci_dev *ndev)
719 {
720  int rc;
721  struct device *dev = &ndev->nfc_dev->dev;
722  char name[32];
723 
724  rc = nfc_register_device(ndev->nfc_dev);
725  if (rc)
726  goto exit;
727 
728  ndev->flags = 0;
729 
730  INIT_WORK(&ndev->cmd_work, nci_cmd_work);
731  snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
732  ndev->cmd_wq = create_singlethread_workqueue(name);
733  if (!ndev->cmd_wq) {
734  rc = -ENOMEM;
735  goto unreg_exit;
736  }
737 
738  INIT_WORK(&ndev->rx_work, nci_rx_work);
739  snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
740  ndev->rx_wq = create_singlethread_workqueue(name);
741  if (!ndev->rx_wq) {
742  rc = -ENOMEM;
743  goto destroy_cmd_wq_exit;
744  }
745 
746  INIT_WORK(&ndev->tx_work, nci_tx_work);
747  snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
748  ndev->tx_wq = create_singlethread_workqueue(name);
749  if (!ndev->tx_wq) {
750  rc = -ENOMEM;
751  goto destroy_rx_wq_exit;
752  }
753 
754  skb_queue_head_init(&ndev->cmd_q);
755  skb_queue_head_init(&ndev->rx_q);
756  skb_queue_head_init(&ndev->tx_q);
757 
758  setup_timer(&ndev->cmd_timer, nci_cmd_timer,
759  (unsigned long) ndev);
760  setup_timer(&ndev->data_timer, nci_data_timer,
761  (unsigned long) ndev);
762 
763  mutex_init(&ndev->req_lock);
764 
765  goto exit;
766 
767 destroy_rx_wq_exit:
768  destroy_workqueue(ndev->rx_wq);
769 
770 destroy_cmd_wq_exit:
771  destroy_workqueue(ndev->cmd_wq);
772 
773 unreg_exit:
774  nfc_unregister_device(ndev->nfc_dev);
775 
776 exit:
777  return rc;
778 }
779 EXPORT_SYMBOL(nci_register_device);
780 
781 /**
782  * nci_unregister_device - unregister a nci device in the nfc subsystem
783  *
784  * @dev: The nci device to unregister
785  */
786 void nci_unregister_device(struct nci_dev *ndev)
787 {
788  nci_close_device(ndev);
789 
790  destroy_workqueue(ndev->cmd_wq);
791  destroy_workqueue(ndev->rx_wq);
792  destroy_workqueue(ndev->tx_wq);
793 
794  nfc_unregister_device(ndev->nfc_dev);
795 }
796 EXPORT_SYMBOL(nci_unregister_device);
797 
798 /**
799  * nci_recv_frame - receive frame from NCI drivers
800  *
801  * @skb: The sk_buff to receive
802  */
803 int nci_recv_frame(struct sk_buff *skb)
804 {
805  struct nci_dev *ndev = (struct nci_dev *) skb->dev;
806 
807  pr_debug("len %d\n", skb->len);
808 
809  if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
810  && !test_bit(NCI_INIT, &ndev->flags))) {
811  kfree_skb(skb);
812  return -ENXIO;
813  }
814 
815  /* Queue frame for rx worker thread */
816  skb_queue_tail(&ndev->rx_q, skb);
817  queue_work(ndev->rx_wq, &ndev->rx_work);
818 
819  return 0;
820 }
821 EXPORT_SYMBOL(nci_recv_frame);
822 
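nci_allocate_device(), nci_register_device(), nci_recv_frame() and the matching unregister/free calls above are the complete driver-facing surface of this file. A minimal sketch of the expected call order from a transport driver follows; every my_* name is hypothetical and the actual bus I/O is elided:

#include <linux/nfc.h>
#include <net/nfc/nci_core.h>

static struct nci_dev *my_ndev;

static int my_open(struct nci_dev *ndev)
{
	return 0;	/* power up the controller / transport */
}

static int my_close(struct nci_dev *ndev)
{
	return 0;	/* power it back down */
}

static int my_send(struct sk_buff *skb)
{
	/* write skb->data (skb->len bytes) to the controller, then
	 * consume the skb */
	kfree_skb(skb);
	return 0;
}

static struct nci_ops my_nci_ops = {
	.open	= my_open,
	.close	= my_close,
	.send	= my_send,
};

static int my_probe(void)
{
	int rc;

	my_ndev = nci_allocate_device(&my_nci_ops,
				      NFC_PROTO_ISO14443_MASK, 0, 0);
	if (!my_ndev)
		return -ENOMEM;

	rc = nci_register_device(my_ndev);
	if (rc)
		nci_free_device(my_ndev);
	return rc;
}

/* transport RX path: one complete NCI packet per skb */
static void my_rx(struct sk_buff *skb)
{
	skb->dev = (void *) my_ndev;	/* nci_recv_frame() reads this back */
	nci_recv_frame(skb);
}

static void my_remove(void)
{
	nci_unregister_device(my_ndev);
	nci_free_device(my_ndev);
}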
823 static int nci_send_frame(struct sk_buff *skb)
824 {
825  struct nci_dev *ndev = (struct nci_dev *) skb->dev;
826 
827  pr_debug("len %d\n", skb->len);
828 
829  if (!ndev) {
830  kfree_skb(skb);
831  return -ENODEV;
832  }
833 
834  /* Get rid of skb owner, prior to sending to the driver. */
835  skb_orphan(skb);
836 
837  return ndev->ops->send(skb);
838 }
839 
840 /* Send NCI command */
841 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
842 {
843  struct nci_ctrl_hdr *hdr;
844  struct sk_buff *skb;
845 
846  pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
847 
848  skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
849  if (!skb) {
850  pr_err("no memory for command\n");
851  return -ENOMEM;
852  }
853 
854  hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
855  hdr->gid = nci_opcode_gid(opcode);
856  hdr->oid = nci_opcode_oid(opcode);
857  hdr->plen = plen;
858 
859  nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
860  nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
861 
862  if (plen)
863  memcpy(skb_put(skb, plen), payload, plen);
864 
865  skb->dev = (void *) ndev;
866 
867  skb_queue_tail(&ndev->cmd_q, skb);
868  queue_work(ndev->cmd_wq, &ndev->cmd_work);
869 
870  return 0;
871 }
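As a worked example of the header assembled above (illustrative, not part of the original file): NCI_OP_CORE_RESET_CMD packs GID 0x0 and OID 0x00, so with a one-byte payload the control packet begins 0x20 0x00 0x01 (MT=command in bits 7..5 of octet 0, PBF=0, GID in the low nibble, then OID, then plen). The helper below simply re-creates what nci_reset_req() already does:

/* assumes <net/nfc/nci.h> and <net/nfc/nci_core.h>; ndev supplied by caller */
static void example_send_core_reset(struct nci_dev *ndev)
{
	struct nci_core_reset_cmd cmd = {
		.reset_type = NCI_RESET_TYPE_RESET_CONFIG,
	};

	/* queued on cmd_q; nci_cmd_work() later hands it to the driver */
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}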
872 
873 /* ---- NCI TX Data worker thread ---- */
874 
875 static void nci_tx_work(struct work_struct *work)
876 {
877  struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
878  struct sk_buff *skb;
879 
880  pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
881 
882  /* Send queued tx data */
883  while (atomic_read(&ndev->credits_cnt)) {
884  skb = skb_dequeue(&ndev->tx_q);
885  if (!skb)
886  return;
887 
888  /* Check if data flow control is used */
889  if (atomic_read(&ndev->credits_cnt) !=
890  NCI_DATA_FLOW_CONTROL_NOT_USED)
891  atomic_dec(&ndev->credits_cnt);
892 
893  pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
894  nci_pbf(skb->data),
895  nci_conn_id(skb->data),
896  nci_plen(skb->data));
897 
898  nci_send_frame(skb);
899 
900  mod_timer(&ndev->data_timer,
901  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
902  }
903 }
904 
905 /* ----- NCI RX worker thread (data & control) ----- */
906 
907 static void nci_rx_work(struct work_struct *work)
908 {
909  struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
910  struct sk_buff *skb;
911 
912  while ((skb = skb_dequeue(&ndev->rx_q))) {
913  /* Process frame */
914  switch (nci_mt(skb->data)) {
915  case NCI_MT_RSP_PKT:
916  nci_rsp_packet(ndev, skb);
917  break;
918 
919  case NCI_MT_NTF_PKT:
920  nci_ntf_packet(ndev, skb);
921  break;
922 
923  case NCI_MT_DATA_PKT:
924  nci_rx_data_packet(ndev, skb);
925  break;
926 
927  default:
928  pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
929  kfree_skb(skb);
930  break;
931  }
932  }
933 
934  /* check if a data exchange timeout has occurred */
935  if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
936  /* complete the data exchange transaction, if exists */
937  if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
938  nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);
939 
940  clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
941  }
942 }
943 
944 /* ----- NCI TX CMD worker thread ----- */
945 
946 static void nci_cmd_work(struct work_struct *work)
947 {
948  struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
949  struct sk_buff *skb;
950 
951  pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
952 
953  /* Send queued command */
954  if (atomic_read(&ndev->cmd_cnt)) {
955  skb = skb_dequeue(&ndev->cmd_q);
956  if (!skb)
957  return;
958 
959  atomic_dec(&ndev->cmd_cnt);
960 
961  pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
962  nci_pbf(skb->data),
963  nci_opcode_gid(nci_opcode(skb->data)),
964  nci_opcode_oid(nci_opcode(skb->data)),
965  nci_plen(skb->data));
966 
967  nci_send_frame(skb);
968 
969  mod_timer(&ndev->cmd_timer,
970  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
971  }
972 }
973 
974 MODULE_LICENSE("GPL");