Linux Kernel 3.7.1
net/bluetooth/hci_core.c
1 /*
2  BlueZ - Bluetooth protocol stack for Linux
3  Copyright (C) 2000-2001 Qualcomm Incorporated
4  Copyright (C) 2011 ProFUSION Embedded Systems
5 
6  Written 2000,2001 by Maxim Krasnyansky <[email protected]>
7 
8  This program is free software; you can redistribute it and/or modify
9  it under the terms of the GNU General Public License version 2 as
10  published by the Free Software Foundation;
11 
12  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13  OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16  CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21  ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22  COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23  SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 
31 #include <linux/rfkill.h>
32 
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35 
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39 
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43 
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47 
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50 
51 /* ---- HCI notifications ---- */
52 
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55  hci_sock_dev_event(hdev, event);
56 }
57 
58 /* ---- HCI requests ---- */
59 
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61 {
62  BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63 
64  /* If this is the init phase, check whether the completed command
65  * matches the last init command, and if not just return.
66  */
67  if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68  struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69  u16 opcode = __le16_to_cpu(sent->opcode);
70  struct sk_buff *skb;
71 
72  /* Some CSR based controllers generate a spontaneous
73  * reset complete event during init and any pending
74  * command will never be completed. In such a case we
75  * need to resend whatever was the last sent
76  * command.
77  */
78 
79  if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80  return;
81 
82  skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83  if (skb) {
84  skb_queue_head(&hdev->cmd_q, skb);
85  queue_work(hdev->workqueue, &hdev->cmd_work);
86  }
87 
88  return;
89  }
90 
91  if (hdev->req_status == HCI_REQ_PEND) {
92  hdev->req_result = result;
93  hdev->req_status = HCI_REQ_DONE;
94  wake_up_interruptible(&hdev->req_wait_q);
95  }
96 }
97 
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100  BT_DBG("%s err 0x%2.2x", hdev->name, err);
101 
102  if (hdev->req_status == HCI_REQ_PEND) {
103  hdev->req_result = err;
104  hdev->req_status = HCI_REQ_CANCELED;
105  wake_up_interruptible(&hdev->req_wait_q);
106  }
107 }
108 
109 /* Execute request and wait for completion. */
110 static int __hci_request(struct hci_dev *hdev,
111  void (*req)(struct hci_dev *hdev, unsigned long opt),
112  unsigned long opt, __u32 timeout)
113 {
114  DECLARE_WAITQUEUE(wait, current);
115  int err = 0;
116 
117  BT_DBG("%s start", hdev->name);
118 
119  hdev->req_status = HCI_REQ_PEND;
120 
121  add_wait_queue(&hdev->req_wait_q, &wait);
122  set_current_state(TASK_INTERRUPTIBLE);
123 
124  req(hdev, opt);
125  schedule_timeout(timeout);
126 
127  remove_wait_queue(&hdev->req_wait_q, &wait);
128 
129  if (signal_pending(current))
130  return -EINTR;
131 
132  switch (hdev->req_status) {
133  case HCI_REQ_DONE:
134  err = -bt_to_errno(hdev->req_result);
135  break;
136 
137  case HCI_REQ_CANCELED:
138  err = -hdev->req_result;
139  break;
140 
141  default:
142  err = -ETIMEDOUT;
143  break;
144  }
145 
146  hdev->req_status = hdev->req_result = 0;
147 
148  BT_DBG("%s end: err %d", hdev->name, err);
149 
150  return err;
151 }
152 
153 static int hci_request(struct hci_dev *hdev,
154  void (*req)(struct hci_dev *hdev, unsigned long opt),
155  unsigned long opt, __u32 timeout)
156 {
157  int ret;
158 
159  if (!test_bit(HCI_UP, &hdev->flags))
160  return -ENETDOWN;
161 
162  /* Serialize all requests */
163  hci_req_lock(hdev);
164  ret = __hci_request(hdev, req, opt, timeout);
165  hci_req_unlock(hdev);
166 
167  return ret;
168 }
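
The two helpers above are this file's synchronous request pattern: the callback only queues commands, and __hci_request() sleeps on req_wait_q until hci_req_complete() or the timeout fires. A minimal sketch of a caller, assuming a hypothetical helper living in this file (hci_request() is static here):

    static void my_voice_req(struct hci_dev *hdev, unsigned long opt)
    {
        __le16 setting = cpu_to_le16(opt);

        /* Only queue the command; completion is signalled elsewhere */
        hci_send_cmd(hdev, HCI_OP_WRITE_VOICE_SETTING, sizeof(setting),
                     &setting);
    }

    static int my_set_voice(struct hci_dev *hdev, u16 setting)
    {
        /* Blocks until the command completes, so process context only */
        return hci_request(hdev, my_voice_req, setting, HCI_INIT_TIMEOUT);
    }
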
169 
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171 {
172  BT_DBG("%s %ld", hdev->name, opt);
173 
174  /* Reset device */
175  set_bit(HCI_RESET, &hdev->flags);
176  hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177 }
178 
179 static void bredr_init(struct hci_dev *hdev)
180 {
181  struct hci_cp_delete_stored_link_key cp;
182  __le16 param;
183  __u8 flt_type;
184 
185  hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186 
187  /* Mandatory initialization */
188 
189  /* Read Local Supported Features */
190  hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
191 
192  /* Read Local Version */
193  hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
194 
195  /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196  hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
197 
198  /* Read BD Address */
199  hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200 
201  /* Read Class of Device */
202  hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203 
204  /* Read Local Name */
205  hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
206 
207  /* Read Voice Setting */
208  hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
209 
210  /* Optional initialization */
211 
212  /* Clear Event Filters */
213  flt_type = HCI_FLT_CLEAR_ALL;
214  hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
215 
216  /* Connection accept timeout ~20 secs */
217  param = __constant_cpu_to_le16(0x7d00);
218  hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
219 
220  bacpy(&cp.bdaddr, BDADDR_ANY);
221  cp.delete_all = 1;
222  hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
223 }
224 
225 static void amp_init(struct hci_dev *hdev)
226 {
227  hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
228 
229  /* Read Local Version */
230  hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
231 
232  /* Read Local AMP Info */
233  hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
234 
235  /* Read Data Blk size */
236  hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
237 }
238 
239 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
240 {
241  struct sk_buff *skb;
242 
243  BT_DBG("%s %ld", hdev->name, opt);
244 
245  /* Driver initialization */
246 
247  /* Special commands */
248  while ((skb = skb_dequeue(&hdev->driver_init))) {
249  bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250  skb->dev = (void *) hdev;
251 
252  skb_queue_tail(&hdev->cmd_q, skb);
253  queue_work(hdev->workqueue, &hdev->cmd_work);
254  }
255  skb_queue_purge(&hdev->driver_init);
256 
257  /* Reset */
258  if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
259  hci_reset_req(hdev, 0);
260 
261  switch (hdev->dev_type) {
262  case HCI_BREDR:
263  bredr_init(hdev);
264  break;
265 
266  case HCI_AMP:
267  amp_init(hdev);
268  break;
269 
270  default:
271  BT_ERR("Unknown device type %d", hdev->dev_type);
272  break;
273  }
274 }
275 
276 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
277 {
278  BT_DBG("%s", hdev->name);
279 
280  /* Read LE buffer size */
281  hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
282 }
283 
284 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
285 {
286  __u8 scan = opt;
287 
288  BT_DBG("%s %x", hdev->name, scan);
289 
290  /* Inquiry and Page scans */
291  hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
292 }
293 
294 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
295 {
296  __u8 auth = opt;
297 
298  BT_DBG("%s %x", hdev->name, auth);
299 
300  /* Authentication */
301  hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
302 }
303 
304 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
305 {
306  __u8 encrypt = opt;
307 
308  BT_DBG("%s %x", hdev->name, encrypt);
309 
310  /* Encryption */
311  hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
312 }
313 
314 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
315 {
316  __le16 policy = cpu_to_le16(opt);
317 
318  BT_DBG("%s %x", hdev->name, policy);
319 
320  /* Default link policy */
321  hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
322 }
323 
324 /* Get HCI device by index.
325  * Device is held on return. */
326 struct hci_dev *hci_dev_get(int index)
327 {
328  struct hci_dev *hdev = NULL, *d;
329 
330  BT_DBG("%d", index);
331 
332  if (index < 0)
333  return NULL;
334 
335  read_lock(&hci_dev_list_lock);
336  list_for_each_entry(d, &hci_dev_list, list) {
337  if (d->id == index) {
338  hdev = hci_dev_hold(d);
339  break;
340  }
341  }
342  read_unlock(&hci_dev_list_lock);
343  return hdev;
344 }
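
The hold/put discipline this implies, as a minimal sketch (my_use_hdev() is hypothetical, not part of this file):

    static int my_use_hdev(int index)
    {
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
            return -ENODEV;

        BT_DBG("%s up=%d", hdev->name, test_bit(HCI_UP, &hdev->flags));

        hci_dev_put(hdev);  /* drop the reference hci_dev_get() took */
        return 0;
    }
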
345 
346 /* ---- Inquiry support ---- */
347 
348 bool hci_discovery_active(struct hci_dev *hdev)
349 {
350  struct discovery_state *discov = &hdev->discovery;
351 
352  switch (discov->state) {
353  case DISCOVERY_FINDING:
354  case DISCOVERY_RESOLVING:
355  return true;
356 
357  default:
358  return false;
359  }
360 }
361 
362 void hci_discovery_set_state(struct hci_dev *hdev, int state)
363 {
364  BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
365 
366  if (hdev->discovery.state == state)
367  return;
368 
369  switch (state) {
370  case DISCOVERY_STOPPED:
371  if (hdev->discovery.state != DISCOVERY_STARTING)
372  mgmt_discovering(hdev, 0);
373  break;
374  case DISCOVERY_STARTING:
375  break;
376  case DISCOVERY_FINDING:
377  mgmt_discovering(hdev, 1);
378  break;
379  case DISCOVERY_RESOLVING:
380  break;
381  case DISCOVERY_STOPPING:
382  break;
383  }
384 
385  hdev->discovery.state = state;
386 }
387 
388 static void inquiry_cache_flush(struct hci_dev *hdev)
389 {
390  struct discovery_state *cache = &hdev->discovery;
391  struct inquiry_entry *p, *n;
392 
393  list_for_each_entry_safe(p, n, &cache->all, all) {
394  list_del(&p->all);
395  kfree(p);
396  }
397 
398  INIT_LIST_HEAD(&cache->unknown);
399  INIT_LIST_HEAD(&cache->resolve);
400 }
401 
402 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
403  bdaddr_t *bdaddr)
404 {
405  struct discovery_state *cache = &hdev->discovery;
406  struct inquiry_entry *e;
407 
408  BT_DBG("cache %p, %s", cache, batostr(bdaddr));
409 
410  list_for_each_entry(e, &cache->all, all) {
411  if (!bacmp(&e->data.bdaddr, bdaddr))
412  return e;
413  }
414 
415  return NULL;
416 }
417 
418 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
419  bdaddr_t *bdaddr)
420 {
421  struct discovery_state *cache = &hdev->discovery;
422  struct inquiry_entry *e;
423 
424  BT_DBG("cache %p, %s", cache, batostr(bdaddr));
425 
426  list_for_each_entry(e, &cache->unknown, list) {
427  if (!bacmp(&e->data.bdaddr, bdaddr))
428  return e;
429  }
430 
431  return NULL;
432 }
433 
434 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
435  bdaddr_t *bdaddr,
436  int state)
437 {
438  struct discovery_state *cache = &hdev->discovery;
439  struct inquiry_entry *e;
440 
441  BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
442 
443  list_for_each_entry(e, &cache->resolve, list) {
444  if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
445  return e;
446  if (!bacmp(&e->data.bdaddr, bdaddr))
447  return e;
448  }
449 
450  return NULL;
451 }
452 
453 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
454  struct inquiry_entry *ie)
455 {
456  struct discovery_state *cache = &hdev->discovery;
457  struct list_head *pos = &cache->resolve;
458  struct inquiry_entry *p;
459 
460  list_del(&ie->list);
461 
462  list_for_each_entry(p, &cache->resolve, list) {
463  if (p->name_state != NAME_PENDING &&
464  abs(p->data.rssi) >= abs(ie->data.rssi))
465  break;
466  pos = &p->list;
467  }
468 
469  list_add(&ie->list, pos);
470 }
471 
472 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
473  bool name_known, bool *ssp)
474 {
475  struct discovery_state *cache = &hdev->discovery;
476  struct inquiry_entry *ie;
477 
478  BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
479 
480  if (ssp)
481  *ssp = data->ssp_mode;
482 
483  ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
484  if (ie) {
485  if (ie->data.ssp_mode && ssp)
486  *ssp = true;
487 
488  if (ie->name_state == NAME_NEEDED &&
489  data->rssi != ie->data.rssi) {
490  ie->data.rssi = data->rssi;
491  hci_inquiry_cache_update_resolve(hdev, ie);
492  }
493 
494  goto update;
495  }
496 
497  /* Entry not in the cache. Add new one. */
498  ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
499  if (!ie)
500  return false;
501 
502  list_add(&ie->all, &cache->all);
503 
504  if (name_known) {
505  ie->name_state = NAME_KNOWN;
506  } else {
507  ie->name_state = NAME_NOT_KNOWN;
508  list_add(&ie->list, &cache->unknown);
509  }
510 
511 update:
512  if (name_known && ie->name_state != NAME_KNOWN &&
513  ie->name_state != NAME_PENDING) {
514  ie->name_state = NAME_KNOWN;
515  list_del(&ie->list);
516  }
517 
518  memcpy(&ie->data, data, sizeof(*data));
519  ie->timestamp = jiffies;
520  cache->timestamp = jiffies;
521 
522  if (ie->name_state == NAME_NOT_KNOWN)
523  return false;
524 
525  return true;
526 }
527 
528 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
529 {
530  struct discovery_state *cache = &hdev->discovery;
531  struct inquiry_info *info = (struct inquiry_info *) buf;
532  struct inquiry_entry *e;
533  int copied = 0;
534 
535  list_for_each_entry(e, &cache->all, all) {
536  struct inquiry_data *data = &e->data;
537 
538  if (copied >= num)
539  break;
540 
541  bacpy(&info->bdaddr, &data->bdaddr);
542  info->pscan_rep_mode = data->pscan_rep_mode;
543  info->pscan_period_mode = data->pscan_period_mode;
544  info->pscan_mode = data->pscan_mode;
545  memcpy(info->dev_class, data->dev_class, 3);
546  info->clock_offset = data->clock_offset;
547 
548  info++;
549  copied++;
550  }
551 
552  BT_DBG("cache %p, copied %d", cache, copied);
553  return copied;
554 }
555 
556 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
557 {
558  struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559  struct hci_cp_inquiry cp;
560 
561  BT_DBG("%s", hdev->name);
562 
563  if (test_bit(HCI_INQUIRY, &hdev->flags))
564  return;
565 
566  /* Start Inquiry */
567  memcpy(&cp.lap, &ir->lap, 3);
568  cp.length = ir->length;
569  cp.num_rsp = ir->num_rsp;
570  hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
571 }
572 
573 int hci_inquiry(void __user *arg)
574 {
575  __u8 __user *ptr = arg;
576  struct hci_inquiry_req ir;
577  struct hci_dev *hdev;
578  int err = 0, do_inquiry = 0, max_rsp;
579  long timeo;
580  __u8 *buf;
581 
582  if (copy_from_user(&ir, ptr, sizeof(ir)))
583  return -EFAULT;
584 
585  hdev = hci_dev_get(ir.dev_id);
586  if (!hdev)
587  return -ENODEV;
588 
589  hci_dev_lock(hdev);
590  if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
591  inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
592  inquiry_cache_flush(hdev);
593  do_inquiry = 1;
594  }
595  hci_dev_unlock(hdev);
596 
597  timeo = ir.length * msecs_to_jiffies(2000);
598 
599  if (do_inquiry) {
600  err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
601  if (err < 0)
602  goto done;
603  }
604 
605  /* For an unlimited number of responses, use a buffer with
606  * 255 entries.
607  */
608  max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
609 
610  /* cache_dump can't sleep. Therefore we allocate a temporary
611  * buffer and copy it to user space afterwards.
612  */
613  buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
614  if (!buf) {
615  err = -ENOMEM;
616  goto done;
617  }
618 
619  hci_dev_lock(hdev);
620  ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
621  hci_dev_unlock(hdev);
622 
623  BT_DBG("num_rsp %d", ir.num_rsp);
624 
625  if (!copy_to_user(ptr, &ir, sizeof(ir))) {
626  ptr += sizeof(ir);
627  if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
628  ir.num_rsp))
629  err = -EFAULT;
630  } else
631  err = -EFAULT;
632 
633  kfree(buf);
634 
635 done:
636  hci_dev_put(hdev);
637  return err;
638 }
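
hci_inquiry() is reached from userspace through the HCIINQUIRY ioctl on a raw HCI socket; the buffer is the request header followed by room for the inquiry_info array filled from the cache. A hypothetical userspace sketch (error handling trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>
    #include <bluetooth/hci.h>

    int main(void)
    {
        char buf[sizeof(struct hci_inquiry_req) + 255 * sizeof(inquiry_info)];
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
        inquiry_info *info = (inquiry_info *)(buf + sizeof(*ir));
        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        int i;

        memset(buf, 0, sizeof(buf));
        ir->dev_id  = 0;                       /* hci0 */
        ir->length  = 8;                       /* 8 * 1.28 s inquiry */
        ir->num_rsp = 255;                     /* 0 would also mean 255 */
        ir->lap[0] = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e; /* GIAC */

        if (dd < 0 || ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0)
            return 1;

        for (i = 0; i < ir->num_rsp; i++) {    /* num_rsp is written back */
            char addr[18];
            ba2str(&info[i].bdaddr, addr);
            printf("%s\n", addr);
        }
        return 0;
    }
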
639 
640 /* ---- HCI ioctl helpers ---- */
641 
642 int hci_dev_open(__u16 dev)
643 {
644  struct hci_dev *hdev;
645  int ret = 0;
646 
647  hdev = hci_dev_get(dev);
648  if (!hdev)
649  return -ENODEV;
650 
651  BT_DBG("%s %p", hdev->name, hdev);
652 
653  hci_req_lock(hdev);
654 
655  if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
656  ret = -ENODEV;
657  goto done;
658  }
659 
660  if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661  ret = -ERFKILL;
662  goto done;
663  }
664 
665  if (test_bit(HCI_UP, &hdev->flags)) {
666  ret = -EALREADY;
667  goto done;
668  }
669 
670  if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671  set_bit(HCI_RAW, &hdev->flags);
672 
673  /* Treat all non-BR/EDR controllers as raw devices if
674  * enable_hs is not set */
675  if (hdev->dev_type != HCI_BREDR && !enable_hs)
676  set_bit(HCI_RAW, &hdev->flags);
677 
678  if (hdev->open(hdev)) {
679  ret = -EIO;
680  goto done;
681  }
682 
683  if (!test_bit(HCI_RAW, &hdev->flags)) {
684  atomic_set(&hdev->cmd_cnt, 1);
685  set_bit(HCI_INIT, &hdev->flags);
686  hdev->init_last_cmd = 0;
687 
688  ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
689 
690  if (lmp_host_le_capable(hdev))
691  ret = __hci_request(hdev, hci_le_init_req, 0,
692  HCI_INIT_TIMEOUT);
693 
694  clear_bit(HCI_INIT, &hdev->flags);
695  }
696 
697  if (!ret) {
698  hci_dev_hold(hdev);
699  set_bit(HCI_UP, &hdev->flags);
700  hci_notify(hdev, HCI_DEV_UP);
701  if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702  mgmt_valid_hdev(hdev)) {
703  hci_dev_lock(hdev);
704  mgmt_powered(hdev, 1);
705  hci_dev_unlock(hdev);
706  }
707  } else {
708  /* Init failed, cleanup */
709  flush_work(&hdev->tx_work);
710  flush_work(&hdev->cmd_work);
711  flush_work(&hdev->rx_work);
712 
713  skb_queue_purge(&hdev->cmd_q);
714  skb_queue_purge(&hdev->rx_q);
715 
716  if (hdev->flush)
717  hdev->flush(hdev);
718 
719  if (hdev->sent_cmd) {
720  kfree_skb(hdev->sent_cmd);
721  hdev->sent_cmd = NULL;
722  }
723 
724  hdev->close(hdev);
725  hdev->flags = 0;
726  }
727 
728 done:
729  hci_req_unlock(hdev);
730  hci_dev_put(hdev);
731  return ret;
732 }
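
hci_dev_open() is driven from userspace by the HCIDEVUP ioctl on an HCI control socket (hci_dev_close() by HCIDEVDOWN); a hypothetical sketch:

    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>
    #include <bluetooth/hci.h>

    static int bring_up(int dev_id)
    {
        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (ctl < 0)
            return -1;
        /* Fails with EALREADY if the device is already up */
        return ioctl(ctl, HCIDEVUP, dev_id);
    }
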
733 
734 static int hci_dev_do_close(struct hci_dev *hdev)
735 {
736  BT_DBG("%s %p", hdev->name, hdev);
737 
738  cancel_work_sync(&hdev->le_scan);
739 
741 
742  hci_req_cancel(hdev, ENODEV);
743  hci_req_lock(hdev);
744 
745  if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
746  del_timer_sync(&hdev->cmd_timer);
747  hci_req_unlock(hdev);
748  return 0;
749  }
750 
751  /* Flush RX and TX works */
752  flush_work(&hdev->tx_work);
753  flush_work(&hdev->rx_work);
754 
755  if (hdev->discov_timeout > 0) {
756  cancel_delayed_work(&hdev->discov_off);
757  hdev->discov_timeout = 0;
758  clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
759  }
760 
761  if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
762  cancel_delayed_work(&hdev->service_cache);
763 
764  cancel_delayed_work_sync(&hdev->le_scan_disable);
765 
766  hci_dev_lock(hdev);
767  inquiry_cache_flush(hdev);
768  hci_conn_hash_flush(hdev);
769  hci_dev_unlock(hdev);
770 
771  hci_notify(hdev, HCI_DEV_DOWN);
772 
773  if (hdev->flush)
774  hdev->flush(hdev);
775 
776  /* Reset device */
777  skb_queue_purge(&hdev->cmd_q);
778  atomic_set(&hdev->cmd_cnt, 1);
779  if (!test_bit(HCI_RAW, &hdev->flags) &&
780  test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
781  set_bit(HCI_INIT, &hdev->flags);
782  __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
783  clear_bit(HCI_INIT, &hdev->flags);
784  }
785 
786  /* flush cmd work */
787  flush_work(&hdev->cmd_work);
788 
789  /* Drop queues */
790  skb_queue_purge(&hdev->rx_q);
791  skb_queue_purge(&hdev->cmd_q);
792  skb_queue_purge(&hdev->raw_q);
793 
794  /* Drop last sent command */
795  if (hdev->sent_cmd) {
796  del_timer_sync(&hdev->cmd_timer);
797  kfree_skb(hdev->sent_cmd);
798  hdev->sent_cmd = NULL;
799  }
800 
801  /* After this point our queues are empty
802  * and no tasks are scheduled. */
803  hdev->close(hdev);
804 
805  if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
806  mgmt_valid_hdev(hdev)) {
807  hci_dev_lock(hdev);
808  mgmt_powered(hdev, 0);
809  hci_dev_unlock(hdev);
810  }
811 
812  /* Clear flags */
813  hdev->flags = 0;
814 
815  memset(hdev->eir, 0, sizeof(hdev->eir));
816  memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
817 
818  hci_req_unlock(hdev);
819 
820  hci_dev_put(hdev);
821  return 0;
822 }
823 
824 int hci_dev_close(__u16 dev)
825 {
826  struct hci_dev *hdev;
827  int err;
828 
829  hdev = hci_dev_get(dev);
830  if (!hdev)
831  return -ENODEV;
832 
833  if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
834  cancel_delayed_work(&hdev->power_off);
835 
836  err = hci_dev_do_close(hdev);
837 
838  hci_dev_put(hdev);
839  return err;
840 }
841 
842 int hci_dev_reset(__u16 dev)
843 {
844  struct hci_dev *hdev;
845  int ret = 0;
846 
847  hdev = hci_dev_get(dev);
848  if (!hdev)
849  return -ENODEV;
850 
851  hci_req_lock(hdev);
852 
853  if (!test_bit(HCI_UP, &hdev->flags))
854  goto done;
855 
856  /* Drop queues */
857  skb_queue_purge(&hdev->rx_q);
858  skb_queue_purge(&hdev->cmd_q);
859 
860  hci_dev_lock(hdev);
861  inquiry_cache_flush(hdev);
862  hci_conn_hash_flush(hdev);
863  hci_dev_unlock(hdev);
864 
865  if (hdev->flush)
866  hdev->flush(hdev);
867 
868  atomic_set(&hdev->cmd_cnt, 1);
869  hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
870 
871  if (!test_bit(HCI_RAW, &hdev->flags))
872  ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
873 
874 done:
875  hci_req_unlock(hdev);
876  hci_dev_put(hdev);
877  return ret;
878 }
879 
880 int hci_dev_reset_stat(__u16 dev)
881 {
882  struct hci_dev *hdev;
883  int ret = 0;
884 
885  hdev = hci_dev_get(dev);
886  if (!hdev)
887  return -ENODEV;
888 
889  memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
890 
891  hci_dev_put(hdev);
892 
893  return ret;
894 }
895 
896 int hci_dev_cmd(unsigned int cmd, void __user *arg)
897 {
898  struct hci_dev *hdev;
899  struct hci_dev_req dr;
900  int err = 0;
901 
902  if (copy_from_user(&dr, arg, sizeof(dr)))
903  return -EFAULT;
904 
905  hdev = hci_dev_get(dr.dev_id);
906  if (!hdev)
907  return -ENODEV;
908 
909  switch (cmd) {
910  case HCISETAUTH:
911  err = hci_request(hdev, hci_auth_req, dr.dev_opt,
912  HCI_INIT_TIMEOUT);
913  break;
914 
915  case HCISETENCRYPT:
916  if (!lmp_encrypt_capable(hdev)) {
917  err = -EOPNOTSUPP;
918  break;
919  }
920 
921  if (!test_bit(HCI_AUTH, &hdev->flags)) {
922  /* Auth must be enabled first */
923  err = hci_request(hdev, hci_auth_req, dr.dev_opt,
924  HCI_INIT_TIMEOUT);
925  if (err)
926  break;
927  }
928 
929  err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
930  HCI_INIT_TIMEOUT);
931  break;
932 
933  case HCISETSCAN:
934  err = hci_request(hdev, hci_scan_req, dr.dev_opt,
935  HCI_INIT_TIMEOUT);
936  break;
937 
938  case HCISETLINKPOL:
939  err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
940  HCI_INIT_TIMEOUT);
941  break;
942 
943  case HCISETLINKMODE:
944  hdev->link_mode = ((__u16) dr.dev_opt) &
945  (HCI_LM_MASTER | HCI_LM_ACCEPT);
946  break;
947 
948  case HCISETPTYPE:
949  hdev->pkt_type = (__u16) dr.dev_opt;
950  break;
951 
952  case HCISETACLMTU:
953  hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
954  hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
955  break;
956 
957  case HCISETSCOMTU:
958  hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
959  hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
960  break;
961 
962  default:
963  err = -EINVAL;
964  break;
965  }
966 
967  hci_dev_put(hdev);
968  return err;
969 }
970 
971 int hci_get_dev_list(void __user *arg)
972 {
973  struct hci_dev *hdev;
974  struct hci_dev_list_req *dl;
975  struct hci_dev_req *dr;
976  int n = 0, size, err;
977  __u16 dev_num;
978 
979  if (get_user(dev_num, (__u16 __user *) arg))
980  return -EFAULT;
981 
982  if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
983  return -EINVAL;
984 
985  size = sizeof(*dl) + dev_num * sizeof(*dr);
986 
987  dl = kzalloc(size, GFP_KERNEL);
988  if (!dl)
989  return -ENOMEM;
990 
991  dr = dl->dev_req;
992 
993  read_lock(&hci_dev_list_lock);
994  list_for_each_entry(hdev, &hci_dev_list, list) {
995  if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
996  cancel_delayed_work(&hdev->power_off);
997 
998  if (!test_bit(HCI_MGMT, &hdev->dev_flags))
999  set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1000 
1001  (dr + n)->dev_id = hdev->id;
1002  (dr + n)->dev_opt = hdev->flags;
1003 
1004  if (++n >= dev_num)
1005  break;
1006  }
1007  read_unlock(&hci_dev_list_lock);
1008 
1009  dl->dev_num = n;
1010  size = sizeof(*dl) + n * sizeof(*dr);
1011 
1012  err = copy_to_user(arg, dl, size);
1013  kfree(dl);
1014 
1015  return err ? -EFAULT : 0;
1016 }
1017 
1018 int hci_get_dev_info(void __user *arg)
1019 {
1020  struct hci_dev *hdev;
1021  struct hci_dev_info di;
1022  int err = 0;
1023 
1024  if (copy_from_user(&di, arg, sizeof(di)))
1025  return -EFAULT;
1026 
1027  hdev = hci_dev_get(di.dev_id);
1028  if (!hdev)
1029  return -ENODEV;
1030 
1031  if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1032  cancel_delayed_work(&hdev->power_off);
1033 
1034  if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1035  set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1036 
1037  strcpy(di.name, hdev->name);
1038  di.bdaddr = hdev->bdaddr;
1039  di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1040  di.flags = hdev->flags;
1041  di.pkt_type = hdev->pkt_type;
1042  di.acl_mtu = hdev->acl_mtu;
1043  di.acl_pkts = hdev->acl_pkts;
1044  di.sco_mtu = hdev->sco_mtu;
1045  di.sco_pkts = hdev->sco_pkts;
1046  di.link_policy = hdev->link_policy;
1047  di.link_mode = hdev->link_mode;
1048 
1049  memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1050  memcpy(&di.features, &hdev->features, sizeof(di.features));
1051 
1052  if (copy_to_user(arg, &di, sizeof(di)))
1053  err = -EFAULT;
1054 
1055  hci_dev_put(hdev);
1056 
1057  return err;
1058 }
1059 
1060 /* ---- Interface to HCI drivers ---- */
1061 
1062 static int hci_rfkill_set_block(void *data, bool blocked)
1063 {
1064  struct hci_dev *hdev = data;
1065 
1066  BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1067 
1068  if (!blocked)
1069  return 0;
1070 
1071  hci_dev_do_close(hdev);
1072 
1073  return 0;
1074 }
1075 
1076 static const struct rfkill_ops hci_rfkill_ops = {
1077  .set_block = hci_rfkill_set_block,
1078 };
1079 
1080 static void hci_power_on(struct work_struct *work)
1081 {
1082  struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1083 
1084  BT_DBG("%s", hdev->name);
1085 
1086  if (hci_dev_open(hdev->id) < 0)
1087  return;
1088 
1089  if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1090  schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1091 
1092  if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1093  mgmt_index_added(hdev);
1094 }
1095 
1096 static void hci_power_off(struct work_struct *work)
1097 {
1098  struct hci_dev *hdev = container_of(work, struct hci_dev,
1099  power_off.work);
1100 
1101  BT_DBG("%s", hdev->name);
1102 
1103  hci_dev_do_close(hdev);
1104 }
1105 
1106 static void hci_discov_off(struct work_struct *work)
1107 {
1108  struct hci_dev *hdev;
1109  u8 scan = SCAN_PAGE;
1110 
1111  hdev = container_of(work, struct hci_dev, discov_off.work);
1112 
1113  BT_DBG("%s", hdev->name);
1114 
1115  hci_dev_lock(hdev);
1116 
1117  hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1118 
1119  hdev->discov_timeout = 0;
1120 
1121  hci_dev_unlock(hdev);
1122 }
1123 
1124 int hci_uuids_clear(struct hci_dev *hdev)
1125 {
1126  struct list_head *p, *n;
1127 
1128  list_for_each_safe(p, n, &hdev->uuids) {
1129  struct bt_uuid *uuid;
1130 
1131  uuid = list_entry(p, struct bt_uuid, list);
1132 
1133  list_del(p);
1134  kfree(uuid);
1135  }
1136 
1137  return 0;
1138 }
1139 
1140 int hci_link_keys_clear(struct hci_dev *hdev)
1141 {
1142  struct list_head *p, *n;
1143 
1144  list_for_each_safe(p, n, &hdev->link_keys) {
1145  struct link_key *key;
1146 
1147  key = list_entry(p, struct link_key, list);
1148 
1149  list_del(p);
1150  kfree(key);
1151  }
1152 
1153  return 0;
1154 }
1155 
1156 int hci_smp_ltks_clear(struct hci_dev *hdev)
1157 {
1158  struct smp_ltk *k, *tmp;
1159 
1160  list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1161  list_del(&k->list);
1162  kfree(k);
1163  }
1164 
1165  return 0;
1166 }
1167 
1168 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1169 {
1170  struct link_key *k;
1171 
1172  list_for_each_entry(k, &hdev->link_keys, list)
1173  if (bacmp(bdaddr, &k->bdaddr) == 0)
1174  return k;
1175 
1176  return NULL;
1177 }
1178 
1179 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1180  u8 key_type, u8 old_key_type)
1181 {
1182  /* Legacy key */
1183  if (key_type < 0x03)
1184  return true;
1185 
1186  /* Debug keys are insecure so don't store them persistently */
1187  if (key_type == HCI_LK_DEBUG_COMBINATION)
1188  return false;
1189 
1190  /* Changed combination key and there's no previous one */
1191  if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1192  return false;
1193 
1194  /* Security mode 3 case */
1195  if (!conn)
1196  return true;
1197 
1198  /* Neither local nor remote side had no-bonding as requirement */
1199  if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1200  return true;
1201 
1202  /* Local side had dedicated bonding as requirement */
1203  if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1204  return true;
1205 
1206  /* Remote side had dedicated bonding as requirement */
1207  if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1208  return true;
1209 
1210  /* If none of the above criteria match, then don't store the key
1211  * persistently */
1212  return false;
1213 }
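
Worked example of the rules above: a plain Combination Key (type 0x00) is below 0x03 and is stored; a Debug Combination Key (0x03) is never stored; a Changed Combination Key (0x06) with old_key_type == 0xff is dropped, since no previous key exists to have been changed.
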
1214 
1215 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1216 {
1217  struct smp_ltk *k;
1218 
1219  list_for_each_entry(k, &hdev->long_term_keys, list) {
1220  if (k->ediv != ediv ||
1221  memcmp(rand, k->rand, sizeof(k->rand)))
1222  continue;
1223 
1224  return k;
1225  }
1226 
1227  return NULL;
1228 }
1229 
1230 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1231  u8 addr_type)
1232 {
1233  struct smp_ltk *k;
1234 
1235  list_for_each_entry(k, &hdev->long_term_keys, list)
1236  if (addr_type == k->bdaddr_type &&
1237  bacmp(bdaddr, &k->bdaddr) == 0)
1238  return k;
1239 
1240  return NULL;
1241 }
1242 
1243 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1244  bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1245 {
1246  struct link_key *key, *old_key;
1247  u8 old_key_type;
1248  bool persistent;
1249 
1250  old_key = hci_find_link_key(hdev, bdaddr);
1251  if (old_key) {
1252  old_key_type = old_key->type;
1253  key = old_key;
1254  } else {
1255  old_key_type = conn ? conn->key_type : 0xff;
1256  key = kzalloc(sizeof(*key), GFP_ATOMIC);
1257  if (!key)
1258  return -ENOMEM;
1259  list_add(&key->list, &hdev->link_keys);
1260  }
1261 
1262  BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1263 
1264  /* Some buggy controller combinations generate a changed
1265  * combination key for legacy pairing even when there's no
1266  * previous key */
1267  if (type == HCI_LK_CHANGED_COMBINATION &&
1268  (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1269  type = HCI_LK_COMBINATION;
1270  if (conn)
1271  conn->key_type = type;
1272  }
1273 
1274  bacpy(&key->bdaddr, bdaddr);
1275  memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1276  key->pin_len = pin_len;
1277 
1278  if (type == HCI_LK_CHANGED_COMBINATION)
1279  key->type = old_key_type;
1280  else
1281  key->type = type;
1282 
1283  if (!new_key)
1284  return 0;
1285 
1286  persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1287 
1288  mgmt_new_link_key(hdev, key, persistent);
1289 
1290  if (conn)
1291  conn->flush_key = !persistent;
1292 
1293  return 0;
1294 }
1295 
1296 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1297  int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1298  ediv, u8 rand[8])
1299 {
1300  struct smp_ltk *key, *old_key;
1301 
1302  if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1303  return 0;
1304 
1305  old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1306  if (old_key)
1307  key = old_key;
1308  else {
1309  key = kzalloc(sizeof(*key), GFP_ATOMIC);
1310  if (!key)
1311  return -ENOMEM;
1312  list_add(&key->list, &hdev->long_term_keys);
1313  }
1314 
1315  bacpy(&key->bdaddr, bdaddr);
1316  key->bdaddr_type = addr_type;
1317  memcpy(key->val, tk, sizeof(key->val));
1318  key->authenticated = authenticated;
1319  key->ediv = ediv;
1320  key->enc_size = enc_size;
1321  key->type = type;
1322  memcpy(key->rand, rand, sizeof(key->rand));
1323 
1324  if (!new_key)
1325  return 0;
1326 
1327  if (type & HCI_SMP_LTK)
1328  mgmt_new_ltk(hdev, key, 1);
1329 
1330  return 0;
1331 }
1332 
1333 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1334 {
1335  struct link_key *key;
1336 
1337  key = hci_find_link_key(hdev, bdaddr);
1338  if (!key)
1339  return -ENOENT;
1340 
1341  BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1342 
1343  list_del(&key->list);
1344  kfree(key);
1345 
1346  return 0;
1347 }
1348 
1349 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350 {
1351  struct smp_ltk *k, *tmp;
1352 
1353  list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1354  if (bacmp(bdaddr, &k->bdaddr))
1355  continue;
1356 
1357  BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1358 
1359  list_del(&k->list);
1360  kfree(k);
1361  }
1362 
1363  return 0;
1364 }
1365 
1366 /* HCI command timer function */
1367 static void hci_cmd_timeout(unsigned long arg)
1368 {
1369  struct hci_dev *hdev = (void *) arg;
1370 
1371  if (hdev->sent_cmd) {
1372  struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1373  u16 opcode = __le16_to_cpu(sent->opcode);
1374 
1375  BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1376  } else {
1377  BT_ERR("%s command tx timeout", hdev->name);
1378  }
1379 
1380  atomic_set(&hdev->cmd_cnt, 1);
1381  queue_work(hdev->workqueue, &hdev->cmd_work);
1382 }
1383 
1384 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1385  bdaddr_t *bdaddr)
1386 {
1387  struct oob_data *data;
1388 
1389  list_for_each_entry(data, &hdev->remote_oob_data, list)
1390  if (bacmp(bdaddr, &data->bdaddr) == 0)
1391  return data;
1392 
1393  return NULL;
1394 }
1395 
1396 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397 {
1398  struct oob_data *data;
1399 
1400  data = hci_find_remote_oob_data(hdev, bdaddr);
1401  if (!data)
1402  return -ENOENT;
1403 
1404  BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1405 
1406  list_del(&data->list);
1407  kfree(data);
1408 
1409  return 0;
1410 }
1411 
1412 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1413 {
1414  struct oob_data *data, *n;
1415 
1416  list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1417  list_del(&data->list);
1418  kfree(data);
1419  }
1420 
1421  return 0;
1422 }
1423 
1424 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1425  u8 *randomizer)
1426 {
1427  struct oob_data *data;
1428 
1429  data = hci_find_remote_oob_data(hdev, bdaddr);
1430 
1431  if (!data) {
1432  data = kmalloc(sizeof(*data), GFP_ATOMIC);
1433  if (!data)
1434  return -ENOMEM;
1435 
1436  bacpy(&data->bdaddr, bdaddr);
1437  list_add(&data->list, &hdev->remote_oob_data);
1438  }
1439 
1440  memcpy(data->hash, hash, sizeof(data->hash));
1441  memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1442 
1443  BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1444 
1445  return 0;
1446 }
1447 
1448 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1449 {
1450  struct bdaddr_list *b;
1451 
1452  list_for_each_entry(b, &hdev->blacklist, list)
1453  if (bacmp(bdaddr, &b->bdaddr) == 0)
1454  return b;
1455 
1456  return NULL;
1457 }
1458 
1459 int hci_blacklist_clear(struct hci_dev *hdev)
1460 {
1461  struct list_head *p, *n;
1462 
1463  list_for_each_safe(p, n, &hdev->blacklist) {
1464  struct bdaddr_list *b;
1465 
1466  b = list_entry(p, struct bdaddr_list, list);
1467 
1468  list_del(p);
1469  kfree(b);
1470  }
1471 
1472  return 0;
1473 }
1474 
1475 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1476 {
1477  struct bdaddr_list *entry;
1478 
1479  if (bacmp(bdaddr, BDADDR_ANY) == 0)
1480  return -EBADF;
1481 
1482  if (hci_blacklist_lookup(hdev, bdaddr))
1483  return -EEXIST;
1484 
1485  entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1486  if (!entry)
1487  return -ENOMEM;
1488 
1489  bacpy(&entry->bdaddr, bdaddr);
1490 
1491  list_add(&entry->list, &hdev->blacklist);
1492 
1493  return mgmt_device_blocked(hdev, bdaddr, type);
1494 }
1495 
1496 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1497 {
1498  struct bdaddr_list *entry;
1499 
1500  if (bacmp(bdaddr, BDADDR_ANY) == 0)
1501  return hci_blacklist_clear(hdev);
1502 
1503  entry = hci_blacklist_lookup(hdev, bdaddr);
1504  if (!entry)
1505  return -ENOENT;
1506 
1507  list_del(&entry->list);
1508  kfree(entry);
1509 
1510  return mgmt_device_unblocked(hdev, bdaddr, type);
1511 }
1512 
1513 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1514 {
1515  struct le_scan_params *param = (struct le_scan_params *) opt;
1516  struct hci_cp_le_set_scan_param cp;
1517 
1518  memset(&cp, 0, sizeof(cp));
1519  cp.type = param->type;
1520  cp.interval = cpu_to_le16(param->interval);
1521  cp.window = cpu_to_le16(param->window);
1522 
1523  hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1524 }
1525 
1526 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1527 {
1528  struct hci_cp_le_set_scan_enable cp;
1529 
1530  memset(&cp, 0, sizeof(cp));
1531  cp.enable = 1;
1532  cp.filter_dup = 1;
1533 
1534  hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1535 }
1536 
1537 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1538  u16 window, int timeout)
1539 {
1540  long timeo = msecs_to_jiffies(3000);
1541  struct le_scan_params param;
1542  int err;
1543 
1544  BT_DBG("%s", hdev->name);
1545 
1546  if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1547  return -EINPROGRESS;
1548 
1549  param.type = type;
1550  param.interval = interval;
1551  param.window = window;
1552 
1553  hci_req_lock(hdev);
1554 
1555  err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1556  timeo);
1557  if (!err)
1558  err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1559 
1560  hci_req_unlock(hdev);
1561 
1562  if (err < 0)
1563  return err;
1564 
1565  schedule_delayed_work(&hdev->le_scan_disable,
1566  msecs_to_jiffies(timeout));
1567 
1568  return 0;
1569 }
1570 
1571 int hci_cancel_le_scan(struct hci_dev *hdev)
1572 {
1573  BT_DBG("%s", hdev->name);
1574 
1575  if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1576  return -EALREADY;
1577 
1578  if (cancel_delayed_work(&hdev->le_scan_disable)) {
1579  struct hci_cp_le_set_scan_enable cp;
1580 
1581  /* Send HCI command to disable LE Scan */
1582  memset(&cp, 0, sizeof(cp));
1583  hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1584  }
1585 
1586  return 0;
1587 }
1588 
1589 static void le_scan_disable_work(struct work_struct *work)
1590 {
1591  struct hci_dev *hdev = container_of(work, struct hci_dev,
1592  le_scan_disable.work);
1593  struct hci_cp_le_set_scan_enable cp;
1594 
1595  BT_DBG("%s", hdev->name);
1596 
1597  memset(&cp, 0, sizeof(cp));
1598 
1599  hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1600 }
1601 
1602 static void le_scan_work(struct work_struct *work)
1603 {
1604  struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1605  struct le_scan_params *param = &hdev->le_scan_params;
1606 
1607  BT_DBG("%s", hdev->name);
1608 
1609  hci_do_le_scan(hdev, param->type, param->interval, param->window,
1610  param->timeout);
1611 }
1612 
1613 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1614  int timeout)
1615 {
1616  struct le_scan_params *param = &hdev->le_scan_params;
1617 
1618  BT_DBG("%s", hdev->name);
1619 
1620  if (work_busy(&hdev->le_scan))
1621  return -EINPROGRESS;
1622 
1623  param->type = type;
1624  param->interval = interval;
1625  param->window = window;
1626  param->timeout = timeout;
1627 
1628  queue_work(system_long_wq, &hdev->le_scan);
1629 
1630  return 0;
1631 }
1632 
1633 /* Alloc HCI device */
1634 struct hci_dev *hci_alloc_dev(void)
1635 {
1636  struct hci_dev *hdev;
1637 
1638  hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1639  if (!hdev)
1640  return NULL;
1641 
1642  hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1643  hdev->esco_type = (ESCO_HV1);
1644  hdev->link_mode = (HCI_LM_ACCEPT);
1645  hdev->io_capability = 0x03; /* No Input No Output */
1646 
1647  hdev->sniff_max_interval = 800;
1648  hdev->sniff_min_interval = 80;
1649 
1650  mutex_init(&hdev->lock);
1651  mutex_init(&hdev->req_lock);
1652 
1653  INIT_LIST_HEAD(&hdev->mgmt_pending);
1654  INIT_LIST_HEAD(&hdev->blacklist);
1655  INIT_LIST_HEAD(&hdev->uuids);
1656  INIT_LIST_HEAD(&hdev->link_keys);
1657  INIT_LIST_HEAD(&hdev->long_term_keys);
1658  INIT_LIST_HEAD(&hdev->remote_oob_data);
1659  INIT_LIST_HEAD(&hdev->conn_hash.list);
1660 
1661  INIT_WORK(&hdev->rx_work, hci_rx_work);
1662  INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1663  INIT_WORK(&hdev->tx_work, hci_tx_work);
1664  INIT_WORK(&hdev->power_on, hci_power_on);
1665  INIT_WORK(&hdev->le_scan, le_scan_work);
1666 
1667  INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1668  INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1669  INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1670 
1671  skb_queue_head_init(&hdev->driver_init);
1672  skb_queue_head_init(&hdev->rx_q);
1673  skb_queue_head_init(&hdev->cmd_q);
1674  skb_queue_head_init(&hdev->raw_q);
1675 
1676  init_waitqueue_head(&hdev->req_wait_q);
1677 
1678  setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1679 
1680  hci_init_sysfs(hdev);
1681  discovery_init(hdev);
1682 
1683  return hdev;
1684 }
1685 EXPORT_SYMBOL(hci_alloc_dev);
1686 
1687 /* Free HCI device */
1688 void hci_free_dev(struct hci_dev *hdev)
1689 {
1690  skb_queue_purge(&hdev->driver_init);
1691 
1692  /* will free via device release */
1693  put_device(&hdev->dev);
1694 }
1695 EXPORT_SYMBOL(hci_free_dev);
1696 
1697 /* Register HCI device */
1698 int hci_register_dev(struct hci_dev *hdev)
1699 {
1700  int id, error;
1701 
1702  if (!hdev->open || !hdev->close)
1703  return -EINVAL;
1704 
1705  /* Do not allow HCI_AMP devices to register at index 0,
1706  * so the index can be used as the AMP controller ID.
1707  */
1708  switch (hdev->dev_type) {
1709  case HCI_BREDR:
1710  id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1711  break;
1712  case HCI_AMP:
1713  id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1714  break;
1715  default:
1716  return -EINVAL;
1717  }
1718 
1719  if (id < 0)
1720  return id;
1721 
1722  sprintf(hdev->name, "hci%d", id);
1723  hdev->id = id;
1724 
1725  BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1726 
1727  write_lock(&hci_dev_list_lock);
1728  list_add(&hdev->list, &hci_dev_list);
1729  write_unlock(&hci_dev_list_lock);
1730 
1731  hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1732  WQ_MEM_RECLAIM, 1);
1733  if (!hdev->workqueue) {
1734  error = -ENOMEM;
1735  goto err;
1736  }
1737 
1738  error = hci_add_sysfs(hdev);
1739  if (error < 0)
1740  goto err_wqueue;
1741 
1742  hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1743  RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1744  hdev);
1745  if (hdev->rfkill) {
1746  if (rfkill_register(hdev->rfkill) < 0) {
1747  rfkill_destroy(hdev->rfkill);
1748  hdev->rfkill = NULL;
1749  }
1750  }
1751 
1752  set_bit(HCI_SETUP, &hdev->dev_flags);
1753 
1754  if (hdev->dev_type != HCI_AMP)
1755  set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1756 
1757  hci_notify(hdev, HCI_DEV_REG);
1758  hci_dev_hold(hdev);
1759 
1760  schedule_work(&hdev->power_on);
1761 
1762  return id;
1763 
1764 err_wqueue:
1765  destroy_workqueue(hdev->workqueue);
1766 err:
1767  ida_simple_remove(&hci_index_ida, hdev->id);
1768  write_lock(&hci_dev_list_lock);
1769  list_del(&hdev->list);
1770  write_unlock(&hci_dev_list_lock);
1771 
1772  return error;
1773 }
1774 EXPORT_SYMBOL(hci_register_dev);
1775 
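
A condensed sketch of the driver side of this API, loosely modeled on transport drivers such as btusb (all my_* names are hypothetical, not part of this file):

    static int my_open(struct hci_dev *hdev)
    {
        return 0;           /* power up the transport */
    }

    static int my_close(struct hci_dev *hdev)
    {
        return 0;           /* quiesce the transport */
    }

    static int my_send(struct sk_buff *skb)
    {
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        BT_DBG("%s sending %d bytes", hdev->name, skb->len);
        kfree_skb(skb);     /* a real driver hands this to hardware */
        return 0;
    }

    static int my_probe(void)
    {
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
            return -ENOMEM;

        hdev->bus   = HCI_USB;
        hdev->open  = my_open;
        hdev->close = my_close;
        hdev->send  = my_send;

        if (hci_register_dev(hdev) < 0) {
            hci_free_dev(hdev);
            return -ENODEV;
        }
        return 0;
    }
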
1776 /* Unregister HCI device */
1777 void hci_unregister_dev(struct hci_dev *hdev)
1778 {
1779  int i, id;
1780 
1781  BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1782 
1783  set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1784 
1785  id = hdev->id;
1786 
1787  write_lock(&hci_dev_list_lock);
1788  list_del(&hdev->list);
1789  write_unlock(&hci_dev_list_lock);
1790 
1791  hci_dev_do_close(hdev);
1792 
1793  for (i = 0; i < NUM_REASSEMBLY; i++)
1794  kfree_skb(hdev->reassembly[i]);
1795 
1796  if (!test_bit(HCI_INIT, &hdev->flags) &&
1797  !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1798  hci_dev_lock(hdev);
1799  mgmt_index_removed(hdev);
1800  hci_dev_unlock(hdev);
1801  }
1802 
1803  /* mgmt_index_removed should take care of emptying the
1804  * pending list */
1805  BUG_ON(!list_empty(&hdev->mgmt_pending));
1806 
1807  hci_notify(hdev, HCI_DEV_UNREG);
1808 
1809  if (hdev->rfkill) {
1810  rfkill_unregister(hdev->rfkill);
1811  rfkill_destroy(hdev->rfkill);
1812  }
1813 
1814  hci_del_sysfs(hdev);
1815 
1816  destroy_workqueue(hdev->workqueue);
1817 
1818  hci_dev_lock(hdev);
1819  hci_blacklist_clear(hdev);
1820  hci_uuids_clear(hdev);
1821  hci_link_keys_clear(hdev);
1822  hci_smp_ltks_clear(hdev);
1823  hci_remote_oob_data_clear(hdev);
1824  hci_dev_unlock(hdev);
1825 
1826  hci_dev_put(hdev);
1827 
1828  ida_simple_remove(&hci_index_ida, id);
1829 }
1830 EXPORT_SYMBOL(hci_unregister_dev);
1831 
1832 /* Suspend HCI device */
1833 int hci_suspend_dev(struct hci_dev *hdev)
1834 {
1835  hci_notify(hdev, HCI_DEV_SUSPEND);
1836  return 0;
1837 }
1838 EXPORT_SYMBOL(hci_suspend_dev);
1839 
1840 /* Resume HCI device */
1841 int hci_resume_dev(struct hci_dev *hdev)
1842 {
1843  hci_notify(hdev, HCI_DEV_RESUME);
1844  return 0;
1845 }
1846 EXPORT_SYMBOL(hci_resume_dev);
1847 
1848 /* Receive frame from HCI drivers */
1849 int hci_recv_frame(struct sk_buff *skb)
1850 {
1851  struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1852  if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1853  && !test_bit(HCI_INIT, &hdev->flags))) {
1854  kfree_skb(skb);
1855  return -ENXIO;
1856  }
1857 
1858  /* Incoming skb */
1859  bt_cb(skb)->incoming = 1;
1860 
1861  /* Time stamp */
1862  __net_timestamp(skb);
1863 
1864  skb_queue_tail(&hdev->rx_q, skb);
1865  queue_work(hdev->workqueue, &hdev->rx_work);
1866 
1867  return 0;
1868 }
1869 EXPORT_SYMBOL(hci_recv_frame);
1870 
1871 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1872  int count, __u8 index)
1873 {
1874  int len = 0;
1875  int hlen = 0;
1876  int remain = count;
1877  struct sk_buff *skb;
1878  struct bt_skb_cb *scb;
1879 
1880  if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1881  index >= NUM_REASSEMBLY)
1882  return -EILSEQ;
1883 
1884  skb = hdev->reassembly[index];
1885 
1886  if (!skb) {
1887  switch (type) {
1888  case HCI_ACLDATA_PKT:
1889  len = HCI_MAX_FRAME_SIZE;
1890  hlen = HCI_ACL_HDR_SIZE;
1891  break;
1892  case HCI_EVENT_PKT:
1893  len = HCI_MAX_EVENT_SIZE;
1894  hlen = HCI_EVENT_HDR_SIZE;
1895  break;
1896  case HCI_SCODATA_PKT:
1897  len = HCI_MAX_SCO_SIZE;
1898  hlen = HCI_SCO_HDR_SIZE;
1899  break;
1900  }
1901 
1902  skb = bt_skb_alloc(len, GFP_ATOMIC);
1903  if (!skb)
1904  return -ENOMEM;
1905 
1906  scb = (void *) skb->cb;
1907  scb->expect = hlen;
1908  scb->pkt_type = type;
1909 
1910  skb->dev = (void *) hdev;
1911  hdev->reassembly[index] = skb;
1912  }
1913 
1914  while (count) {
1915  scb = (void *) skb->cb;
1916  len = min_t(uint, scb->expect, count);
1917 
1918  memcpy(skb_put(skb, len), data, len);
1919 
1920  count -= len;
1921  data += len;
1922  scb->expect -= len;
1923  remain = count;
1924 
1925  switch (type) {
1926  case HCI_EVENT_PKT:
1927  if (skb->len == HCI_EVENT_HDR_SIZE) {
1928  struct hci_event_hdr *h = hci_event_hdr(skb);
1929  scb->expect = h->plen;
1930 
1931  if (skb_tailroom(skb) < scb->expect) {
1932  kfree_skb(skb);
1933  hdev->reassembly[index] = NULL;
1934  return -ENOMEM;
1935  }
1936  }
1937  break;
1938 
1939  case HCI_ACLDATA_PKT:
1940  if (skb->len == HCI_ACL_HDR_SIZE) {
1941  struct hci_acl_hdr *h = hci_acl_hdr(skb);
1942  scb->expect = __le16_to_cpu(h->dlen);
1943 
1944  if (skb_tailroom(skb) < scb->expect) {
1945  kfree_skb(skb);
1946  hdev->reassembly[index] = NULL;
1947  return -ENOMEM;
1948  }
1949  }
1950  break;
1951 
1952  case HCI_SCODATA_PKT:
1953  if (skb->len == HCI_SCO_HDR_SIZE) {
1954  struct hci_sco_hdr *h = hci_sco_hdr(skb);
1955  scb->expect = h->dlen;
1956 
1957  if (skb_tailroom(skb) < scb->expect) {
1958  kfree_skb(skb);
1959  hdev->reassembly[index] = NULL;
1960  return -ENOMEM;
1961  }
1962  }
1963  break;
1964  }
1965 
1966  if (scb->expect == 0) {
1967  /* Complete frame */
1968 
1969  bt_cb(skb)->pkt_type = type;
1970  hci_recv_frame(skb);
1971 
1972  hdev->reassembly[index] = NULL;
1973  return remain;
1974  }
1975  }
1976 
1977  return remain;
1978 }
1979 
1980 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1981 {
1982  int rem = 0;
1983 
1984  if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1985  return -EILSEQ;
1986 
1987  while (count) {
1988  rem = hci_reassembly(hdev, type, data, count, type - 1);
1989  if (rem < 0)
1990  return rem;
1991 
1992  data += (count - rem);
1993  count = rem;
1994  }
1995 
1996  return rem;
1997 }
1998 EXPORT_SYMBOL(hci_recv_fragment);
1999 
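
A minimal sketch of a transport driver feeding received bytes into the reassembly code above (my_rx_chunk() is hypothetical): hci_recv_fragment() buffers partial packets per type and pushes complete frames through hci_recv_frame():

    static void my_rx_chunk(struct hci_dev *hdev, void *buf, int len)
    {
        int err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);

        if (err < 0)
            BT_ERR("%s reassembly failed (%d)", hdev->name, err);
    }
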
2000 #define STREAM_REASSEMBLY 0
2001 
2002 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2003 {
2004  int type;
2005  int rem = 0;
2006 
2007  while (count) {
2008  struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2009 
2010  if (!skb) {
2011  struct { char type; } *pkt;
2012 
2013  /* Start of the frame */
2014  pkt = data;
2015  type = pkt->type;
2016 
2017  data++;
2018  count--;
2019  } else
2020  type = bt_cb(skb)->pkt_type;
2021 
2022  rem = hci_reassembly(hdev, type, data, count,
2023  STREAM_REASSEMBLY);
2024  if (rem < 0)
2025  return rem;
2026 
2027  data += (count - rem);
2028  count = rem;
2029  }
2030 
2031  return rem;
2032 }
2033 EXPORT_SYMBOL(hci_recv_stream_fragment);
2034 
2035 /* ---- Interface to upper protocols ---- */
2036 
2037 int hci_register_cb(struct hci_cb *cb)
2038 {
2039  BT_DBG("%p name %s", cb, cb->name);
2040 
2041  write_lock(&hci_cb_list_lock);
2042  list_add(&cb->list, &hci_cb_list);
2043  write_unlock(&hci_cb_list_lock);
2044 
2045  return 0;
2046 }
2047 EXPORT_SYMBOL(hci_register_cb);
2048 
2049 int hci_unregister_cb(struct hci_cb *cb)
2050 {
2051  BT_DBG("%p name %s", cb, cb->name);
2052 
2053  write_lock(&hci_cb_list_lock);
2054  list_del(&cb->list);
2055  write_unlock(&hci_cb_list_lock);
2056 
2057  return 0;
2058 }
2059 EXPORT_SYMBOL(hci_unregister_cb);
2060 
2061 static int hci_send_frame(struct sk_buff *skb)
2062 {
2063  struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2064 
2065  if (!hdev) {
2066  kfree_skb(skb);
2067  return -ENODEV;
2068  }
2069 
2070  BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2071 
2072  /* Time stamp */
2073  __net_timestamp(skb);
2074 
2075  /* Send copy to monitor */
2076  hci_send_to_monitor(hdev, skb);
2077 
2078  if (atomic_read(&hdev->promisc)) {
2079  /* Send copy to the sockets */
2080  hci_send_to_sock(hdev, skb);
2081  }
2082 
2083  /* Get rid of skb owner, prior to sending to the driver. */
2084  skb_orphan(skb);
2085 
2086  return hdev->send(skb);
2087 }
2088 
2089 /* Send HCI command */
2090 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2091 {
2092  int len = HCI_COMMAND_HDR_SIZE + plen;
2093  struct hci_command_hdr *hdr;
2094  struct sk_buff *skb;
2095 
2096  BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2097 
2098  skb = bt_skb_alloc(len, GFP_ATOMIC);
2099  if (!skb) {
2100  BT_ERR("%s no memory for command", hdev->name);
2101  return -ENOMEM;
2102  }
2103 
2104  hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2105  hdr->opcode = cpu_to_le16(opcode);
2106  hdr->plen = plen;
2107 
2108  if (plen)
2109  memcpy(skb_put(skb, plen), param, plen);
2110 
2111  BT_DBG("skb len %d", skb->len);
2112 
2113  bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2114  skb->dev = (void *) hdev;
2115 
2116  if (test_bit(HCI_INIT, &hdev->flags))
2117  hdev->init_last_cmd = opcode;
2118 
2119  skb_queue_tail(&hdev->cmd_q, skb);
2120  queue_work(hdev->workqueue, &hdev->cmd_work);
2121 
2122  return 0;
2123 }
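
A minimal sketch of a caller, assuming a hypothetical helper: the payload handed to hci_send_cmd() is the raw command parameter block, here Write Scan Enable with page scan only:

    static int my_enable_page_scan(struct hci_dev *hdev)
    {
        __u8 scan = SCAN_PAGE;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
                            sizeof(scan), &scan);
    }
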
2124 
2125 /* Get data from the previously sent command */
2126 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2127 {
2128  struct hci_command_hdr *hdr;
2129 
2130  if (!hdev->sent_cmd)
2131  return NULL;
2132 
2133  hdr = (void *) hdev->sent_cmd->data;
2134 
2135  if (hdr->opcode != cpu_to_le16(opcode))
2136  return NULL;
2137 
2138  BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2139 
2140  return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2141 }
2142 
2143 /* Send ACL data */
2144 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2145 {
2146  struct hci_acl_hdr *hdr;
2147  int len = skb->len;
2148 
2149  skb_push(skb, HCI_ACL_HDR_SIZE);
2150  skb_reset_transport_header(skb);
2151  hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2152  hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2153  hdr->dlen = cpu_to_le16(len);
2154 }
2155 
2156 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2157  struct sk_buff *skb, __u16 flags)
2158 {
2159  struct hci_dev *hdev = conn->hdev;
2160  struct sk_buff *list;
2161 
2162  skb->len = skb_headlen(skb);
2163  skb->data_len = 0;
2164 
2165  bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2166  hci_add_acl_hdr(skb, conn->handle, flags);
2167 
2168  list = skb_shinfo(skb)->frag_list;
2169  if (!list) {
2170  /* Non fragmented */
2171  BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2172 
2173  skb_queue_tail(queue, skb);
2174  } else {
2175  /* Fragmented */
2176  BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2177 
2178  skb_shinfo(skb)->frag_list = NULL;
2179 
2180  /* Queue all fragments atomically */
2181  spin_lock(&queue->lock);
2182 
2183  __skb_queue_tail(queue, skb);
2184 
2185  flags &= ~ACL_START;
2186  flags |= ACL_CONT;
2187  do {
2188  skb = list; list = list->next;
2189 
2190  skb->dev = (void *) hdev;
2191  bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2192  hci_add_acl_hdr(skb, conn->handle, flags);
2193 
2194  BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2195 
2196  __skb_queue_tail(queue, skb);
2197  } while (list);
2198 
2199  spin_unlock(&queue->lock);
2200  }
2201 }
2202 
2203 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2204 {
2205  struct hci_conn *conn = chan->conn;
2206  struct hci_dev *hdev = conn->hdev;
2207 
2208  BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2209 
2210  skb->dev = (void *) hdev;
2211 
2212  hci_queue_acl(conn, &chan->data_q, skb, flags);
2213 
2214  queue_work(hdev->workqueue, &hdev->tx_work);
2215 }
2216 
2217 /* Send SCO data */
2218 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2219 {
2220  struct hci_dev *hdev = conn->hdev;
2221  struct hci_sco_hdr hdr;
2222 
2223  BT_DBG("%s len %d", hdev->name, skb->len);
2224 
2225  hdr.handle = cpu_to_le16(conn->handle);
2226  hdr.dlen = skb->len;
2227 
2228  skb_push(skb, HCI_SCO_HDR_SIZE);
2229  skb_reset_transport_header(skb);
2230  memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2231 
2232  skb->dev = (void *) hdev;
2233  bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2234 
2235  skb_queue_tail(&conn->data_q, skb);
2236  queue_work(hdev->workqueue, &hdev->tx_work);
2237 }
2238 
2239 /* ---- HCI TX task (outgoing data) ---- */
2240 
2241 /* HCI Connection scheduler */
2242 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2243  int *quote)
2244 {
2245  struct hci_conn_hash *h = &hdev->conn_hash;
2246  struct hci_conn *conn = NULL, *c;
2247  unsigned int num = 0, min = ~0;
2248 
2249  /* We don't have to lock device here. Connections are always
2250  * added and removed with TX task disabled. */
2251 
2252  rcu_read_lock();
2253 
2254  list_for_each_entry_rcu(c, &h->list, list) {
2255  if (c->type != type || skb_queue_empty(&c->data_q))
2256  continue;
2257 
2258  if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2259  continue;
2260 
2261  num++;
2262 
2263  if (c->sent < min) {
2264  min = c->sent;
2265  conn = c;
2266  }
2267 
2268  if (hci_conn_num(hdev, type) == num)
2269  break;
2270  }
2271 
2272  rcu_read_unlock();
2273 
2274  if (conn) {
2275  int cnt, q;
2276 
2277  switch (conn->type) {
2278  case ACL_LINK:
2279  cnt = hdev->acl_cnt;
2280  break;
2281  case SCO_LINK:
2282  case ESCO_LINK:
2283  cnt = hdev->sco_cnt;
2284  break;
2285  case LE_LINK:
2286  cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2287  break;
2288  default:
2289  cnt = 0;
2290  BT_ERR("Unknown link type");
2291  }
2292 
2293  q = cnt / num;
2294  *quote = q ? q : 1;
2295  } else
2296  *quote = 0;
2297 
2298  BT_DBG("conn %p quote %d", conn, *quote);
2299  return conn;
2300 }
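
The quote computed above is a plain fair share: with hdev->acl_cnt = 9 free ACL slots and num = 4 eligible connections, the least-recently-served connection gets q = 9 / 4 = 2 packets this round; the q ? q : 1 fallback guarantees progress even when cnt < num.
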
2301 
2302 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2303 {
2304  struct hci_conn_hash *h = &hdev->conn_hash;
2305  struct hci_conn *c;
2306 
2307  BT_ERR("%s link tx timeout", hdev->name);
2308 
2309  rcu_read_lock();
2310 
2311  /* Kill stalled connections */
2312  list_for_each_entry_rcu(c, &h->list, list) {
2313  if (c->type == type && c->sent) {
2314  BT_ERR("%s killing stalled connection %s",
2315  hdev->name, batostr(&c->dst));
2316  hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2317  }
2318  }
2319 
2320  rcu_read_unlock();
2321 }
2322 
2323 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2324  int *quote)
2325 {
2326  struct hci_conn_hash *h = &hdev->conn_hash;
2327  struct hci_chan *chan = NULL;
2328  unsigned int num = 0, min = ~0, cur_prio = 0;
2329  struct hci_conn *conn;
2330  int cnt, q, conn_num = 0;
2331 
2332  BT_DBG("%s", hdev->name);
2333 
2334  rcu_read_lock();
2335 
2336  list_for_each_entry_rcu(conn, &h->list, list) {
2337  struct hci_chan *tmp;
2338 
2339  if (conn->type != type)
2340  continue;
2341 
2342  if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2343  continue;
2344 
2345  conn_num++;
2346 
2347  list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2348  struct sk_buff *skb;
2349 
2350  if (skb_queue_empty(&tmp->data_q))
2351  continue;
2352 
2353  skb = skb_peek(&tmp->data_q);
2354  if (skb->priority < cur_prio)
2355  continue;
2356 
2357  if (skb->priority > cur_prio) {
2358  num = 0;
2359  min = ~0;
2360  cur_prio = skb->priority;
2361  }
2362 
2363  num++;
2364 
2365  if (conn->sent < min) {
2366  min = conn->sent;
2367  chan = tmp;
2368  }
2369  }
2370 
2371  if (hci_conn_num(hdev, type) == conn_num)
2372  break;
2373  }
2374 
2375  rcu_read_unlock();
2376 
2377  if (!chan)
2378  return NULL;
2379 
2380  switch (chan->conn->type) {
2381  case ACL_LINK:
2382  cnt = hdev->acl_cnt;
2383  break;
2384  case SCO_LINK:
2385  case ESCO_LINK:
2386  cnt = hdev->sco_cnt;
2387  break;
2388  case LE_LINK:
2389  cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2390  break;
2391  default:
2392  cnt = 0;
2393  BT_ERR("Unknown link type");
2394  }
2395 
2396  q = cnt / num;
2397  *quote = q ? q : 1;
2398  BT_DBG("chan %p quote %d", chan, *quote);
2399  return chan;
2400 }
2401 
2402 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2403 {
2404  struct hci_conn_hash *h = &hdev->conn_hash;
2405  struct hci_conn *conn;
2406  int num = 0;
2407 
2408  BT_DBG("%s", hdev->name);
2409 
2410  rcu_read_lock();
2411 
2412  list_for_each_entry_rcu(conn, &h->list, list) {
2413  struct hci_chan *chan;
2414 
2415  if (conn->type != type)
2416  continue;
2417 
2418  if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2419  continue;
2420 
2421  num++;
2422 
2423  list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2424  struct sk_buff *skb;
2425 
2426  if (chan->sent) {
2427  chan->sent = 0;
2428  continue;
2429  }
2430 
2431  if (skb_queue_empty(&chan->data_q))
2432  continue;
2433 
2434  skb = skb_peek(&chan->data_q);
2435  if (skb->priority >= HCI_PRIO_MAX - 1)
2436  continue;
2437 
2438  skb->priority = HCI_PRIO_MAX - 1;
2439 
2440  BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2441  skb->priority);
2442  }
2443 
2444  if (hci_conn_num(hdev, type) == num)
2445  break;
2446  }
2447 
2448  rcu_read_unlock();
2449 
2450 }
2451 
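/* Illustrative sketch, not from hci_core.c: after a scheduling round,
 * hci_prio_recalculate() promotes the head packet of every channel that
 * sent nothing (chan->sent == 0) to HCI_PRIO_MAX - 1, so a low-priority
 * queue cannot be starved forever by higher-priority traffic. The rule
 * in isolation (TOY_PRIO_MAX and all names invented for the example):
 */
#include <stdio.h>

#define TOY_PRIO_MAX 8	/* stand-in for HCI_PRIO_MAX; value invented */

static void toy_promote(unsigned int *head_prio, unsigned int sent)
{
	if (sent)
		return;			/* channel made progress; leave it */
	if (*head_prio >= TOY_PRIO_MAX - 1)
		return;			/* already at the ceiling */
	*head_prio = TOY_PRIO_MAX - 1;	/* jumps the queue next round */
}

int main(void)
{
	unsigned int prio = 2;

	toy_promote(&prio, 0);			/* a starved channel */
	printf("promoted to %u\n", prio);	/* prints 7 */
	return 0;
}
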
2452 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2453 {
2454  /* Calculate count of blocks used by this packet */
2455  return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2456 }
2457 
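/* Illustrative sketch, not from hci_core.c: block-based flow control
 * charges a packet by how many controller buffer blocks its payload
 * occupies, rounded up. DIV_ROUND_UP(n, d) is ((n + d - 1) / d), so a
 * 340-byte payload with 64-byte blocks costs 6 blocks, not 5:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int payload = 340, block_len = 64;

	printf("%u blocks\n", DIV_ROUND_UP(payload, block_len));	/* 6 */
	return 0;
}
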
2458 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2459 {
2460  if (!test_bit(HCI_RAW, &hdev->flags)) {
2461  /* ACL tx timeout must be longer than maximum
2462  * link supervision timeout (40.9 seconds) */
2463  if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2464  HCI_ACL_TX_TIMEOUT))
2465  hci_link_tx_to(hdev, ACL_LINK);
2466  }
2467 }
2468 
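/* Illustrative sketch, not from hci_core.c: the timeout test relies on
 * the kernel's time_after(a, b), which is wrap-safe because it compares
 * via signed subtraction rather than a plain a > b. A minimal user-space
 * rendering for a 32-bit tick counter (names invented):
 */
#include <stdio.h>
#include <stdint.h>

static int toy_time_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;	/* true if a is later than b */
}

int main(void)
{
	/* still correct when the counter has wrapped past zero */
	printf("%d\n", toy_time_after(5, 0xfffffff0u));	/* prints 1 */
	return 0;
}
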
2469 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2470 {
2471  unsigned int cnt = hdev->acl_cnt;
2472  struct hci_chan *chan;
2473  struct sk_buff *skb;
2474  int quote;
2475 
2476  __check_timeout(hdev, cnt);
2477 
2478  while (hdev->acl_cnt &&
2479  (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2480  u32 priority = (skb_peek(&chan->data_q))->priority;
2481  while (quote-- && (skb = skb_peek(&chan->data_q))) {
2482  BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2483  skb->len, skb->priority);
2484 
2485  /* Stop if priority has changed */
2486  if (skb->priority < priority)
2487  break;
2488 
2489  skb = skb_dequeue(&chan->data_q);
2490 
2491  hci_conn_enter_active_mode(chan->conn,
2492  bt_cb(skb)->force_active);
2493 
2494  hci_send_frame(skb);
2495  hdev->acl_last_tx = jiffies;
2496 
2497  hdev->acl_cnt--;
2498  chan->sent++;
2499  chan->conn->sent++;
2500  }
2501  }
2502 
2503  if (cnt != hdev->acl_cnt)
2504  hci_prio_recalculate(hdev, ACL_LINK);
2505 }
2506 
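/* Illustrative sketch, not from hci_core.c: each scheduling pass drains
 * at most `quote` packets from the chosen channel and stops early if the
 * head of the queue drops below the priority the pass started with, so a
 * long low-priority run never delays newly queued high-priority data.
 * The drain loop on a toy priority array (all names invented):
 */
#include <stdio.h>

int main(void)
{
	unsigned int queue[] = { 7, 7, 5, 5 };	/* head-first priorities */
	int quote = 3, i = 0, n = 4;
	unsigned int priority = queue[0];	/* priority of this pass */

	while (quote-- > 0 && i < n) {
		if (queue[i] < priority)	/* stop if priority changed */
			break;
		printf("send pkt %d (prio %u)\n", i, queue[i]);
		i++;
	}
	/* sends packets 0 and 1 only; the prio-5 tail waits for the next
	 * pass (or for promotion by hci_prio_recalculate()) */
	return 0;
}
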
2507 static void hci_sched_acl_blk(struct hci_dev *hdev)
2508 {
2509  unsigned int cnt = hdev->block_cnt;
2510  struct hci_chan *chan;
2511  struct sk_buff *skb;
2512  int quote;
2513 
2514  __check_timeout(hdev, cnt);
2515 
2516  while (hdev->block_cnt > 0 &&
2517  (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2518  u32 priority = (skb_peek(&chan->data_q))->priority;
2519  while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2520  int blocks;
2521 
2522  BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2523  skb->len, skb->priority);
2524 
2525  /* Stop if priority has changed */
2526  if (skb->priority < priority)
2527  break;
2528 
2529  skb = skb_dequeue(&chan->data_q);
2530 
2531  blocks = __get_blocks(hdev, skb);
2532  if (blocks > hdev->block_cnt)
2533  return;
2534 
2535  hci_conn_enter_active_mode(chan->conn,
2536  bt_cb(skb)->force_active);
2537 
2538  hci_send_frame(skb);
2539  hdev->acl_last_tx = jiffies;
2540 
2541  hdev->block_cnt -= blocks;
2542  quote -= blocks;
2543 
2544  chan->sent += blocks;
2545  chan->conn->sent += blocks;
2546  }
2547  }
2548 
2549  if (cnt != hdev->block_cnt)
2550  hci_prio_recalculate(hdev, ACL_LINK);
2551 }
2552 
2553 static void hci_sched_acl(struct hci_dev *hdev)
2554 {
2555  BT_DBG("%s", hdev->name);
2556 
2557  if (!hci_conn_num(hdev, ACL_LINK))
2558  return;
2559 
2560  switch (hdev->flow_ctl_mode) {
2561  case HCI_FLOW_CTL_MODE_PACKET_BASED:
2562  hci_sched_acl_pkt(hdev);
2563  break;
2564 
2565  case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2566  hci_sched_acl_blk(hdev);
2567  break;
2568  }
2569 }
2570 
2571 /* Schedule SCO */
2572 static void hci_sched_sco(struct hci_dev *hdev)
2573 {
2574  struct hci_conn *conn;
2575  struct sk_buff *skb;
2576  int quote;
2577 
2578  BT_DBG("%s", hdev->name);
2579 
2580  if (!hci_conn_num(hdev, SCO_LINK))
2581  return;
2582 
2583  while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2584  while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2585  BT_DBG("skb %p len %d", skb, skb->len);
2586  hci_send_frame(skb);
2587 
2588  conn->sent++;
2589  if (conn->sent == ~0)
2590  conn->sent = 0;
2591  }
2592  }
2593 }
2594 
2595 static void hci_sched_esco(struct hci_dev *hdev)
2596 {
2597  struct hci_conn *conn;
2598  struct sk_buff *skb;
2599  int quote;
2600 
2601  BT_DBG("%s", hdev->name);
2602 
2603  if (!hci_conn_num(hdev, ESCO_LINK))
2604  return;
2605 
2606  while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2607  &quote))) {
2608  while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2609  BT_DBG("skb %p len %d", skb, skb->len);
2610  hci_send_frame(skb);
2611 
2612  conn->sent++;
2613  if (conn->sent == ~0)
2614  conn->sent = 0;
2615  }
2616  }
2617 }
2618 
2619 static void hci_sched_le(struct hci_dev *hdev)
2620 {
2621  struct hci_chan *chan;
2622  struct sk_buff *skb;
2623  int quote, cnt, tmp;
2624 
2625  BT_DBG("%s", hdev->name);
2626 
2627  if (!hci_conn_num(hdev, LE_LINK))
2628  return;
2629 
2630  if (!test_bit(HCI_RAW, &hdev->flags)) {
2631  /* LE tx timeout must be longer than maximum
2632  * link supervision timeout (40.9 seconds) */
2633  if (!hdev->le_cnt && hdev->le_pkts &&
2634  time_after(jiffies, hdev->le_last_tx + HZ * 45))
2635  hci_link_tx_to(hdev, LE_LINK);
2636  }
2637 
2638  cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2639  tmp = cnt;
2640  while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2641  u32 priority = (skb_peek(&chan->data_q))->priority;
2642  while (quote-- && (skb = skb_peek(&chan->data_q))) {
2643  BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2644  skb->len, skb->priority);
2645 
2646  /* Stop if priority has changed */
2647  if (skb->priority < priority)
2648  break;
2649 
2650  skb = skb_dequeue(&chan->data_q);
2651 
2652  hci_send_frame(skb);
2653  hdev->le_last_tx = jiffies;
2654 
2655  cnt--;
2656  chan->sent++;
2657  chan->conn->sent++;
2658  }
2659  }
2660 
2661  if (hdev->le_pkts)
2662  hdev->le_cnt = cnt;
2663  else
2664  hdev->acl_cnt = cnt;
2665 
2666  if (cnt != tmp)
2667  hci_prio_recalculate(hdev, LE_LINK);
2668 }
2669 
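/* Illustrative sketch, not from hci_core.c: controllers that report no
 * dedicated LE buffer pool (le_pkts == 0) share the ACL pool, so the LE
 * scheduler both draws its budget from and writes the remainder back to
 * whichever pool is in use, as hci_sched_le() does above. A toy version
 * (struct and names invented for the example):
 */
#include <stdio.h>

struct toy_dev {
	unsigned int le_pkts;	/* 0 = no dedicated LE buffers */
	unsigned int le_cnt;
	unsigned int acl_cnt;
};

static void toy_sched_le(struct toy_dev *d, unsigned int to_send)
{
	unsigned int cnt = d->le_pkts ? d->le_cnt : d->acl_cnt;

	while (cnt && to_send) {	/* pretend to transmit */
		cnt--;
		to_send--;
	}

	if (d->le_pkts)			/* return remainder to the right pool */
		d->le_cnt = cnt;
	else
		d->acl_cnt = cnt;
}

int main(void)
{
	struct toy_dev d = { .le_pkts = 0, .le_cnt = 0, .acl_cnt = 5 };

	toy_sched_le(&d, 2);			/* LE data spends ACL credits */
	printf("acl_cnt now %u\n", d.acl_cnt);	/* prints 3 */
	return 0;
}
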
2670 static void hci_tx_work(struct work_struct *work)
2671 {
2672  struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2673  struct sk_buff *skb;
2674 
2675  BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2676  hdev->sco_cnt, hdev->le_cnt);
2677 
2678  /* Schedule queues and send stuff to HCI driver */
2679 
2680  hci_sched_acl(hdev);
2681 
2682  hci_sched_sco(hdev);
2683 
2684  hci_sched_esco(hdev);
2685 
2686  hci_sched_le(hdev);
2687 
2688  /* Send next queued raw (unknown type) packet */
2689  while ((skb = skb_dequeue(&hdev->raw_q)))
2690  hci_send_frame(skb);
2691 }
2692 
2693 /* ----- HCI RX task (incoming data processing) ----- */
2694 
2695 /* ACL data packet */
2696 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2697 {
2698  struct hci_acl_hdr *hdr = (void *) skb->data;
2699  struct hci_conn *conn;
2700  __u16 handle, flags;
2701 
2702  skb_pull(skb, HCI_ACL_HDR_SIZE);
2703 
2704  handle = __le16_to_cpu(hdr->handle);
2705  flags = hci_flags(handle);
2706  handle = hci_handle(handle);
2707 
2708  BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2709  handle, flags);
2710 
2711  hdev->stat.acl_rx++;
2712 
2713  hci_dev_lock(hdev);
2714  conn = hci_conn_hash_lookup_handle(hdev, handle);
2715  hci_dev_unlock(hdev);
2716 
2717  if (conn) {
2718  hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2719 
2720  hci_dev_lock(hdev);
2721  if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2722  !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2723  mgmt_device_connected(hdev, &conn->dst, conn->type,
2724  conn->dst_type, 0, NULL, 0,
2725  conn->dev_class);
2726  hci_dev_unlock(hdev);
2727 
2728  /* Send to upper protocol */
2729  l2cap_recv_acldata(conn, skb, flags);
2730  return;
2731  } else {
2732  BT_ERR("%s ACL packet for unknown connection handle %d",
2733  hdev->name, handle);
2734  }
2735 
2736  kfree_skb(skb);
2737 }
2738 
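/* Illustrative sketch, not from hci_core.c: the 16-bit field in the ACL
 * header packs a 12-bit connection handle with the Packet Boundary and
 * Broadcast flags in the top nibble; hci_handle() and hci_flags() are
 * the corresponding mask and shift. Toy versions of the two accessors:
 */
#include <stdio.h>
#include <stdint.h>

#define toy_hci_handle(h)	((h) & 0x0fff)	/* low 12 bits */
#define toy_hci_flags(h)	((h) >> 12)	/* PB + BC flag bits */

int main(void)
{
	uint16_t raw = 0x2042;	/* flags 0x2, handle 0x042 */

	printf("handle 0x%03x flags 0x%x\n",
	       toy_hci_handle(raw), toy_hci_flags(raw));
	return 0;
}
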
2739 /* SCO data packet */
2740 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2741 {
2742  struct hci_sco_hdr *hdr = (void *) skb->data;
2743  struct hci_conn *conn;
2744  __u16 handle;
2745 
2746  skb_pull(skb, HCI_SCO_HDR_SIZE);
2747 
2748  handle = __le16_to_cpu(hdr->handle);
2749 
2750  BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2751 
2752  hdev->stat.sco_rx++;
2753 
2754  hci_dev_lock(hdev);
2755  conn = hci_conn_hash_lookup_handle(hdev, handle);
2756  hci_dev_unlock(hdev);
2757 
2758  if (conn) {
2759  /* Send to upper protocol */
2760  sco_recv_scodata(conn, skb);
2761  return;
2762  } else {
2763  BT_ERR("%s SCO packet for unknown connection handle %d",
2764  hdev->name, handle);
2765  }
2766 
2767  kfree_skb(skb);
2768 }
2769 
2770 static void hci_rx_work(struct work_struct *work)
2771 {
2772  struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2773  struct sk_buff *skb;
2774 
2775  BT_DBG("%s", hdev->name);
2776 
2777  while ((skb = skb_dequeue(&hdev->rx_q))) {
2778  /* Send copy to monitor */
2779  hci_send_to_monitor(hdev, skb);
2780 
2781  if (atomic_read(&hdev->promisc)) {
2782  /* Send copy to the sockets */
2783  hci_send_to_sock(hdev, skb);
2784  }
2785 
2786  if (test_bit(HCI_RAW, &hdev->flags)) {
2787  kfree_skb(skb);
2788  continue;
2789  }
2790 
2791  if (test_bit(HCI_INIT, &hdev->flags)) {
2792  /* Don't process data packets in this state. */
2793  switch (bt_cb(skb)->pkt_type) {
2794  case HCI_ACLDATA_PKT:
2795  case HCI_SCODATA_PKT:
2796  kfree_skb(skb);
2797  continue;
2798  }
2799  }
2800 
2801  /* Process frame */
2802  switch (bt_cb(skb)->pkt_type) {
2803  case HCI_EVENT_PKT:
2804  BT_DBG("%s Event packet", hdev->name);
2805  hci_event_packet(hdev, skb);
2806  break;
2807 
2808  case HCI_ACLDATA_PKT:
2809  BT_DBG("%s ACL data packet", hdev->name);
2810  hci_acldata_packet(hdev, skb);
2811  break;
2812 
2813  case HCI_SCODATA_PKT:
2814  BT_DBG("%s SCO data packet", hdev->name);
2815  hci_scodata_packet(hdev, skb);
2816  break;
2817 
2818  default:
2819  kfree_skb(skb);
2820  break;
2821  }
2822  }
2823 }
2824 
2825 static void hci_cmd_work(struct work_struct *work)
2826 {
2827  struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2828  struct sk_buff *skb;
2829 
2830  BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2831  atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2832 
2833  /* Send queued commands */
2834  if (atomic_read(&hdev->cmd_cnt)) {
2835  skb = skb_dequeue(&hdev->cmd_q);
2836  if (!skb)
2837  return;
2838 
2839  kfree_skb(hdev->sent_cmd);
2840 
2841  hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2842  if (hdev->sent_cmd) {
2843  atomic_dec(&hdev->cmd_cnt);
2844  hci_send_frame(skb);
2845  if (test_bit(HCI_RESET, &hdev->flags))
2846  del_timer(&hdev->cmd_timer);
2847  else
2848  mod_timer(&hdev->cmd_timer,
2849  jiffies + HCI_CMD_TIMEOUT);
2850  } else {
2851  skb_queue_head(&hdev->cmd_q, skb);
2852  queue_work(hdev->workqueue, &hdev->cmd_work);
2853  }
2854  }
2855 }
2856 
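/* Illustrative sketch, not from hci_core.c: hci_cmd_work() transmits only
 * when cmd_cnt credits are available, keeps a clone of the in-flight
 * command in sent_cmd (so it can be retransmitted after a spontaneous
 * controller reset), and arms a watchdog unless a reset is pending; the
 * credit is returned by the completion event. The credit gate in
 * isolation (struct and names invented for the example):
 */
#include <stdio.h>

struct toy_cmdq {
	int credits;	/* commands the controller will still accept */
	int queued;	/* commands waiting in cmd_q */
	int in_flight;	/* stands in for the sent_cmd clone */
};

static void toy_cmd_work(struct toy_cmdq *q)
{
	if (!q->credits || !q->queued)
		return;			/* wait for credit or work */

	q->queued--;
	q->credits--;			/* consumed by the transmit */
	q->in_flight = 1;		/* kept for a possible resend */
	printf("command sent, watchdog armed\n");
}

int main(void)
{
	struct toy_cmdq q = { .credits = 1, .queued = 2 };

	toy_cmd_work(&q);	/* sends one command */
	toy_cmd_work(&q);	/* blocked until an event returns the credit */
	return 0;
}
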
2857 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2858 {
2859  /* General inquiry access code (GIAC) */
2860  u8 lap[3] = { 0x33, 0x8b, 0x9e };
2861  struct hci_cp_inquiry cp;
2862 
2863  BT_DBG("%s", hdev->name);
2864 
2865  if (test_bit(HCI_INQUIRY, &hdev->flags))
2866  return -EINPROGRESS;
2867 
2868  inquiry_cache_flush(hdev);
2869 
2870  memset(&cp, 0, sizeof(cp));
2871  memcpy(&cp.lap, lap, sizeof(cp.lap));
2872  cp.length = length;
2873 
2874  return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2875 }
2876 
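/* Illustrative sketch, not from hci_core.c: the LAP above is the General
 * Inquiry Access Code 0x9E8B33 stored little-endian, which is why it
 * appears as { 0x33, 0x8b, 0x9e } in the command parameters:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t lap[3] = { 0x33, 0x8b, 0x9e };	/* little-endian on the wire */
	uint32_t giac = lap[0] | (lap[1] << 8) | ((uint32_t)lap[2] << 16);

	printf("GIAC = 0x%06x\n", giac);	/* prints 0x9e8b33 */
	return 0;
}
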
2877 int hci_cancel_inquiry(struct hci_dev *hdev)
2878 {
2879  BT_DBG("%s", hdev->name);
2880 
2881  if (!test_bit(HCI_INQUIRY, &hdev->flags))
2882  return -EALREADY;
2883 
2884  return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2885 }
2886 
2887 u8 bdaddr_to_le(u8 bdaddr_type)
2888 {
2889  switch (bdaddr_type) {
2890  case BDADDR_LE_PUBLIC:
2891  return ADDR_LE_DEV_PUBLIC;
2892 
2893  default:
2894  /* Fallback to LE Random address type */
2895  return ADDR_LE_DEV_RANDOM;
2896  }
2897 }