/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

Johan Hedberg0857dd32014-12-19 13:40:20 +020037void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
Jaganath Kanakkasseryf17d8582017-10-25 10:58:48 +053044void hci_req_purge(struct hci_request *req)
45{
46 skb_queue_purge(&req->cmd_q);
47}
48
Johan Hedberge62144872015-04-02 13:41:08 +030049static int req_run(struct hci_request *req, hci_req_complete_t complete,
50 hci_req_complete_skb_t complete_skb)
Johan Hedberg0857dd32014-12-19 13:40:20 +020051{
52 struct hci_dev *hdev = req->hdev;
53 struct sk_buff *skb;
54 unsigned long flags;
55
56 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
57
58 /* If an error occurred during request building, remove all HCI
59 * commands queued on the HCI request queue.
60 */
61 if (req->err) {
62 skb_queue_purge(&req->cmd_q);
63 return req->err;
64 }
65
66 /* Do not allow empty requests */
67 if (skb_queue_empty(&req->cmd_q))
68 return -ENODATA;
69
70 skb = skb_peek_tail(&req->cmd_q);
Johan Hedberg44d27132015-11-05 09:31:40 +020071 if (complete) {
72 bt_cb(skb)->hci.req_complete = complete;
73 } else if (complete_skb) {
74 bt_cb(skb)->hci.req_complete_skb = complete_skb;
75 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
76 }
Johan Hedberg0857dd32014-12-19 13:40:20 +020077
78 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
79 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
80 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
81
82 queue_work(hdev->workqueue, &hdev->cmd_work);
83
84 return 0;
85}
86
Johan Hedberge62144872015-04-02 13:41:08 +030087int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
88{
89 return req_run(req, complete, NULL);
90}
91
92int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
93{
94 return req_run(req, NULL, complete);
95}
96
Johan Hedbergbe91cd02015-11-10 09:44:54 +020097static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
98 struct sk_buff *skb)
99{
100 BT_DBG("%s result 0x%2.2x", hdev->name, result);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = result;
104 hdev->req_status = HCI_REQ_DONE;
105 if (skb)
106 hdev->req_skb = skb_get(skb);
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
Johan Hedbergb5044302015-11-10 09:44:55 +0200111void hci_req_sync_cancel(struct hci_dev *hdev, int err)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
123 const void *param, u8 event, u32 timeout)
124{
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200125 struct hci_request req;
126 struct sk_buff *skb;
127 int err = 0;
128
129 BT_DBG("%s", hdev->name);
130
131 hci_req_init(&req, hdev);
132
133 hci_req_add_ev(&req, opcode, plen, param, event);
134
135 hdev->req_status = HCI_REQ_PEND;
136
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200137 err = hci_req_run_skb(&req, hci_req_sync_complete);
John Keeping67d8cee2018-04-19 16:29:37 +0100138 if (err < 0)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200139 return ERR_PTR(err);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200140
John Keeping67d8cee2018-04-19 16:29:37 +0100141 err = wait_event_interruptible_timeout(hdev->req_wait_q,
142 hdev->req_status != HCI_REQ_PEND, timeout);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200143
John Keeping67d8cee2018-04-19 16:29:37 +0100144 if (err == -ERESTARTSYS)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200145 return ERR_PTR(-EINTR);
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_status = hdev->req_result = 0;
162 skb = hdev->req_skb;
163 hdev->req_skb = NULL;
164
165 BT_DBG("%s end: err %d", hdev->name, err);
166
167 if (err < 0) {
168 kfree_skb(skb);
169 return ERR_PTR(err);
170 }
171
172 if (!skb)
173 return ERR_PTR(-ENODATA);
174
175 return skb;
176}
177EXPORT_SYMBOL(__hci_cmd_sync_ev);
178
179struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
180 const void *param, u32 timeout)
181{
182 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
183}
184EXPORT_SYMBOL(__hci_cmd_sync);
185
186/* Execute request and wait for completion. */
Johan Hedberga1d01db2015-11-11 08:11:25 +0200187int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
188 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200189 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200190{
191 struct hci_request req;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200192 int err = 0;
193
194 BT_DBG("%s start", hdev->name);
195
196 hci_req_init(&req, hdev);
197
198 hdev->req_status = HCI_REQ_PEND;
199
Johan Hedberga1d01db2015-11-11 08:11:25 +0200200 err = func(&req, opt);
201 if (err) {
202 if (hci_status)
203 *hci_status = HCI_ERROR_UNSPECIFIED;
204 return err;
205 }
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200206
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200207 err = hci_req_run_skb(&req, hci_req_sync_complete);
208 if (err < 0) {
209 hdev->req_status = 0;
210
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200211 /* ENODATA means the HCI request command queue is empty.
212 * This can happen when a request with conditionals doesn't
213 * trigger any commands to be sent. This is normal behavior
214 * and should not trigger an error return.
215 */
Johan Hedberg568f44f2015-11-23 14:40:47 +0200216 if (err == -ENODATA) {
217 if (hci_status)
218 *hci_status = 0;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200219 return 0;
Johan Hedberg568f44f2015-11-23 14:40:47 +0200220 }
221
222 if (hci_status)
223 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200224
225 return err;
226 }
227
John Keeping67d8cee2018-04-19 16:29:37 +0100228 err = wait_event_interruptible_timeout(hdev->req_wait_q,
229 hdev->req_status != HCI_REQ_PEND, timeout);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200230
John Keeping67d8cee2018-04-19 16:29:37 +0100231 if (err == -ERESTARTSYS)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200232 return -EINTR;
233
234 switch (hdev->req_status) {
235 case HCI_REQ_DONE:
236 err = -bt_to_errno(hdev->req_result);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200237 if (hci_status)
238 *hci_status = hdev->req_result;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200239 break;
240
241 case HCI_REQ_CANCELED:
242 err = -hdev->req_result;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200243 if (hci_status)
244 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200245 break;
246
247 default:
248 err = -ETIMEDOUT;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200249 if (hci_status)
250 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200251 break;
252 }
253
Frederic Dalleau9afee942016-08-23 07:59:19 +0200254 kfree_skb(hdev->req_skb);
255 hdev->req_skb = NULL;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200256 hdev->req_status = hdev->req_result = 0;
257
258 BT_DBG("%s end: err %d", hdev->name, err);
259
260 return err;
261}
262
Johan Hedberga1d01db2015-11-11 08:11:25 +0200263int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200265 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200266{
267 int ret;
268
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
272 /* Serialize all requests */
Johan Hedbergb5044302015-11-10 09:44:55 +0200273 hci_req_sync_lock(hdev);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
Johan Hedbergb5044302015-11-10 09:44:55 +0200275 hci_req_sync_unlock(hdev);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200276
277 return ret;
278}
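
/* Usage sketch: hci_req_sync() takes a request-builder callback and blocks
 * until the queued commands complete or time out. The callback below is
 * purely illustrative and not part of this file:
 *
 *	static int read_local_name_req(struct hci_request *req,
 *				       unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return 0;
 *	}
 *
 *	u8 hci_status;
 *	int err = hci_req_sync(hdev, read_local_name_req, 0, HCI_CMD_TIMEOUT,
 *			       &hci_status);
 */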
279
Johan Hedberg0857dd32014-12-19 13:40:20 +0200280struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
281 const void *param)
282{
283 int len = HCI_COMMAND_HDR_SIZE + plen;
284 struct hci_command_hdr *hdr;
285 struct sk_buff *skb;
286
287 skb = bt_skb_alloc(len, GFP_ATOMIC);
288 if (!skb)
289 return NULL;
290
Johannes Berg4df864c2017-06-16 14:29:21 +0200291 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200292 hdr->opcode = cpu_to_le16(opcode);
293 hdr->plen = plen;
294
295 if (plen)
Johannes Berg59ae1d12017-06-16 14:29:20 +0200296 skb_put_data(skb, param, plen);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200297
298 BT_DBG("skb len %d", skb->len);
299
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100300 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
301 hci_skb_opcode(skb) = opcode;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200302
303 return skb;
304}
305
306/* Queue a command to an asynchronous HCI request */
307void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
308 const void *param, u8 event)
309{
310 struct hci_dev *hdev = req->hdev;
311 struct sk_buff *skb;
312
313 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
314
315 /* If an error occurred during request building, there is no point in
316 * queueing the HCI command. We can simply return.
317 */
318 if (req->err)
319 return;
320
321 skb = hci_prepare_cmd(hdev, opcode, plen, param);
322 if (!skb) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +0100323 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
324 opcode);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200325 req->err = -ENOMEM;
326 return;
327 }
328
329 if (skb_queue_empty(&req->cmd_q))
Johan Hedberg44d27132015-11-05 09:31:40 +0200330 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200331
Marcel Holtmann242c0eb2015-10-25 22:45:53 +0100332 bt_cb(skb)->hci.req_event = event;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200333
334 skb_queue_tail(&req->cmd_q, skb);
335}
336
337void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
338 const void *param)
339{
340 hci_req_add_ev(req, opcode, plen, param, 0);
341}
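
/* Usage sketch: building and running an asynchronous request; the completion
 * callback (example_complete, of type hci_req_complete_t) is hypothetical:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_run(&req, example_complete);
 */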
342
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200343void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
344{
345 struct hci_dev *hdev = req->hdev;
346 struct hci_cp_write_page_scan_activity acp;
347 u8 type;
348
349 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
350 return;
351
352 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
353 return;
354
355 if (enable) {
356 type = PAGE_SCAN_TYPE_INTERLACED;
357
358 /* 160 msec page scan interval */
359 acp.interval = cpu_to_le16(0x0100);
360 } else {
361 type = PAGE_SCAN_TYPE_STANDARD; /* default */
362
363 /* default 1.28 sec page scan */
364 acp.interval = cpu_to_le16(0x0800);
365 }
366
367 acp.window = cpu_to_le16(0x0012);
368
369 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
370 __cpu_to_le16(hdev->page_scan_window) != acp.window)
371 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
372 sizeof(acp), &acp);
373
374 if (hdev->page_scan_type != type)
375 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
376}
377
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller hold hdev->lock.
 */
384static void __hci_update_background_scan(struct hci_request *req)
385{
386 struct hci_dev *hdev = req->hdev;
387
388 if (!test_bit(HCI_UP, &hdev->flags) ||
389 test_bit(HCI_INIT, &hdev->flags) ||
390 hci_dev_test_flag(hdev, HCI_SETUP) ||
391 hci_dev_test_flag(hdev, HCI_CONFIG) ||
392 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
393 hci_dev_test_flag(hdev, HCI_UNREGISTER))
394 return;
395
396 /* No point in doing scanning if LE support hasn't been enabled */
397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
398 return;
399
400 /* If discovery is active don't interfere with it */
401 if (hdev->discovery.state != DISCOVERY_STOPPED)
402 return;
403
	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
411 hci_discovery_filter_clear(hdev);
412
413 if (list_empty(&hdev->pend_le_conns) &&
414 list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */
419
420 /* If controller is not scanning we are done. */
421 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
422 return;
423
424 hci_req_add_le_scan_disable(req);
425
426 BT_DBG("%s stopping background scanning", hdev->name);
427 } else {
428 /* If there is at least one pending LE connection, we should
429 * keep the background scan running.
430 */
431
432 /* If controller is connecting, we should not start scanning
433 * since some controllers are not able to scan and connect at
434 * the same time.
435 */
436 if (hci_lookup_le_connect(hdev))
437 return;
438
439 /* If controller is currently scanning, we stop it to ensure we
440 * don't miss any advertising (due to duplicates filter).
441 */
442 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
443 hci_req_add_le_scan_disable(req);
444
445 hci_req_add_le_passive_scan(req);
446
447 BT_DBG("%s starting background scanning", hdev->name);
448 }
449}
450
Johan Hedberg00cf5042015-11-25 16:15:41 +0200451void __hci_req_update_name(struct hci_request *req)
452{
453 struct hci_dev *hdev = req->hdev;
454 struct hci_cp_write_local_name cp;
455
456 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
457
458 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
459}
460
Johan Hedbergb1a89172015-11-25 16:15:42 +0200461#define PNP_INFO_SVCLASS_ID 0x1200
462
463static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
464{
465 u8 *ptr = data, *uuids_start = NULL;
466 struct bt_uuid *uuid;
467
468 if (len < 4)
469 return ptr;
470
471 list_for_each_entry(uuid, &hdev->uuids, list) {
472 u16 uuid16;
473
474 if (uuid->size != 16)
475 continue;
476
477 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
478 if (uuid16 < 0x1100)
479 continue;
480
481 if (uuid16 == PNP_INFO_SVCLASS_ID)
482 continue;
483
484 if (!uuids_start) {
485 uuids_start = ptr;
486 uuids_start[0] = 1;
487 uuids_start[1] = EIR_UUID16_ALL;
488 ptr += 2;
489 }
490
491 /* Stop if not enough space to put next UUID */
492 if ((ptr - data) + sizeof(u16) > len) {
493 uuids_start[1] = EIR_UUID16_SOME;
494 break;
495 }
496
497 *ptr++ = (uuid16 & 0x00ff);
498 *ptr++ = (uuid16 & 0xff00) >> 8;
499 uuids_start[0] += sizeof(uuid16);
500 }
501
502 return ptr;
503}
504
505static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
506{
507 u8 *ptr = data, *uuids_start = NULL;
508 struct bt_uuid *uuid;
509
510 if (len < 6)
511 return ptr;
512
513 list_for_each_entry(uuid, &hdev->uuids, list) {
514 if (uuid->size != 32)
515 continue;
516
517 if (!uuids_start) {
518 uuids_start = ptr;
519 uuids_start[0] = 1;
520 uuids_start[1] = EIR_UUID32_ALL;
521 ptr += 2;
522 }
523
524 /* Stop if not enough space to put next UUID */
525 if ((ptr - data) + sizeof(u32) > len) {
526 uuids_start[1] = EIR_UUID32_SOME;
527 break;
528 }
529
530 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
531 ptr += sizeof(u32);
532 uuids_start[0] += sizeof(u32);
533 }
534
535 return ptr;
536}
537
538static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
539{
540 u8 *ptr = data, *uuids_start = NULL;
541 struct bt_uuid *uuid;
542
543 if (len < 18)
544 return ptr;
545
546 list_for_each_entry(uuid, &hdev->uuids, list) {
547 if (uuid->size != 128)
548 continue;
549
550 if (!uuids_start) {
551 uuids_start = ptr;
552 uuids_start[0] = 1;
553 uuids_start[1] = EIR_UUID128_ALL;
554 ptr += 2;
555 }
556
557 /* Stop if not enough space to put next UUID */
558 if ((ptr - data) + 16 > len) {
559 uuids_start[1] = EIR_UUID128_SOME;
560 break;
561 }
562
563 memcpy(ptr, uuid->uuid, 16);
564 ptr += 16;
565 uuids_start[0] += 16;
566 }
567
568 return ptr;
569}
570
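/* Build the extended inquiry response payload: local name, TX power,
 * device ID and the 16-bit, 32-bit and 128-bit service UUID lists.
 */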
571static void create_eir(struct hci_dev *hdev, u8 *data)
572{
573 u8 *ptr = data;
574 size_t name_len;
575
576 name_len = strlen(hdev->dev_name);
577
578 if (name_len > 0) {
579 /* EIR Data type */
580 if (name_len > 48) {
581 name_len = 48;
582 ptr[1] = EIR_NAME_SHORT;
583 } else
584 ptr[1] = EIR_NAME_COMPLETE;
585
586 /* EIR Data length */
587 ptr[0] = name_len + 1;
588
589 memcpy(ptr + 2, hdev->dev_name, name_len);
590
591 ptr += (name_len + 2);
592 }
593
594 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
595 ptr[0] = 2;
596 ptr[1] = EIR_TX_POWER;
597 ptr[2] = (u8) hdev->inq_tx_power;
598
599 ptr += 3;
600 }
601
602 if (hdev->devid_source > 0) {
603 ptr[0] = 9;
604 ptr[1] = EIR_DEVICE_ID;
605
606 put_unaligned_le16(hdev->devid_source, ptr + 2);
607 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
608 put_unaligned_le16(hdev->devid_product, ptr + 6);
609 put_unaligned_le16(hdev->devid_version, ptr + 8);
610
611 ptr += 10;
612 }
613
614 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
615 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
616 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
617}
618
619void __hci_req_update_eir(struct hci_request *req)
620{
621 struct hci_dev *hdev = req->hdev;
622 struct hci_cp_write_eir cp;
623
624 if (!hdev_is_powered(hdev))
625 return;
626
627 if (!lmp_ext_inq_capable(hdev))
628 return;
629
630 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
631 return;
632
633 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
634 return;
635
636 memset(&cp, 0, sizeof(cp));
637
638 create_eir(hdev, cp.data);
639
640 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
641 return;
642
643 memcpy(hdev->eir, cp.data, sizeof(cp.data));
644
645 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
646}
647
Johan Hedberg0857dd32014-12-19 13:40:20 +0200648void hci_req_add_le_scan_disable(struct hci_request *req)
649{
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530650 struct hci_dev *hdev = req->hdev;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200651
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530652 if (use_ext_scan(hdev)) {
653 struct hci_cp_le_set_ext_scan_enable cp;
654
655 memset(&cp, 0, sizeof(cp));
656 cp.enable = LE_SCAN_DISABLE;
657 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
658 &cp);
659 } else {
660 struct hci_cp_le_set_scan_enable cp;
661
662 memset(&cp, 0, sizeof(cp));
663 cp.enable = LE_SCAN_DISABLE;
664 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
665 }
Johan Hedberg0857dd32014-12-19 13:40:20 +0200666}
667
668static void add_to_white_list(struct hci_request *req,
669 struct hci_conn_params *params)
670{
671 struct hci_cp_le_add_to_white_list cp;
672
673 cp.bdaddr_type = params->addr_type;
674 bacpy(&cp.bdaddr, &params->addr);
675
676 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
677}
678
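/* Returns the scan filter policy to use: 0x01 when the white list has been
 * programmed successfully, or 0x00 (accept all advertising) when the white
 * list cannot be used, e.g. because RPAs are in use or the list is too small.
 */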
679static u8 update_white_list(struct hci_request *req)
680{
681 struct hci_dev *hdev = req->hdev;
682 struct hci_conn_params *params;
683 struct bdaddr_list *b;
684 uint8_t white_list_entries = 0;
685
686 /* Go through the current white list programmed into the
687 * controller one by one and check if that address is still
688 * in the list of pending connections or list of devices to
689 * report. If not present in either list, then queue the
690 * command to remove it from the controller.
691 */
692 list_for_each_entry(b, &hdev->le_white_list, list) {
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500693 /* If the device is neither in pend_le_conns nor
694 * pend_le_reports then remove it from the whitelist.
695 */
696 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
697 &b->bdaddr, b->bdaddr_type) &&
698 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
699 &b->bdaddr, b->bdaddr_type)) {
700 struct hci_cp_le_del_from_white_list cp;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200701
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500702 cp.bdaddr_type = b->bdaddr_type;
703 bacpy(&cp.bdaddr, &b->bdaddr);
704
705 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
706 sizeof(cp), &cp);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200707 continue;
708 }
709
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500710 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
711 /* White list can not be used with RPAs */
712 return 0x00;
713 }
Johan Hedberg0857dd32014-12-19 13:40:20 +0200714
Johan Hedbergcff10ce2016-01-26 14:31:31 -0500715 white_list_entries++;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200716 }
717
	/* Since all white list entries that are no longer valid have
	 * been removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
728 list_for_each_entry(params, &hdev->pend_le_conns, action) {
729 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
730 &params->addr, params->addr_type))
731 continue;
732
733 if (white_list_entries >= hdev->le_white_list_size) {
734 /* Select filter policy to accept all advertising */
735 return 0x00;
736 }
737
738 if (hci_find_irk_by_addr(hdev, &params->addr,
739 params->addr_type)) {
740 /* White list can not be used with RPAs */
741 return 0x00;
742 }
743
744 white_list_entries++;
745 add_to_white_list(req, params);
746 }
747
748 /* After adding all new pending connections, walk through
749 * the list of pending reports and also add these to the
750 * white list if there is still space.
751 */
752 list_for_each_entry(params, &hdev->pend_le_reports, action) {
753 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
754 &params->addr, params->addr_type))
755 continue;
756
757 if (white_list_entries >= hdev->le_white_list_size) {
758 /* Select filter policy to accept all advertising */
759 return 0x00;
760 }
761
762 if (hci_find_irk_by_addr(hdev, &params->addr,
763 params->addr_type)) {
764 /* White list can not be used with RPAs */
765 return 0x00;
766 }
767
768 white_list_entries++;
769 add_to_white_list(req, params);
770 }
771
772 /* Select filter policy to use white list */
773 return 0x01;
774}
775
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200776static bool scan_use_rpa(struct hci_dev *hdev)
777{
778 return hci_dev_test_flag(hdev, HCI_PRIVACY);
779}
780
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530781static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
782 u16 window, u8 own_addr_type, u8 filter_policy)
Johan Hedberg0857dd32014-12-19 13:40:20 +0200783{
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530784 struct hci_dev *hdev = req->hdev;
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530785
	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		/* Only a single PHY (1M) is supported for now */
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 1];
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530795
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +0530796 ext_param_cp = (void *)data;
797 phy_params = (void *)ext_param_cp->data;
798
799 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
800 ext_param_cp->own_addr_type = own_addr_type;
801 ext_param_cp->filter_policy = filter_policy;
802 ext_param_cp->scanning_phys = LE_SCAN_PHY_1M;
803
804 memset(phy_params, 0, sizeof(*phy_params));
805 phy_params->type = type;
806 phy_params->interval = cpu_to_le16(interval);
807 phy_params->window = cpu_to_le16(window);
808
809 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
810 sizeof(*ext_param_cp) + sizeof(*phy_params),
811 ext_param_cp);
812
813 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
814 ext_enable_cp.enable = LE_SCAN_ENABLE;
815 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
816
817 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
818 sizeof(ext_enable_cp), &ext_enable_cp);
819 } else {
820 struct hci_cp_le_set_scan_param param_cp;
821 struct hci_cp_le_set_scan_enable enable_cp;
822
823 memset(&param_cp, 0, sizeof(param_cp));
824 param_cp.type = type;
825 param_cp.interval = cpu_to_le16(interval);
826 param_cp.window = cpu_to_le16(window);
827 param_cp.own_address_type = own_addr_type;
828 param_cp.filter_policy = filter_policy;
829 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
830 &param_cp);
831
832 memset(&enable_cp, 0, sizeof(enable_cp));
833 enable_cp.enable = LE_SCAN_ENABLE;
834 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
835 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
836 &enable_cp);
837 }
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530838}
839
840void hci_req_add_le_passive_scan(struct hci_request *req)
841{
Johan Hedberg0857dd32014-12-19 13:40:20 +0200842 struct hci_dev *hdev = req->hdev;
843 u8 own_addr_type;
844 u8 filter_policy;
845
	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200852 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
853 &own_addr_type))
Johan Hedberg0857dd32014-12-19 13:40:20 +0200854 return;
855
856 /* Adding or removing entries from the white list must
857 * happen before enabling scanning. The controller does
858 * not allow white list modification while scanning.
859 */
860 filter_policy = update_white_list(req);
861
	/* When the controller is using random resolvable addresses and
	 * consequently has LE privacy enabled, controllers that support
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700871 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +0200872 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
873 filter_policy |= 0x02;
874
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +0530875 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
876 hdev->le_scan_window, own_addr_type, filter_policy);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200877}
878
Johan Hedbergf2252572015-11-18 12:49:20 +0200879static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
880{
Johan Hedbergcab054a2015-11-30 11:21:45 +0200881 u8 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +0200882 struct adv_info *adv_instance;
883
884 /* Ignore instance 0 */
885 if (instance == 0x00)
886 return 0;
887
888 adv_instance = hci_find_adv_instance(hdev, instance);
889 if (!adv_instance)
890 return 0;
891
892 /* TODO: Take into account the "appearance" and "local-name" flags here.
893 * These are currently being ignored as they are not supported.
894 */
895 return adv_instance->scan_rsp_len;
896}
897
898void __hci_req_disable_advertising(struct hci_request *req)
899{
900 u8 enable = 0x00;
901
902 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
903}
904
905static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
906{
907 u32 flags;
908 struct adv_info *adv_instance;
909
910 if (instance == 0x00) {
911 /* Instance 0 always manages the "Tx Power" and "Flags"
912 * fields
913 */
914 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
915
916 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
917 * corresponds to the "connectable" instance flag.
918 */
919 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
920 flags |= MGMT_ADV_FLAG_CONNECTABLE;
921
Johan Hedberg6a19cc82016-03-11 09:56:32 +0200922 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
924 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedbergd43efbd2016-03-09 17:30:33 +0200925 flags |= MGMT_ADV_FLAG_DISCOV;
926
Johan Hedbergf2252572015-11-18 12:49:20 +0200927 return flags;
928 }
929
930 adv_instance = hci_find_adv_instance(hdev, instance);
931
932 /* Return 0 when we got an invalid instance identifier. */
933 if (!adv_instance)
934 return 0;
935
936 return adv_instance->flags;
937}
938
Johan Hedberg82a37ad2016-03-09 17:30:34 +0200939static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
940{
941 /* If privacy is not enabled don't use RPA */
942 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
943 return false;
944
945 /* If basic privacy mode is enabled use RPA */
946 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
947 return true;
948
949 /* If limited privacy mode is enabled don't use RPA if we're
950 * both discoverable and bondable.
951 */
952 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
953 hci_dev_test_flag(hdev, HCI_BONDABLE))
954 return false;
955
956 /* We're neither bondable nor discoverable in the limited
957 * privacy mode, therefore use RPA.
958 */
959 return true;
960}
961
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100962static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
963{
964 /* If there is no connection we are OK to advertise. */
965 if (hci_conn_num(hdev, LE_LINK) == 0)
966 return true;
967
968 /* Check le_states if there is any connection in slave role. */
969 if (hdev->conn_hash.le_num_slave > 0) {
970 /* Slave connection state and non connectable mode bit 20. */
971 if (!connectable && !(hdev->le_states[2] & 0x10))
972 return false;
973
974 /* Slave connection state and connectable mode bit 38
975 * and scannable bit 21.
976 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +0100977 if (connectable && (!(hdev->le_states[4] & 0x40) ||
978 !(hdev->le_states[2] & 0x20)))
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100979 return false;
980 }
981
982 /* Check le_states if there is any connection in master role. */
983 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
984 /* Master connection state and non connectable mode bit 18. */
985 if (!connectable && !(hdev->le_states[2] & 0x02))
986 return false;
987
988 /* Master connection state and connectable mode bit 35 and
989 * scannable 19.
990 */
Łukasz Rymanowski62ebdc22018-02-09 18:26:02 +0100991 if (connectable && (!(hdev->le_states[4] & 0x08) ||
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +0100992 !(hdev->le_states[2] & 0x08)))
993 return false;
994 }
995
996 return true;
997}
998
Johan Hedbergf2252572015-11-18 12:49:20 +0200999void __hci_req_enable_advertising(struct hci_request *req)
1000{
1001 struct hci_dev *hdev = req->hdev;
1002 struct hci_cp_le_set_adv_param cp;
1003 u8 own_addr_type, enable = 0x01;
1004 bool connectable;
Johan Hedbergf2252572015-11-18 12:49:20 +02001005 u32 flags;
1006
Łukasz Rymanowski9e1e9f22017-12-08 13:40:57 +01001007 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1008
1009 /* If the "connectable" instance flag was not set, then choose between
1010 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1011 */
1012 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1013 mgmt_get_connectable(hdev);
1014
1015 if (!is_advertising_allowed(hdev, connectable))
Johan Hedbergf2252572015-11-18 12:49:20 +02001016 return;
1017
1018 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1019 __hci_req_disable_advertising(req);
1020
1021 /* Clear the HCI_LE_ADV bit temporarily so that the
1022 * hci_update_random_address knows that it's safe to go ahead
1023 * and write a new random address. The flag will be set back on
1024 * as soon as the SET_ADV_ENABLE HCI command completes.
1025 */
1026 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1027
Johan Hedbergf2252572015-11-18 12:49:20 +02001028 /* Set require_privacy to true only when non-connectable
1029 * advertising is used. In that case it is fine to use a
1030 * non-resolvable private address.
1031 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001032 if (hci_update_random_address(req, !connectable,
1033 adv_use_rpa(hdev, flags),
1034 &own_addr_type) < 0)
Johan Hedbergf2252572015-11-18 12:49:20 +02001035 return;
1036
1037 memset(&cp, 0, sizeof(cp));
1038 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1039 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1040
1041 if (connectable)
1042 cp.type = LE_ADV_IND;
1043 else if (get_cur_adv_instance_scan_rsp_len(hdev))
1044 cp.type = LE_ADV_SCAN_IND;
1045 else
1046 cp.type = LE_ADV_NONCONN_IND;
1047
1048 cp.own_address_type = own_addr_type;
1049 cp.channel_map = hdev->le_adv_channel_map;
1050
1051 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1052
1053 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1054}
1055
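/* Append the device name to an advertising data buffer: use the complete
 * name if it fits, otherwise the configured short name, otherwise a
 * truncated copy of the complete name.
 */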
Michał Narajowskif61851f2016-10-19 10:20:27 +02001056u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
Johan Hedbergf2252572015-11-18 12:49:20 +02001057{
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001058 size_t short_len;
Michał Narajowskif61851f2016-10-19 10:20:27 +02001059 size_t complete_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001060
Michał Narajowskif61851f2016-10-19 10:20:27 +02001061 /* no space left for name (+ NULL + type + len) */
1062 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1063 return ad_len;
1064
1065 /* use complete name if present and fits */
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001066 complete_len = strlen(hdev->dev_name);
Michał Narajowskif61851f2016-10-19 10:20:27 +02001067 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
Michał Narajowski1b422062016-10-05 12:28:27 +02001068 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001069 hdev->dev_name, complete_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001070
Michał Narajowskif61851f2016-10-19 10:20:27 +02001071 /* use short name if present */
1072 short_len = strlen(hdev->short_name);
1073 if (short_len)
Michał Narajowski1b422062016-10-05 12:28:27 +02001074 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
Michał Narajowskif61851f2016-10-19 10:20:27 +02001075 hdev->short_name, short_len + 1);
Michał Narajowskicecbf3e2016-10-05 12:28:25 +02001076
	/* use shortened full name if present; we already know that the name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
1080 if (complete_len) {
1081 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1082
1083 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1084 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1085
1086 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1087 sizeof(name));
Johan Hedbergf2252572015-11-18 12:49:20 +02001088 }
1089
1090 return ad_len;
1091}
1092
Michał Narajowski1b422062016-10-05 12:28:27 +02001093static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1094{
1095 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1096}
1097
Michał Narajowski7c295c42016-09-18 12:50:02 +02001098static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1099{
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001100 u8 scan_rsp_len = 0;
1101
1102 if (hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001103 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowski7ddb30c2016-10-05 12:28:26 +02001104 }
1105
Michał Narajowski1b422062016-10-05 12:28:27 +02001106 return append_local_name(hdev, ptr, scan_rsp_len);
Michał Narajowski7c295c42016-09-18 12:50:02 +02001107}
1108
Johan Hedbergf2252572015-11-18 12:49:20 +02001109static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1110 u8 *ptr)
1111{
1112 struct adv_info *adv_instance;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001113 u32 instance_flags;
1114 u8 scan_rsp_len = 0;
Johan Hedbergf2252572015-11-18 12:49:20 +02001115
1116 adv_instance = hci_find_adv_instance(hdev, instance);
1117 if (!adv_instance)
1118 return 0;
1119
Michał Narajowski7c295c42016-09-18 12:50:02 +02001120 instance_flags = adv_instance->flags;
1121
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001122 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
Michał Narajowski1b422062016-10-05 12:28:27 +02001123 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
Michał Narajowskic4960ec2016-09-18 12:50:03 +02001124 }
1125
Michał Narajowski1b422062016-10-05 12:28:27 +02001126 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
Johan Hedbergf2252572015-11-18 12:49:20 +02001127 adv_instance->scan_rsp_len);
1128
Michał Narajowski7c295c42016-09-18 12:50:02 +02001129 scan_rsp_len += adv_instance->scan_rsp_len;
Michał Narajowski7c295c42016-09-18 12:50:02 +02001130
1131 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1132 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1133
1134 return scan_rsp_len;
Johan Hedbergf2252572015-11-18 12:49:20 +02001135}
1136
Johan Hedbergcab054a2015-11-30 11:21:45 +02001137void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001138{
1139 struct hci_dev *hdev = req->hdev;
1140 struct hci_cp_le_set_scan_rsp_data cp;
1141 u8 len;
1142
1143 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1144 return;
1145
1146 memset(&cp, 0, sizeof(cp));
1147
1148 if (instance)
1149 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1150 else
1151 len = create_default_scan_rsp_data(hdev, cp.data);
1152
1153 if (hdev->scan_rsp_data_len == len &&
1154 !memcmp(cp.data, hdev->scan_rsp_data, len))
1155 return;
1156
1157 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1158 hdev->scan_rsp_data_len = len;
1159
1160 cp.length = len;
1161
1162 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1163}
1164
Johan Hedbergf2252572015-11-18 12:49:20 +02001165static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1166{
1167 struct adv_info *adv_instance = NULL;
1168 u8 ad_len = 0, flags = 0;
1169 u32 instance_flags;
1170
1171 /* Return 0 when the current instance identifier is invalid. */
1172 if (instance) {
1173 adv_instance = hci_find_adv_instance(hdev, instance);
1174 if (!adv_instance)
1175 return 0;
1176 }
1177
1178 instance_flags = get_adv_instance_flags(hdev, instance);
1179
1180 /* The Add Advertising command allows userspace to set both the general
1181 * and limited discoverable flags.
1182 */
1183 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1184 flags |= LE_AD_GENERAL;
1185
1186 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1187 flags |= LE_AD_LIMITED;
1188
Johan Hedbergf18ba582016-04-06 13:09:05 +03001189 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1190 flags |= LE_AD_NO_BREDR;
1191
Johan Hedbergf2252572015-11-18 12:49:20 +02001192 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1193 /* If a discovery flag wasn't provided, simply use the global
1194 * settings.
1195 */
1196 if (!flags)
1197 flags |= mgmt_get_adv_discov_flags(hdev);
1198
Johan Hedbergf2252572015-11-18 12:49:20 +02001199 /* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
1201 */
1202 if (flags) {
1203 ptr[0] = 0x02;
1204 ptr[1] = EIR_FLAGS;
1205 ptr[2] = flags;
1206
1207 ad_len += 3;
1208 ptr += 3;
1209 }
1210 }
1211
1212 if (adv_instance) {
1213 memcpy(ptr, adv_instance->adv_data,
1214 adv_instance->adv_data_len);
1215 ad_len += adv_instance->adv_data_len;
1216 ptr += adv_instance->adv_data_len;
1217 }
1218
1219 /* Provide Tx Power only if we can provide a valid value for it */
1220 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1221 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1222 ptr[0] = 0x02;
1223 ptr[1] = EIR_TX_POWER;
1224 ptr[2] = (u8)hdev->adv_tx_power;
1225
1226 ad_len += 3;
1227 ptr += 3;
1228 }
1229
1230 return ad_len;
1231}
1232
Johan Hedbergcab054a2015-11-30 11:21:45 +02001233void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001234{
1235 struct hci_dev *hdev = req->hdev;
1236 struct hci_cp_le_set_adv_data cp;
1237 u8 len;
1238
1239 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1240 return;
1241
1242 memset(&cp, 0, sizeof(cp));
1243
1244 len = create_instance_adv_data(hdev, instance, cp.data);
1245
1246 /* There's nothing to do if the data hasn't changed */
1247 if (hdev->adv_data_len == len &&
1248 memcmp(cp.data, hdev->adv_data, len) == 0)
1249 return;
1250
1251 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1252 hdev->adv_data_len = len;
1253
1254 cp.length = len;
1255
1256 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1257}
1258
Johan Hedbergcab054a2015-11-30 11:21:45 +02001259int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001260{
1261 struct hci_request req;
1262
1263 hci_req_init(&req, hdev);
1264 __hci_req_update_adv_data(&req, instance);
1265
1266 return hci_req_run(&req, NULL);
1267}
1268
1269static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1270{
1271 BT_DBG("%s status %u", hdev->name, status);
1272}
1273
1274void hci_req_reenable_advertising(struct hci_dev *hdev)
1275{
1276 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001277
1278 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001279 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001280 return;
1281
Johan Hedbergf2252572015-11-18 12:49:20 +02001282 hci_req_init(&req, hdev);
1283
Johan Hedbergcab054a2015-11-30 11:21:45 +02001284 if (hdev->cur_adv_instance) {
1285 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1286 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001287 } else {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001288 __hci_req_update_adv_data(&req, 0x00);
1289 __hci_req_update_scan_rsp_data(&req, 0x00);
Johan Hedbergf2252572015-11-18 12:49:20 +02001290 __hci_req_enable_advertising(&req);
1291 }
1292
1293 hci_req_run(&req, adv_enable_complete);
1294}
1295
1296static void adv_timeout_expire(struct work_struct *work)
1297{
1298 struct hci_dev *hdev = container_of(work, struct hci_dev,
1299 adv_instance_expire.work);
1300
1301 struct hci_request req;
1302 u8 instance;
1303
1304 BT_DBG("%s", hdev->name);
1305
1306 hci_dev_lock(hdev);
1307
1308 hdev->adv_instance_timeout = 0;
1309
Johan Hedbergcab054a2015-11-30 11:21:45 +02001310 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001311 if (instance == 0x00)
1312 goto unlock;
1313
1314 hci_req_init(&req, hdev);
1315
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001316 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
Johan Hedbergf2252572015-11-18 12:49:20 +02001317
1318 if (list_empty(&hdev->adv_instances))
1319 __hci_req_disable_advertising(&req);
1320
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001321 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001322
1323unlock:
1324 hci_dev_unlock(hdev);
1325}
1326
1327int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1328 bool force)
1329{
1330 struct hci_dev *hdev = req->hdev;
1331 struct adv_info *adv_instance = NULL;
1332 u16 timeout;
1333
1334 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001335 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001336 return -EPERM;
1337
1338 if (hdev->adv_instance_timeout)
1339 return -EBUSY;
1340
1341 adv_instance = hci_find_adv_instance(hdev, instance);
1342 if (!adv_instance)
1343 return -ENOENT;
1344
1345 /* A zero timeout means unlimited advertising. As long as there is
1346 * only one instance, duration should be ignored. We still set a timeout
1347 * in case further instances are being added later on.
1348 *
1349 * If the remaining lifetime of the instance is more than the duration
1350 * then the timeout corresponds to the duration, otherwise it will be
1351 * reduced to the remaining instance lifetime.
1352 */
1353 if (adv_instance->timeout == 0 ||
1354 adv_instance->duration <= adv_instance->remaining_time)
1355 timeout = adv_instance->duration;
1356 else
1357 timeout = adv_instance->remaining_time;
1358
1359 /* The remaining time is being reduced unless the instance is being
1360 * advertised without time limit.
1361 */
1362 if (adv_instance->timeout)
1363 adv_instance->remaining_time =
1364 adv_instance->remaining_time - timeout;
1365
1366 hdev->adv_instance_timeout = timeout;
1367 queue_delayed_work(hdev->req_workqueue,
1368 &hdev->adv_instance_expire,
1369 msecs_to_jiffies(timeout * 1000));
1370
1371 /* If we're just re-scheduling the same instance again then do not
1372 * execute any HCI commands. This happens when a single instance is
1373 * being advertised.
1374 */
1375 if (!force && hdev->cur_adv_instance == instance &&
1376 hci_dev_test_flag(hdev, HCI_LE_ADV))
1377 return 0;
1378
1379 hdev->cur_adv_instance = instance;
Johan Hedbergcab054a2015-11-30 11:21:45 +02001380 __hci_req_update_adv_data(req, instance);
1381 __hci_req_update_scan_rsp_data(req, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001382 __hci_req_enable_advertising(req);
1383
1384 return 0;
1385}
1386
1387static void cancel_adv_timeout(struct hci_dev *hdev)
1388{
1389 if (hdev->adv_instance_timeout) {
1390 hdev->adv_instance_timeout = 0;
1391 cancel_delayed_work(&hdev->adv_instance_expire);
1392 }
1393}
1394
1395/* For a single instance:
1396 * - force == true: The instance will be removed even when its remaining
1397 * lifetime is not zero.
1398 * - force == false: the instance will be deactivated but kept stored unless
1399 * the remaining lifetime is zero.
1400 *
1401 * For instance == 0x00:
1402 * - force == true: All instances will be removed regardless of their timeout
1403 * setting.
1404 * - force == false: Only instances that have a timeout will be removed.
1405 */
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001406void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1407 struct hci_request *req, u8 instance,
1408 bool force)
Johan Hedbergf2252572015-11-18 12:49:20 +02001409{
1410 struct adv_info *adv_instance, *n, *next_instance = NULL;
1411 int err;
1412 u8 rem_inst;
1413
1414 /* Cancel any timeout concerning the removed instance(s). */
1415 if (!instance || hdev->cur_adv_instance == instance)
1416 cancel_adv_timeout(hdev);
1417
1418 /* Get the next instance to advertise BEFORE we remove
1419 * the current one. This can be the same instance again
1420 * if there is only one instance.
1421 */
1422 if (instance && hdev->cur_adv_instance == instance)
1423 next_instance = hci_get_next_instance(hdev, instance);
1424
1425 if (instance == 0x00) {
1426 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1427 list) {
1428 if (!(force || adv_instance->timeout))
1429 continue;
1430
1431 rem_inst = adv_instance->instance;
1432 err = hci_remove_adv_instance(hdev, rem_inst);
1433 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001434 mgmt_advertising_removed(sk, hdev, rem_inst);
Johan Hedbergf2252572015-11-18 12:49:20 +02001435 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001436 } else {
1437 adv_instance = hci_find_adv_instance(hdev, instance);
1438
1439 if (force || (adv_instance && adv_instance->timeout &&
1440 !adv_instance->remaining_time)) {
1441 /* Don't advertise a removed instance. */
1442 if (next_instance &&
1443 next_instance->instance == instance)
1444 next_instance = NULL;
1445
1446 err = hci_remove_adv_instance(hdev, instance);
1447 if (!err)
Johan Hedberg37d3a1f2016-08-28 20:53:34 +03001448 mgmt_advertising_removed(sk, hdev, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001449 }
1450 }
1451
Johan Hedbergf2252572015-11-18 12:49:20 +02001452 if (!req || !hdev_is_powered(hdev) ||
1453 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1454 return;
1455
1456 if (next_instance)
1457 __hci_req_schedule_adv_instance(req, next_instance->instance,
1458 false);
1459}
1460
Johan Hedberg0857dd32014-12-19 13:40:20 +02001461static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1462{
1463 struct hci_dev *hdev = req->hdev;
1464
1465 /* If we're advertising or initiating an LE connection we can't
1466 * go ahead and change the random address at this time. This is
1467 * because the eventual initiator address used for the
1468 * subsequently created connection will be undefined (some
1469 * controllers use the new address and others the one we had
1470 * when the operation started).
1471 *
1472 * In this kind of scenario skip the update and let the random
1473 * address be updated at the next cycle.
1474 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001475 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02001476 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001477 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001478 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001479 return;
1480 }
1481
1482 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1483}
1484
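/* Select the own address type for a request: a resolvable private address
 * when use_rpa is set, a non-resolvable private address when privacy is
 * required without an RPA, the configured static address when forced or when
 * no public address is usable, and the public address otherwise.
 */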
1485int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001486 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001487{
1488 struct hci_dev *hdev = req->hdev;
1489 int err;
1490
	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001495 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001496 int to;
1497
1498 *own_addr_type = ADDR_LE_DEV_RANDOM;
1499
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001500 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001501 !bacmp(&hdev->random_addr, &hdev->rpa))
1502 return 0;
1503
1504 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1505 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001506 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001507 return err;
1508 }
1509
1510 set_random_addr(req, &hdev->rpa);
1511
1512 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1513 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1514
1515 return 0;
1516 }
1517
	/* In case privacy is required without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
1522 if (require_privacy) {
1523 bdaddr_t nrpa;
1524
1525 while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
1530 get_random_bytes(&nrpa, 6);
1531 nrpa.b[5] &= 0x3f;
1532
1533 /* The non-resolvable private address shall not be
1534 * equal to the public address.
1535 */
1536 if (bacmp(&hdev->bdaddr, &nrpa))
1537 break;
1538 }
1539
1540 *own_addr_type = ADDR_LE_DEV_RANDOM;
1541 set_random_addr(req, &nrpa);
1542 return 0;
1543 }
1544
	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001554 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001555 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001556 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001557 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001558 *own_addr_type = ADDR_LE_DEV_RANDOM;
1559 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1560 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1561 &hdev->static_addr);
1562 return 0;
1563 }
1564
1565 /* Neither privacy nor static address is being used so use a
1566 * public address.
1567 */
1568 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1569
1570 return 0;
1571}
Johan Hedberg2cf22212014-12-19 22:26:00 +02001572
Johan Hedberg405a2612014-12-19 23:18:22 +02001573static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1574{
1575 struct bdaddr_list *b;
1576
1577 list_for_each_entry(b, &hdev->whitelist, list) {
1578 struct hci_conn *conn;
1579
1580 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1581 if (!conn)
1582 return true;
1583
1584 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1585 return true;
1586 }
1587
1588 return false;
1589}
1590
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001591void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001592{
1593 struct hci_dev *hdev = req->hdev;
1594 u8 scan;
1595
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001596 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001597 return;
1598
1599 if (!hdev_is_powered(hdev))
1600 return;
1601
1602 if (mgmt_powering_down(hdev))
1603 return;
1604
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001605 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001606 disconnected_whitelist_entries(hdev))
1607 scan = SCAN_PAGE;
1608 else
1609 scan = SCAN_DISABLED;
1610
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001611 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001612 scan |= SCAN_INQUIRY;
1613
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001614 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1615 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1616 return;
1617
Johan Hedberg405a2612014-12-19 23:18:22 +02001618 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1619}
1620
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001621static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001622{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001623 hci_dev_lock(req->hdev);
1624 __hci_req_update_scan(req);
1625 hci_dev_unlock(req->hdev);
1626 return 0;
1627}
Johan Hedberg405a2612014-12-19 23:18:22 +02001628
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001629static void scan_update_work(struct work_struct *work)
1630{
1631 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1632
1633 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001634}
1635
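/* Request callback run under hci_req_sync() when the connectable
 * setting changes: refresh page scan, advertising data/parameters and
 * the background scan to match the new state.
 */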
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001636static int connectable_update(struct hci_request *req, unsigned long opt)
1637{
1638 struct hci_dev *hdev = req->hdev;
1639
1640 hci_dev_lock(hdev);
1641
1642 __hci_req_update_scan(req);
1643
1644 /* If BR/EDR is not enabled and we disable advertising as a
1645 * by-product of disabling connectable, we need to update the
1646 * advertising flags.
1647 */
1648 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001649 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001650
1651 /* Update the advertising parameters if necessary */
1652 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001653 !list_empty(&hdev->adv_instances))
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001654 __hci_req_enable_advertising(req);
1655
1656 __hci_update_background_scan(req);
1657
1658 hci_dev_unlock(hdev);
1659
1660 return 0;
1661}
1662
1663static void connectable_update_work(struct work_struct *work)
1664{
1665 struct hci_dev *hdev = container_of(work, struct hci_dev,
1666 connectable_update);
1667 u8 status;
1668
1669 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1670 mgmt_set_connectable_complete(hdev, status);
1671}
1672
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001673static u8 get_service_classes(struct hci_dev *hdev)
1674{
1675 struct bt_uuid *uuid;
1676 u8 val = 0;
1677
1678 list_for_each_entry(uuid, &hdev->uuids, list)
1679 val |= uuid->svc_hint;
1680
1681 return val;
1682}
1683
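/* Queue a Write Class of Device command built from the major/minor
 * device class and the service class hints of the registered UUIDs.
 * Bit 0x20 of the middle octet is set while limited discoverable mode
 * is active. The command is skipped if the value is unchanged.
 */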
1684void __hci_req_update_class(struct hci_request *req)
1685{
1686 struct hci_dev *hdev = req->hdev;
1687 u8 cod[3];
1688
1689 BT_DBG("%s", hdev->name);
1690
1691 if (!hdev_is_powered(hdev))
1692 return;
1693
1694 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1695 return;
1696
1697 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1698 return;
1699
1700 cod[0] = hdev->minor_class;
1701 cod[1] = hdev->major_class;
1702 cod[2] = get_service_classes(hdev);
1703
1704 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1705 cod[1] |= 0x20;
1706
1707 if (memcmp(cod, hdev->dev_class, 3) == 0)
1708 return;
1709
1710 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1711}
1712
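/* Queue a Write Current IAC LAP command: in limited discoverable mode
 * the controller answers both the limited (LIAC, 0x9e8b00) and general
 * (GIAC, 0x9e8b33) inquiry access codes, otherwise only the GIAC.
 */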
Johan Hedbergaed1a882015-11-22 17:24:44 +03001713static void write_iac(struct hci_request *req)
1714{
1715 struct hci_dev *hdev = req->hdev;
1716 struct hci_cp_write_current_iac_lap cp;
1717
1718 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1719 return;
1720
1721 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1722 /* Limited discoverable mode */
1723 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1724 cp.iac_lap[0] = 0x00; /* LIAC */
1725 cp.iac_lap[1] = 0x8b;
1726 cp.iac_lap[2] = 0x9e;
1727 cp.iac_lap[3] = 0x33; /* GIAC */
1728 cp.iac_lap[4] = 0x8b;
1729 cp.iac_lap[5] = 0x9e;
1730 } else {
1731 /* General discoverable mode */
1732 cp.num_iac = 1;
1733 cp.iac_lap[0] = 0x33; /* GIAC */
1734 cp.iac_lap[1] = 0x8b;
1735 cp.iac_lap[2] = 0x9e;
1736 }
1737
1738 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1739 (cp.num_iac * 3) + 1, &cp);
1740}
1741
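/* Request callback for discoverable setting changes: update the IAC,
 * scan mode and class of device for BR/EDR, and refresh the
 * advertising data when advertising was enabled via Set Advertising.
 */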
1742static int discoverable_update(struct hci_request *req, unsigned long opt)
1743{
1744 struct hci_dev *hdev = req->hdev;
1745
1746 hci_dev_lock(hdev);
1747
1748 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1749 write_iac(req);
1750 __hci_req_update_scan(req);
1751 __hci_req_update_class(req);
1752 }
1753
1754 /* Advertising instances don't use the global discoverable setting, so
1755 * only update AD if advertising was enabled using Set Advertising.
1756 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001757 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001758 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001759
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001760 /* Discoverable mode affects the local advertising
1761 * address in limited privacy mode.
1762 */
1763 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1764 __hci_req_enable_advertising(req);
1765 }
1766
Johan Hedbergaed1a882015-11-22 17:24:44 +03001767 hci_dev_unlock(hdev);
1768
1769 return 0;
1770}
1771
1772static void discoverable_update_work(struct work_struct *work)
1773{
1774 struct hci_dev *hdev = container_of(work, struct hci_dev,
1775 discoverable_update);
1776 u8 status;
1777
1778 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1779 mgmt_set_discoverable_complete(hdev, status);
1780}
1781
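/* Queue the HCI command needed to abort a connection, which depends on
 * the connection state: disconnect if established, cancel the pending
 * connection attempt if still connecting, or reject the incoming
 * request if in the connect-pending state.
 */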
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001782void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1783 u8 reason)
1784{
1785 switch (conn->state) {
1786 case BT_CONNECTED:
1787 case BT_CONFIG:
1788 if (conn->type == AMP_LINK) {
1789 struct hci_cp_disconn_phy_link cp;
1790
1791 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1792 cp.reason = reason;
1793 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1794 &cp);
1795 } else {
1796 struct hci_cp_disconnect dc;
1797
1798 dc.handle = cpu_to_le16(conn->handle);
1799 dc.reason = reason;
1800 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1801 }
1802
1803 conn->state = BT_DISCONN;
1804
1805 break;
1806 case BT_CONNECT:
1807 if (conn->type == LE_LINK) {
1808 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1809 break;
1810 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1811 0, NULL);
1812 } else if (conn->type == ACL_LINK) {
1813 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1814 break;
1815 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1816 6, &conn->dst);
1817 }
1818 break;
1819 case BT_CONNECT2:
1820 if (conn->type == ACL_LINK) {
1821 struct hci_cp_reject_conn_req rej;
1822
1823 bacpy(&rej.bdaddr, &conn->dst);
1824 rej.reason = reason;
1825
1826 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1827 sizeof(rej), &rej);
1828 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1829 struct hci_cp_reject_sync_conn_req rej;
1830
1831 bacpy(&rej.bdaddr, &conn->dst);
1832
1833 /* SCO rejection has its own limited set of
1834 * allowed error values (0x0D-0x0F) which isn't
1835 * compatible with most values passed to this
1836	 * function. To be safe, hard-code one of the
1837 * values that's suitable for SCO.
1838 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02001839 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001840
1841 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1842 sizeof(rej), &rej);
1843 }
1844 break;
1845 default:
1846 conn->state = BT_CLOSED;
1847 break;
1848 }
1849}
1850
1851static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1852{
1853 if (status)
1854 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1855}
1856
1857int hci_abort_conn(struct hci_conn *conn, u8 reason)
1858{
1859 struct hci_request req;
1860 int err;
1861
1862 hci_req_init(&req, conn->hdev);
1863
1864 __hci_abort_conn(&req, conn, reason);
1865
1866 err = hci_req_run(&req, abort_conn_complete);
1867 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001868 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d92015-10-22 10:49:37 +03001869 return err;
1870 }
1871
1872 return 0;
1873}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02001874
Johan Hedberga1d01db2015-11-11 08:11:25 +02001875static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02001876{
1877 hci_dev_lock(req->hdev);
1878 __hci_update_background_scan(req);
1879 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001880 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001881}
1882
1883static void bg_scan_update(struct work_struct *work)
1884{
1885 struct hci_dev *hdev = container_of(work, struct hci_dev,
1886 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02001887 struct hci_conn *conn;
1888 u8 status;
1889 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001890
Johan Hedberg84235d22015-11-11 08:11:20 +02001891 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1892 if (!err)
1893 return;
1894
1895 hci_dev_lock(hdev);
1896
1897 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1898 if (conn)
1899 hci_le_conn_failed(conn, status);
1900
1901 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02001902}
1903
Johan Hedberga1d01db2015-11-11 08:11:25 +02001904static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001905{
1906 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001907 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001908}
1909
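/* Request callback that flushes the inquiry cache and starts a BR/EDR
 * inquiry with the limited or general inquiry access code, using the
 * inquiry length passed in 'opt'.
 */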
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001910static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1911{
1912 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02001913 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1914 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001915 struct hci_cp_inquiry cp;
1916
1917 BT_DBG("%s", req->hdev->name);
1918
1919 hci_dev_lock(req->hdev);
1920 hci_inquiry_cache_flush(req->hdev);
1921 hci_dev_unlock(req->hdev);
1922
1923 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02001924
1925 if (req->hdev->discovery.limited)
1926 memcpy(&cp.lap, liac, sizeof(cp.lap));
1927 else
1928 memcpy(&cp.lap, giac, sizeof(cp.lap));
1929
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001930 cp.length = length;
1931
1932 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1933
1934 return 0;
1935}
1936
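/* Worker run when the LE scan timeout expires: disable LE scanning and
 * either stop discovery or hand over to BR/EDR inquiry, depending on
 * the discovery type and controller quirks.
 */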
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001937static void le_scan_disable_work(struct work_struct *work)
1938{
1939 struct hci_dev *hdev = container_of(work, struct hci_dev,
1940 le_scan_disable.work);
1941 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001942
1943 BT_DBG("%s", hdev->name);
1944
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001945 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001946 return;
1947
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001948 cancel_delayed_work(&hdev->le_scan_restart);
1949
1950 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1951 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001952 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
1953 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001954 return;
1955 }
1956
1957 hdev->discovery.scan_start = 0;
1958
1959	/* If we were running an LE-only scan, change the discovery
1960	 * state. If we were running both LE and BR/EDR inquiry
1961	 * simultaneously and the BR/EDR inquiry has already finished,
1962	 * stop discovery; otherwise the BR/EDR inquiry will stop
1963	 * discovery when it finishes. If a remote device name is
1964	 * still being resolved, do not change the discovery state.
1965	 */
1966
1967 if (hdev->discovery.type == DISCOV_TYPE_LE)
1968 goto discov_stopped;
1969
1970 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1971 return;
1972
1973 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1974 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1975 hdev->discovery.state != DISCOVERY_RESOLVING)
1976 goto discov_stopped;
1977
1978 return;
1979 }
1980
1981 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1982 HCI_CMD_TIMEOUT, &status);
1983 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001984 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001985 goto discov_stopped;
1986 }
1987
1988 return;
1989
1990discov_stopped:
1991 hci_dev_lock(hdev);
1992 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1993 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001994}
1995
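/* Request callback that restarts LE scanning by disabling and then
 * re-enabling it, using the extended scan commands when the controller
 * supports them.
 */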
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001996static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001997{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001998 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001999
2000	/* If the controller is not scanning, we are done. */
2001 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2002 return 0;
2003
2004 hci_req_add_le_scan_disable(req);
2005
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302006 if (use_ext_scan(hdev)) {
2007 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2008
2009 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2010 ext_enable_cp.enable = LE_SCAN_ENABLE;
2011 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2012
2013 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2014 sizeof(ext_enable_cp), &ext_enable_cp);
2015 } else {
2016 struct hci_cp_le_set_scan_enable cp;
2017
2018 memset(&cp, 0, sizeof(cp));
2019 cp.enable = LE_SCAN_ENABLE;
2020 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2021 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2022 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002023
2024 return 0;
2025}
2026
2027static void le_scan_restart_work(struct work_struct *work)
2028{
2029 struct hci_dev *hdev = container_of(work, struct hci_dev,
2030 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002031 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002032 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002033
2034 BT_DBG("%s", hdev->name);
2035
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002036 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002037 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002038 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2039 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002040 return;
2041 }
2042
2043 hci_dev_lock(hdev);
2044
2045 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2046 !hdev->discovery.scan_start)
2047 goto unlock;
2048
2049	/* When the scan was started, hdev->le_scan_disable was queued
2050	 * to run 'duration' after scan_start. During restart this work
2051	 * has been cancelled, so queue it again with the remaining
2052	 * timeout to make sure the scan does not run indefinitely.
2053	 */
2054 duration = hdev->discovery.scan_duration;
2055 scan_start = hdev->discovery.scan_start;
2056 now = jiffies;
2057 if (now - scan_start <= duration) {
2058 int elapsed;
2059
2060 if (now >= scan_start)
2061 elapsed = now - scan_start;
2062 else
2063 elapsed = ULONG_MAX - scan_start + now;
2064
2065 timeout = duration - elapsed;
2066 } else {
2067 timeout = 0;
2068 }
2069
2070 queue_delayed_work(hdev->req_workqueue,
2071 &hdev->le_scan_disable, timeout);
2072
2073unlock:
2074 hci_dev_unlock(hdev);
2075}
2076
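/* Request callback that starts an active LE scan for discovery:
 * disable advertising and passive scanning if they are running, pick
 * an own address type (RPA or non-resolvable private address) and
 * start scanning with the interval passed in 'opt'.
 */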
Johan Hedberge68f0722015-11-11 08:30:30 +02002077static int active_scan(struct hci_request *req, unsigned long opt)
2078{
2079 uint16_t interval = opt;
2080 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002081 u8 own_addr_type;
2082 int err;
2083
2084 BT_DBG("%s", hdev->name);
2085
2086 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2087 hci_dev_lock(hdev);
2088
2089 /* Don't let discovery abort an outgoing connection attempt
2090 * that's using directed advertising.
2091 */
2092 if (hci_lookup_le_connect(hdev)) {
2093 hci_dev_unlock(hdev);
2094 return -EBUSY;
2095 }
2096
2097 cancel_adv_timeout(hdev);
2098 hci_dev_unlock(hdev);
2099
Jaganath Kanakkassery94386b62017-12-11 20:26:47 +05302100 __hci_req_disable_advertising(req);
Johan Hedberge68f0722015-11-11 08:30:30 +02002101 }
2102
2103	/* If the controller is scanning, it means background scanning is
2104	 * running. Thus, we should temporarily stop it in order to set the
2105	 * discovery scanning parameters.
2106	 */
2107 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2108 hci_req_add_le_scan_disable(req);
2109
2110	/* All active scans will be done with either a resolvable private
2111	 * address (when the privacy feature has been enabled) or a
2112	 * non-resolvable private address.
2113	 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002114 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2115 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002116 if (err < 0)
2117 own_addr_type = ADDR_LE_DEV_PUBLIC;
2118
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +05302119 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2120 own_addr_type, 0);
Johan Hedberge68f0722015-11-11 08:30:30 +02002121 return 0;
2122}
2123
2124static int interleaved_discov(struct hci_request *req, unsigned long opt)
2125{
2126 int err;
2127
2128 BT_DBG("%s", req->hdev->name);
2129
2130 err = active_scan(req, opt);
2131 if (err)
2132 return err;
2133
Johan Hedberg7df26b52015-11-11 12:24:21 +02002134 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002135}
2136
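/* Run the synchronous requests needed to start discovery for the
 * current discovery type: BR/EDR inquiry, LE scan, or an interleaved
 * combination of both, and schedule the LE scan timeout when needed.
 */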
2137static void start_discovery(struct hci_dev *hdev, u8 *status)
2138{
2139 unsigned long timeout;
2140
2141 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2142
2143 switch (hdev->discovery.type) {
2144 case DISCOV_TYPE_BREDR:
2145 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002146 hci_req_sync(hdev, bredr_inquiry,
2147 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002148 status);
2149 return;
2150 case DISCOV_TYPE_INTERLEAVED:
2151 /* When running simultaneous discovery, the LE scanning time
2152	 * should occupy the whole discovery time since BR/EDR inquiry
2153 * and LE scanning are scheduled by the controller.
2154 *
2155 * For interleaving discovery in comparison, BR/EDR inquiry
2156 * and LE scanning are done sequentially with separate
2157 * timeouts.
2158 */
2159 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2160 &hdev->quirks)) {
2161 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2162 /* During simultaneous discovery, we double LE scan
2163 * interval. We must leave some time for the controller
2164 * to do BR/EDR inquiry.
2165 */
2166 hci_req_sync(hdev, interleaved_discov,
2167 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2168 status);
2169 break;
2170 }
2171
2172 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2173 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2174 HCI_CMD_TIMEOUT, status);
2175 break;
2176 case DISCOV_TYPE_LE:
2177 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2178 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2179 HCI_CMD_TIMEOUT, status);
2180 break;
2181 default:
2182 *status = HCI_ERROR_UNSPECIFIED;
2183 return;
2184 }
2185
2186 if (*status)
2187 return;
2188
2189 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2190
2191 /* When service discovery is used and the controller has a
2192 * strict duplicate filter, it is important to remember the
2193 * start and duration of the scan. This is required for
2194 * restarting scanning during the discovery phase.
2195 */
2196 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2197 hdev->discovery.result_filtering) {
2198 hdev->discovery.scan_start = jiffies;
2199 hdev->discovery.scan_duration = timeout;
2200 }
2201
2202 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2203 timeout);
2204}
2205
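/* Queue the commands needed to stop ongoing discovery: cancel a
 * running inquiry, disable LE scanning and cancel a pending remote
 * name request as appropriate for the current discovery state.
 */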
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002206bool hci_req_stop_discovery(struct hci_request *req)
2207{
2208 struct hci_dev *hdev = req->hdev;
2209 struct discovery_state *d = &hdev->discovery;
2210 struct hci_cp_remote_name_req_cancel cp;
2211 struct inquiry_entry *e;
2212 bool ret = false;
2213
2214 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2215
2216 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2217 if (test_bit(HCI_INQUIRY, &hdev->flags))
2218 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2219
2220 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2221 cancel_delayed_work(&hdev->le_scan_disable);
2222 hci_req_add_le_scan_disable(req);
2223 }
2224
2225 ret = true;
2226 } else {
2227 /* Passive scanning */
2228 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2229 hci_req_add_le_scan_disable(req);
2230 ret = true;
2231 }
2232 }
2233
2234 /* No further actions needed for LE-only discovery */
2235 if (d->type == DISCOV_TYPE_LE)
2236 return ret;
2237
2238 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2239 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2240 NAME_PENDING);
2241 if (!e)
2242 return ret;
2243
2244 bacpy(&cp.bdaddr, &e->data.bdaddr);
2245 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2246 &cp);
2247 ret = true;
2248 }
2249
2250 return ret;
2251}
2252
2253static int stop_discovery(struct hci_request *req, unsigned long opt)
2254{
2255 hci_dev_lock(req->hdev);
2256 hci_req_stop_discovery(req);
2257 hci_dev_unlock(req->hdev);
2258
2259 return 0;
2260}
2261
Johan Hedberge68f0722015-11-11 08:30:30 +02002262static void discov_update(struct work_struct *work)
2263{
2264 struct hci_dev *hdev = container_of(work, struct hci_dev,
2265 discov_update);
2266 u8 status = 0;
2267
2268 switch (hdev->discovery.state) {
2269 case DISCOVERY_STARTING:
2270 start_discovery(hdev, &status);
2271 mgmt_start_discovery_complete(hdev, status);
2272 if (status)
2273 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2274 else
2275 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2276 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002277 case DISCOVERY_STOPPING:
2278 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2279 mgmt_stop_discovery_complete(hdev, status);
2280 if (!status)
2281 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2282 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002283 case DISCOVERY_STOPPED:
2284 default:
2285 return;
2286 }
2287}
2288
Johan Hedbergc366f552015-11-23 15:43:06 +02002289static void discov_off(struct work_struct *work)
2290{
2291 struct hci_dev *hdev = container_of(work, struct hci_dev,
2292 discov_off.work);
2293
2294 BT_DBG("%s", hdev->name);
2295
2296 hci_dev_lock(hdev);
2297
2298	/* When the discoverable timeout triggers, just make sure
2299 * the limited discoverable flag is cleared. Even in the case
2300 * of a timeout triggered from general discoverable, it is
2301 * safe to unconditionally clear the flag.
2302 */
2303 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2304 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2305 hdev->discov_timeout = 0;
2306
2307 hci_dev_unlock(hdev);
2308
2309 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2310 mgmt_new_settings(hdev);
2311}
2312
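/* Request callback run when the controller is powered on: sync the
 * host configuration (SSP, SC, LE host support, link security),
 * restore advertising and refresh the BR/EDR scan mode, class, name
 * and EIR data.
 */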
Johan Hedberg2ff13892015-11-25 16:15:44 +02002313static int powered_update_hci(struct hci_request *req, unsigned long opt)
2314{
2315 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002316 u8 link_sec;
2317
2318 hci_dev_lock(hdev);
2319
2320 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2321 !lmp_host_ssp_capable(hdev)) {
2322 u8 mode = 0x01;
2323
2324 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2325
2326 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2327 u8 support = 0x01;
2328
2329 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2330 sizeof(support), &support);
2331 }
2332 }
2333
2334 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2335 lmp_bredr_capable(hdev)) {
2336 struct hci_cp_write_le_host_supported cp;
2337
2338 cp.le = 0x01;
2339 cp.simul = 0x00;
2340
2341 /* Check first if we already have the right
2342 * host state (host features set)
2343 */
2344 if (cp.le != lmp_host_le_capable(hdev) ||
2345 cp.simul != lmp_host_le_br_capable(hdev))
2346 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2347 sizeof(cp), &cp);
2348 }
2349
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002350 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002351 /* Make sure the controller has a good default for
2352 * advertising data. This also applies to the case
2353 * where BR/EDR was toggled during the AUTO_OFF phase.
2354 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002355 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2356 list_empty(&hdev->adv_instances)) {
2357 __hci_req_update_adv_data(req, 0x00);
2358 __hci_req_update_scan_rsp_data(req, 0x00);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002359
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002360 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2361 __hci_req_enable_advertising(req);
2362 } else if (!list_empty(&hdev->adv_instances)) {
2363 struct adv_info *adv_instance;
2364
Johan Hedberg2ff13892015-11-25 16:15:44 +02002365 adv_instance = list_first_entry(&hdev->adv_instances,
2366 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002367 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002368 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002369 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002370 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002371 }
2372
2373 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2374 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2375 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2376 sizeof(link_sec), &link_sec);
2377
2378 if (lmp_bredr_capable(hdev)) {
2379 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2380 __hci_req_write_fast_connectable(req, true);
2381 else
2382 __hci_req_write_fast_connectable(req, false);
2383 __hci_req_update_scan(req);
2384 __hci_req_update_class(req);
2385 __hci_req_update_name(req);
2386 __hci_req_update_eir(req);
2387 }
2388
2389 hci_dev_unlock(hdev);
2390 return 0;
2391}
2392
2393int __hci_req_hci_power_on(struct hci_dev *hdev)
2394{
2395 /* Register the available SMP channels (BR/EDR and LE) only when
2396 * successfully powering on the controller. This late
2397	 * registration is required so that LE SMP can clearly decide
2398	 * whether the public address or the static address is used.
2399 */
2400 smp_register(hdev);
2401
2402 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2403 NULL);
2404}
2405
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002406void hci_request_setup(struct hci_dev *hdev)
2407{
Johan Hedberge68f0722015-11-11 08:30:30 +02002408 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002409 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002410 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002411 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002412 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002413 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002414 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2415 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002416 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002417}
2418
2419void hci_request_cancel_all(struct hci_dev *hdev)
2420{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002421 hci_req_sync_cancel(hdev, ENODEV);
2422
Johan Hedberge68f0722015-11-11 08:30:30 +02002423 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002424 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002425 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002426 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002427 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002428 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002429 cancel_delayed_work_sync(&hdev->le_scan_disable);
2430 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002431
2432 if (hdev->adv_instance_timeout) {
2433 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2434 hdev->adv_instance_timeout = 0;
2435 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002436}