/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
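
/* Usage sketch (illustrative only, not part of this file's flow): queue
 * two commands and run them as one asynchronous request. The completion
 * callback fires once, after the last command in the request completes;
 * my_req_complete is a hypothetical callback for the example.
 *
 *	static void my_req_complete(struct hci_dev *hdev, u8 status,
 *				    u16 opcode)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	hci_req_run(&req, my_req_complete);
 */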

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
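
/* Usage sketch (illustrative only): send a single command and block until
 * its completion, receiving the response parameters back as an skb. The
 * opcode and timeout below are merely plausible values for the example.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	Here skb->data points at the response parameters
 *	(struct hci_rp_read_local_version); release it with kfree_skb(skb).
 */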

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
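
/* Usage sketch (illustrative only): hci_req_sync() drives a builder
 * callback that queues whatever commands are needed, then waits for the
 * last one to complete. disable_le_scan is a hypothetical builder and
 * HCI_CMD_TIMEOUT a plausible timeout for the example.
 *
 *	static int disable_le_scan(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add_le_scan_disable(req);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err;
 *
 *	err = hci_req_sync(hdev, disable_le_scan, 0, HCI_CMD_TIMEOUT,
 *			   &status);
 */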
284
Johan Hedberg0857dd32014-12-19 13:40:20 +0200285struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
286 const void *param)
287{
288 int len = HCI_COMMAND_HDR_SIZE + plen;
289 struct hci_command_hdr *hdr;
290 struct sk_buff *skb;
291
292 skb = bt_skb_alloc(len, GFP_ATOMIC);
293 if (!skb)
294 return NULL;
295
Johannes Berg4df864c2017-06-16 14:29:21 +0200296 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200297 hdr->opcode = cpu_to_le16(opcode);
298 hdr->plen = plen;
299
300 if (plen)
Johannes Berg59ae1d12017-06-16 14:29:20 +0200301 skb_put_data(skb, param, plen);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200302
303 BT_DBG("skb len %d", skb->len);
304
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100305 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 hci_skb_opcode(skb) = opcode;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200307
308 return skb;
309}
310
311/* Queue a command to an asynchronous HCI request */
312void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 const void *param, u8 event)
314{
315 struct hci_dev *hdev = req->hdev;
316 struct sk_buff *skb;
317
318 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
319
320 /* If an error occurred during request building, there is no point in
321 * queueing the HCI command. We can simply return.
322 */
323 if (req->err)
324 return;
325
326 skb = hci_prepare_cmd(hdev, opcode, plen, param);
327 if (!skb) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +0100328 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329 opcode);
Johan Hedberg0857dd32014-12-19 13:40:20 +0200330 req->err = -ENOMEM;
331 return;
332 }
333
334 if (skb_queue_empty(&req->cmd_q))
Johan Hedberg44d27132015-11-05 09:31:40 +0200335 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200336
Marcel Holtmann242c0eb2015-10-25 22:45:53 +0100337 bt_cb(skb)->hci.req_event = event;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200338
339 skb_queue_tail(&req->cmd_q, skb);
340}
341
342void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
343 const void *param)
344{
345 hci_req_add_ev(req, opcode, plen, param, 0);
346}
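
/* Usage sketch (illustrative only, assuming the caller drives Inquiry
 * through this API): when a command is terminated by a specific event
 * rather than Command Complete, pass that event so the core can match
 * it; lap[] is a hypothetical inquiry access code for the example.
 *
 *	struct hci_cp_inquiry cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(&cp.lap, lap, sizeof(cp.lap));
 *	cp.length = 0x08;
 *
 *	hci_req_add_ev(&req, HCI_OP_INQUIRY, sizeof(cp), &cp,
 *		       HCI_EV_INQUIRY_COMPLETE);
 */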
347
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200348void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
349{
350 struct hci_dev *hdev = req->hdev;
351 struct hci_cp_write_page_scan_activity acp;
352 u8 type;
353
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
355 return;
356
357 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
358 return;
359
360 if (enable) {
361 type = PAGE_SCAN_TYPE_INTERLACED;
362
363 /* 160 msec page scan interval */
364 acp.interval = cpu_to_le16(0x0100);
365 } else {
366 type = PAGE_SCAN_TYPE_STANDARD; /* default */
367
368 /* default 1.28 sec page scan */
369 acp.interval = cpu_to_le16(0x0800);
370 }
371
372 acp.window = cpu_to_le16(0x0012);
373
374 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
375 __cpu_to_le16(hdev->page_scan_window) != acp.window)
376 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
377 sizeof(acp), &acp);
378
379 if (hdev->page_scan_type != type)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
381}
382
Johan Hedberg196a5e92015-11-22 18:55:44 +0200383/* This function controls the background scanning based on hdev->pend_le_conns
384 * list. If there are pending LE connection we start the background scanning,
385 * otherwise we stop it.
386 *
387 * This function requires the caller holds hdev->lock.
388 */
389static void __hci_update_background_scan(struct hci_request *req)
390{
391 struct hci_dev *hdev = req->hdev;
392
393 if (!test_bit(HCI_UP, &hdev->flags) ||
394 test_bit(HCI_INIT, &hdev->flags) ||
395 hci_dev_test_flag(hdev, HCI_SETUP) ||
396 hci_dev_test_flag(hdev, HCI_CONFIG) ||
397 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
398 hci_dev_test_flag(hdev, HCI_UNREGISTER))
399 return;
400
401 /* No point in doing scanning if LE support hasn't been enabled */
402 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
403 return;
404
405 /* If discovery is active don't interfere with it */
406 if (hdev->discovery.state != DISCOVERY_STOPPED)
407 return;
408
409 /* Reset RSSI and UUID filters when starting background scanning
410 * since these filters are meant for service discovery only.
411 *
412 * The Start Discovery and Start Service Discovery operations
413 * ensure to set proper values for RSSI threshold and UUID
414 * filter list. So it is safe to just reset them here.
415 */
416 hci_discovery_filter_clear(hdev);
417
418 if (list_empty(&hdev->pend_le_conns) &&
419 list_empty(&hdev->pend_le_reports)) {
420 /* If there is no pending LE connections or devices
421 * to be scanned for, we should stop the background
422 * scanning.
423 */
424
425 /* If controller is not scanning we are done. */
426 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
427 return;
428
429 hci_req_add_le_scan_disable(req);
430
431 BT_DBG("%s stopping background scanning", hdev->name);
432 } else {
433 /* If there is at least one pending LE connection, we should
434 * keep the background scan running.
435 */
436
437 /* If controller is connecting, we should not start scanning
438 * since some controllers are not able to scan and connect at
439 * the same time.
440 */
441 if (hci_lookup_le_connect(hdev))
442 return;
443
444 /* If controller is currently scanning, we stop it to ensure we
445 * don't miss any advertising (due to duplicates filter).
446 */
447 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
448 hci_req_add_le_scan_disable(req);
449
450 hci_req_add_le_passive_scan(req);
451
452 BT_DBG("%s starting background scanning", hdev->name);
453 }
454}
455
Johan Hedberg00cf5042015-11-25 16:15:41 +0200456void __hci_req_update_name(struct hci_request *req)
457{
458 struct hci_dev *hdev = req->hdev;
459 struct hci_cp_write_local_name cp;
460
461 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
462
463 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
464}
465
Johan Hedbergb1a89172015-11-25 16:15:42 +0200466#define PNP_INFO_SVCLASS_ID 0x1200
467
468static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
469{
470 u8 *ptr = data, *uuids_start = NULL;
471 struct bt_uuid *uuid;
472
473 if (len < 4)
474 return ptr;
475
476 list_for_each_entry(uuid, &hdev->uuids, list) {
477 u16 uuid16;
478
479 if (uuid->size != 16)
480 continue;
481
482 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
483 if (uuid16 < 0x1100)
484 continue;
485
486 if (uuid16 == PNP_INFO_SVCLASS_ID)
487 continue;
488
489 if (!uuids_start) {
490 uuids_start = ptr;
491 uuids_start[0] = 1;
492 uuids_start[1] = EIR_UUID16_ALL;
493 ptr += 2;
494 }
495
496 /* Stop if not enough space to put next UUID */
497 if ((ptr - data) + sizeof(u16) > len) {
498 uuids_start[1] = EIR_UUID16_SOME;
499 break;
500 }
501
502 *ptr++ = (uuid16 & 0x00ff);
503 *ptr++ = (uuid16 & 0xff00) >> 8;
504 uuids_start[0] += sizeof(uuid16);
505 }
506
507 return ptr;
508}
509
510static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
511{
512 u8 *ptr = data, *uuids_start = NULL;
513 struct bt_uuid *uuid;
514
515 if (len < 6)
516 return ptr;
517
518 list_for_each_entry(uuid, &hdev->uuids, list) {
519 if (uuid->size != 32)
520 continue;
521
522 if (!uuids_start) {
523 uuids_start = ptr;
524 uuids_start[0] = 1;
525 uuids_start[1] = EIR_UUID32_ALL;
526 ptr += 2;
527 }
528
529 /* Stop if not enough space to put next UUID */
530 if ((ptr - data) + sizeof(u32) > len) {
531 uuids_start[1] = EIR_UUID32_SOME;
532 break;
533 }
534
535 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
536 ptr += sizeof(u32);
537 uuids_start[0] += sizeof(u32);
538 }
539
540 return ptr;
541}
542
543static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
544{
545 u8 *ptr = data, *uuids_start = NULL;
546 struct bt_uuid *uuid;
547
548 if (len < 18)
549 return ptr;
550
551 list_for_each_entry(uuid, &hdev->uuids, list) {
552 if (uuid->size != 128)
553 continue;
554
555 if (!uuids_start) {
556 uuids_start = ptr;
557 uuids_start[0] = 1;
558 uuids_start[1] = EIR_UUID128_ALL;
559 ptr += 2;
560 }
561
562 /* Stop if not enough space to put next UUID */
563 if ((ptr - data) + 16 > len) {
564 uuids_start[1] = EIR_UUID128_SOME;
565 break;
566 }
567
568 memcpy(ptr, uuid->uuid, 16);
569 ptr += 16;
570 uuids_start[0] += 16;
571 }
572
573 return ptr;
574}
575
576static void create_eir(struct hci_dev *hdev, u8 *data)
577{
578 u8 *ptr = data;
579 size_t name_len;
580
581 name_len = strlen(hdev->dev_name);
582
583 if (name_len > 0) {
584 /* EIR Data type */
585 if (name_len > 48) {
586 name_len = 48;
587 ptr[1] = EIR_NAME_SHORT;
588 } else
589 ptr[1] = EIR_NAME_COMPLETE;
590
591 /* EIR Data length */
592 ptr[0] = name_len + 1;
593
594 memcpy(ptr + 2, hdev->dev_name, name_len);
595
596 ptr += (name_len + 2);
597 }
598
599 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
600 ptr[0] = 2;
601 ptr[1] = EIR_TX_POWER;
602 ptr[2] = (u8) hdev->inq_tx_power;
603
604 ptr += 3;
605 }
606
607 if (hdev->devid_source > 0) {
608 ptr[0] = 9;
609 ptr[1] = EIR_DEVICE_ID;
610
611 put_unaligned_le16(hdev->devid_source, ptr + 2);
612 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
613 put_unaligned_le16(hdev->devid_product, ptr + 6);
614 put_unaligned_le16(hdev->devid_version, ptr + 8);
615
616 ptr += 10;
617 }
618
619 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
620 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
621 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622}
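
/* Each field emitted by create_eir() follows the length/type/value layout
 * from the Core Specification: one length octet covering the type octet
 * plus the payload, then the type octet, then the data. For example, the
 * complete local name "BlueZ" would be encoded as (illustration only):
 *
 *	0x06 0x09 'B' 'l' 'u' 'e' 'Z'
 *	  |    |
 *	  |    +-- EIR_NAME_COMPLETE (0x09)
 *	  +------- length: 1 type octet + 5 name octets
 */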

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the Set Extended Scan Parameters and Set
	 * Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using a random resolvable address with
	 * LE privacy enabled, controllers that support Extended Scanner
	 * Filter Policies can additionally handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
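
/* Illustration (assuming HCI_MAX_SHORT_NAME_LENGTH is 10, as defined in
 * hci_core.h): a dev_name of "Development Board" cannot fit, so with no
 * short_name configured the helper emits an EIR_NAME_SHORT field carrying
 * the truncated "Developmen" plus a NUL terminator, while a name such as
 * "Board" goes out unmodified as EIR_NAME_COMPLETE.
 */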

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
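
/* Address sub-type illustration (per the Core Specification): the two most
 * significant bits of a random address select its kind, which is why the
 * nrpa.b[5] &= 0x3f above forces 0b00 (non-resolvable private). Resolvable
 * private addresses use 0b01 and static random addresses use 0b11.
 */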
1528
Jaganath Kanakkassery45b77492018-07-19 17:09:43 +05301529void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1530{
1531 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1532}
1533
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05301534int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301535{
1536 struct hci_cp_le_set_ext_adv_params cp;
1537 struct hci_dev *hdev = req->hdev;
1538 bool connectable;
1539 u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In the extended adv parameters the interval fields are 3 octets;
	 * 0x000800 is 2048 slots of 0.625 ms, i.e. 1.28 s.
	 */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127; /* 0x7f: host has no TX power preference */
	cp.handle = 0;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if the random address needs to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
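
/* Illustrative sketch (not compiled): how the flags above map onto the
 * advertising event properties. The helper name is hypothetical; the
 * constants are the ones used in __hci_req_setup_ext_adv_instance().
 */
#if 0
static u16 example_evt_properties(bool connectable, bool scannable,
				  bool secondary_adv)
{
	if (connectable)
		return secondary_adv ? LE_EXT_ADV_CONN_IND :
				       LE_LEGACY_ADV_IND;
	if (scannable)
		return secondary_adv ? LE_EXT_ADV_SCAN_IND :
				       LE_LEGACY_ADV_SCAN_IND;
	return secondary_adv ? LE_EXT_ADV_NON_CONN_IND :
			       LE_LEGACY_NONCONN_IND;
}
#endif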

void __hci_req_enable_ext_advertising(struct hci_request *req)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);
}
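
/* Illustrative sketch (not compiled): Set Extended Advertising Enable is
 * a variable-length command, a fixed header followed by num_of_sets set
 * entries. A hypothetical two-set variant would be laid out like this:
 */
#if 0
	u8 data[sizeof(struct hci_cp_le_set_ext_adv_enable) +
		sizeof(struct hci_cp_ext_adv_set) * 2];
	struct hci_cp_le_set_ext_adv_enable *cp = (void *) data;
	struct hci_cp_ext_adv_set *adv_set = (void *) cp->data;

	memset(data, 0, sizeof(data));
	cp->enable = 0x01;
	cp->num_of_sets = 0x02;
	adv_set[0].handle = 0x00;	/* first advertising set */
	adv_set[1].handle = 0x01;	/* second advertising set */

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(data), data);
#endif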

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req);

	return 0;
}
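
/* Illustrative sketch (not compiled): a minimal request callback that
 * restarts extended advertising for instance 0x00, following the same
 * hci_req_sync() pattern as the other update helpers in this file. The
 * function name is hypothetical.
 */
#if 0
static int example_restart_ext_adv(struct hci_request *req,
				   unsigned long opt)
{
	int err;

	hci_dev_lock(req->hdev);
	err = __hci_req_start_ext_adv(req, 0x00);
	hci_dev_unlock(req->hdev);

	return err;
}
#endif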

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, the duration should be ignored. We still set a
	 * timeout in case further instances are added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
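
/* Worked example of the timeout selection above (numbers invented for
 * illustration): with duration = 10 s and remaining_time = 4 s the
 * duration exceeds the remaining lifetime, so timeout becomes 4 s and
 * remaining_time drops to 0; the instance will then be removed on the
 * next expiry rather than re-scheduled.
 */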

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
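
/* Illustrative sketch (not compiled): removing all timed-out instances
 * from a request callback. Passing sk == NULL means no management socket
 * is excluded from the "advertising removed" events; the caller below is
 * hypothetical.
 */
#if 0
static int example_clear_expired(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_clear_adv_instance(req->hdev, NULL, req, 0x00, false);
	hci_dev_unlock(req->hdev);

	return 0;
}
#endif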

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
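
/* Summary of the address selection above, for quick reference:
 *
 *   use_rpa                  -> ADDR_LE_DEV_RANDOM, (re)generated RPA
 *   require_privacy          -> ADDR_LE_DEV_RANDOM, fresh NRPA
 *   forced or implied static -> ADDR_LE_DEV_RANDOM, hdev->static_addr
 *   otherwise                -> ADDR_LE_DEV_PUBLIC
 */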

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
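
/* The scan value written above is the standard Write Scan Enable bitmask:
 * SCAN_DISABLED (0x00), SCAN_INQUIRY (0x01, inquiry scan) and SCAN_PAGE
 * (0x02, page scan); a connectable and discoverable device thus writes
 * 0x03.
 */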

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;	/* Limited Discoverable Mode (CoD bit 13) */

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
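
/* Class of Device as assembled above: cod[0] carries the minor class,
 * cod[1] the major class (plus 0x20 = CoD bit 13, the Limited
 * Discoverable Mode flag) and cod[2] the service-class hints collected
 * from the registered UUIDs.
 */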

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode: LIAC (0x9e8b00) followed by
		 * GIAC (0x9e8b33), both little-endian on the wire.
		 */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode: GIAC (0x9e8b33) only */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
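
/* Illustrative sketch (not compiled): hci_abort_conn() is the
 * synchronous entry point; a hypothetical caller tearing a link down
 * with the usual remote-user-termination reason would do:
 */
#if 0
	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err)
		bt_dev_err(conn->hdev, "abort failed: %d", err);
#endif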

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
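
/* Worked example of the re-queue math above (numbers invented): with
 * scan_duration = 10240 jiffies and the restart happening 4000 jiffies
 * after scan_start, elapsed = 4000 and the disable work is re-queued
 * after the remaining 6240 jiffies. The ULONG_MAX branch only handles
 * jiffies wrap-around between scan_start and now.
 */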

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type, 0);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
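
/* For reference, the discovery types handled above: DISCOV_TYPE_BREDR
 * runs a plain inquiry, DISCOV_TYPE_LE runs an active LE scan for
 * DISCOV_LE_TIMEOUT, and DISCOV_TYPE_INTERLEAVED either lets the
 * controller schedule inquiry and a double-interval LE scan in parallel
 * (simultaneous-discovery quirk) or runs the LE scan and the inquiry
 * back to back.
 */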

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
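
/* Illustrative note: the expected caller is the power-on path in
 * hci_core.c, roughly
 *
 *	err = __hci_req_hci_power_on(hdev);
 *
 * which registers SMP and then replays the stored host configuration
 * through powered_update_hci() above.
 */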

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}