Commit 4bb540db authored by David S. Miller

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2020-07-31

Here's the main bluetooth-next pull request for 5.9:

 - Fix firmware filenames for Marvell chipsets
 - Several suspend-related fixes
 - Added mgmt commands for runtime configuration
 - Multiple fixes for Qualcomm-based controllers
 - Add new monitoring feature for mgmt
 - Fix handling of legacy cipher (E0) together with security level 4
 - Add support for Realtek 8822CE controller
 - Fix issues with Chinese controllers using fake VID/PID values
 - Multiple other smaller fixes & improvements
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd69058f 075f7732
......@@ -44,7 +44,7 @@ examples:
uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
uart-has-rtscts = <1>;
uart-has-rtscts;
bluetooth {
compatible = "realtek,rtl8723bs-bt";
......
......@@ -106,7 +106,7 @@ static void bcm203x_complete(struct urb *urb)
}
data->state = BCM203X_LOAD_FIRMWARE;
/* fall through */
fallthrough;
case BCM203X_LOAD_FIRMWARE:
if (data->fw_sent == data->fw_size) {
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP),
......
......@@ -295,7 +295,6 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
baud_reg = REG_CONTROL_BAUD_RATE_115200;
break;
case PKT_BAUD_RATE_57600:
/* Fall through... */
default:
baud_reg = REG_CONTROL_BAUD_RATE_57600;
break;
......@@ -585,7 +584,6 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200;
break;
case 57600:
/* Fall through... */
default:
cmd[4] = 0x03;
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600;
......
......@@ -754,6 +754,65 @@ void btintel_reset_to_bootloader(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
int btintel_read_debug_features(struct hci_dev *hdev,
struct intel_debug_features *features)
{
struct sk_buff *skb;
u8 page_no = 1;
/* Intel controller supports two pages, each page is of 128-bit
* feature bit mask. And each bit defines specific feature support
*/
skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Reading supported features failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != (sizeof(features->page1) + 3)) {
bt_dev_err(hdev, "Supported features event size mismatch");
kfree_skb(skb);
return -EILSEQ;
}
memcpy(features->page1, skb->data + 3, sizeof(features->page1));
/* Read the supported features page2 if required in future.
*/
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_debug_features);
int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 };
struct sk_buff *skb;
if (!features)
return -EINVAL;
if (!(features->page1[0] & 0x3f)) {
bt_dev_info(hdev, "Telemetry exception format not supported");
return 0;
}
skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_debug_features);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
......
......@@ -62,6 +62,10 @@ struct intel_reset {
__le32 boot_param;
} __packed;
struct intel_debug_features {
__u8 page1[16];
} __packed;
#if IS_ENABLED(CONFIG_BT_INTEL)
int btintel_check_bdaddr(struct hci_dev *hdev);
......@@ -88,6 +92,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
void btintel_reset_to_bootloader(struct hci_dev *hdev);
int btintel_read_debug_features(struct hci_dev *hdev,
struct intel_debug_features *features);
int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
......@@ -186,4 +194,17 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
}
static inline int btintel_read_debug_features(struct hci_dev *hdev,
struct intel_debug_features *features)
{
return -EOPNOTSUPP;
}
static inline int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
return -EOPNOTSUPP;
}
#endif
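The two exported helpers above are meant to be chained from a vendor setup path: query the supported feature page first, then enable the telemetry event mask only when the controller advertises it (btintel_set_debug_features() already bails out if the relevant bits of page1 are clear). A minimal sketch of such a caller, assuming a hypothetical function name and hook point rather than anything in this commit:

static int btintel_example_enable_debug(struct hci_dev *hdev)
{
	struct intel_debug_features features;
	int err;

	/* Read the 128-bit feature bitmask (page 1) from the controller. */
	err = btintel_read_debug_features(hdev, &features);
	if (err)
		return err;

	/* Enable the telemetry event mask if the feature is advertised. */
	return btintel_set_debug_features(hdev, &features);
}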
......@@ -587,6 +587,14 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
static bool btmrvl_prevent_wake(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
return !device_may_wakeup(&card->func->dev);
}
/*
* This function handles the event generated by firmware, rx data
* received from firmware, and tx data sent from kernel.
......@@ -669,6 +677,7 @@ static int btmrvl_service_main_thread(void *data)
int btmrvl_register_hdev(struct btmrvl_private *priv)
{
struct hci_dev *hdev = NULL;
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
int ret;
hdev = hci_alloc_dev();
......@@ -687,6 +696,8 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->send = btmrvl_send_frame;
hdev->setup = btmrvl_setup;
hdev->set_bdaddr = btmrvl_set_bdaddr;
hdev->prevent_wake = btmrvl_prevent_wake;
SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
......
......@@ -111,6 +111,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
"Failed to request irq_bt %d (%d)\n",
cfg->irq_bt, ret);
}
/* Configure wakeup (enabled by default) */
device_init_wakeup(dev, true);
disable_irq(cfg->irq_bt);
}
}
......@@ -328,7 +331,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.helper = NULL,
.firmware = "mrvl/sd8977_uapsta.bin",
.firmware = "mrvl/sdsd8977_combo_v2.bin",
.reg = &btmrvl_reg_8977,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
......@@ -346,7 +349,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
.firmware = "mrvl/sdsd8997_combo_v4.bin",
.reg = &btmrvl_reg_8997,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
......@@ -1654,6 +1657,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
MODULE_SHUTDOWN_REQ);
btmrvl_sdio_disable_host_int(card);
}
BT_DBG("unregister dev");
card->priv->surprise_removed = true;
btmrvl_sdio_unregister_dev(card);
......@@ -1690,7 +1694,8 @@ static int btmrvl_sdio_suspend(struct device *dev)
}
/* Enable platform specific wakeup interrupt */
if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 &&
device_may_wakeup(dev)) {
card->plt_wake_cfg->wake_by_bt = false;
enable_irq(card->plt_wake_cfg->irq_bt);
enable_irq_wake(card->plt_wake_cfg->irq_bt);
......@@ -1707,7 +1712,8 @@ static int btmrvl_sdio_suspend(struct device *dev)
BT_ERR("HS not activated, suspend failed!");
/* Disable platform specific wakeup interrupt */
if (card->plt_wake_cfg &&
card->plt_wake_cfg->irq_bt >= 0) {
card->plt_wake_cfg->irq_bt >= 0 &&
device_may_wakeup(dev)) {
disable_irq_wake(card->plt_wake_cfg->irq_bt);
disable_irq(card->plt_wake_cfg->irq_bt);
}
......@@ -1767,7 +1773,8 @@ static int btmrvl_sdio_resume(struct device *dev)
hci_resume_dev(hcidev);
/* Disable platform specific wakeup interrupt */
if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 &&
device_may_wakeup(dev)) {
disable_irq_wake(card->plt_wake_cfg->irq_bt);
disable_irq(card->plt_wake_cfg->irq_bt);
if (card->plt_wake_cfg->wake_by_bt)
......@@ -1831,6 +1838,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");
......@@ -685,7 +685,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag;
u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
......@@ -693,6 +693,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
return err;
}
/* Power on data RAM the firmware relies on. */
param = 1;
wmt_params.op = MTK_WMT_FUNC_CTRL;
wmt_params.flag = 3;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
return err;
}
fw_ptr = fw->data;
fw_size = fw->size;
......
......@@ -400,6 +400,27 @@ static int qca_download_firmware(struct hci_dev *hdev,
return ret;
}
static int qca_disable_soc_logging(struct hci_dev *hdev)
{
struct sk_buff *skb;
u8 cmd[2];
int err;
cmd[0] = QCA_DISABLE_LOGGING_SUB_OP;
cmd[1] = 0x00;
skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd,
HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err);
return err;
}
kfree_skb(skb);
return 0;
}
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
......@@ -486,6 +507,12 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
if (soc_type >= QCA_WCN3991) {
err = qca_disable_soc_logging(hdev);
if (err < 0)
return err;
}
/* Perform HCI reset */
err = qca_send_reset(hdev);
if (err < 0) {
......
......@@ -14,6 +14,7 @@
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
#define QCA_DISABLE_LOGGING (0xFC17)
#define EDL_CMD_REQ_RES_EVT (0x00)
#define EDL_PATCH_VER_RES_EVT (0x19)
......@@ -22,6 +23,7 @@
#define EDL_CMD_EXE_STATUS_EVT (0x00)
#define EDL_SET_BAUDRATE_RSP_EVT (0x92)
#define EDL_NVM_ACCESS_CODE_EVT (0x0B)
#define QCA_DISABLE_LOGGING_SUB_OP (0x14)
#define EDL_TAG_ID_HCI (17)
#define EDL_TAG_ID_DEEP_SLEEP (27)
......
......@@ -793,7 +793,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (!h5)
return -ENOMEM;
set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;
......
......@@ -219,7 +219,7 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu)
* perfectly safe to always send one.
*/
BT_DBG("dual wake-up-indication");
/* fall through */
fallthrough;
case HCILL_ASLEEP:
/* acknowledge device wake up */
if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {
......
......@@ -46,7 +46,7 @@
#define HCI_MAX_IBS_SIZE 10
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
#define MEMDUMP_TIMEOUT_MS 8000
......@@ -72,7 +72,8 @@ enum qca_flags {
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
QCA_MEMDUMP_COLLECTION,
QCA_HW_ERROR_EVENT
QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED
};
enum qca_capabilities {
......@@ -289,25 +290,21 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
case HCI_IBS_TX_VOTE_CLOCK_ON:
qca->tx_vote = true;
qca->tx_votes_on++;
new_vote = true;
break;
case HCI_IBS_RX_VOTE_CLOCK_ON:
qca->rx_vote = true;
qca->rx_votes_on++;
new_vote = true;
break;
case HCI_IBS_TX_VOTE_CLOCK_OFF:
qca->tx_vote = false;
qca->tx_votes_off++;
new_vote = qca->rx_vote | qca->tx_vote;
break;
case HCI_IBS_RX_VOTE_CLOCK_OFF:
qca->rx_vote = false;
qca->rx_votes_off++;
new_vote = qca->rx_vote | qca->tx_vote;
break;
default:
......@@ -315,6 +312,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
return;
}
new_vote = qca->rx_vote | qca->tx_vote;
if (new_vote != old_vote) {
if (new_vote)
__serial_clock_on(hu->tty);
......@@ -474,8 +473,6 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
case HCI_IBS_TX_ASLEEP:
case HCI_IBS_TX_WAKING:
/* Fall through */
default:
BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
break;
......@@ -518,8 +515,6 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
case HCI_IBS_TX_ASLEEP:
case HCI_IBS_TX_AWAKE:
/* Fall through */
default:
BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
break;
......@@ -837,8 +832,6 @@ static void device_woke_up(struct hci_uart *hu)
break;
case HCI_IBS_TX_ASLEEP:
/* Fall through */
default:
BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
qca->tx_ibs_state);
......@@ -862,6 +855,13 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
qca->tx_ibs_state);
if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
/* As SSR is in progress, ignore the packets */
bt_dev_dbg(hu->hdev, "SSR is in progress");
kfree_skb(skb);
return 0;
}
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
......@@ -983,8 +983,11 @@ static void qca_controller_memdump(struct work_struct *work)
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
mutex_lock(&qca->hci_memdump_lock);
/* Skip processing the received packets if timeout detected. */
if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
/* Skip processing the received packets if timeout detected
* or memdump collection completed.
*/
if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
mutex_unlock(&qca->hci_memdump_lock);
return;
}
......@@ -1128,6 +1131,7 @@ static int qca_controller_memdump_event(struct hci_dev *hdev,
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
set_bit(QCA_SSR_TRIGGERED, &qca->flags);
skb_queue_tail(&qca->rx_memdump_q, skb);
queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
......@@ -1485,9 +1489,8 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
struct qca_memdump_data *qca_memdump = qca->qca_memdump;
char *memdump_buf = NULL;
set_bit(QCA_SSR_TRIGGERED, &qca->flags);
set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
......@@ -1509,19 +1512,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
qca_wait_for_dump_collection(hdev);
}
mutex_lock(&qca->hci_memdump_lock);
if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
mutex_lock(&qca->hci_memdump_lock);
if (qca_memdump)
memdump_buf = qca_memdump->memdump_buf_head;
vfree(memdump_buf);
kfree(qca_memdump);
if (qca->qca_memdump) {
vfree(qca->qca_memdump->memdump_buf_head);
kfree(qca->qca_memdump);
qca->qca_memdump = NULL;
}
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
cancel_delayed_work(&qca->ctrl_memdump_timeout);
skb_queue_purge(&qca->rx_memdump_q);
}
mutex_unlock(&qca->hci_memdump_lock);
if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
cancel_work_sync(&qca->ctrl_memdump_evt);
skb_queue_purge(&qca->rx_memdump_q);
}
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
......@@ -1532,10 +1539,30 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
if (qca->memdump_state == QCA_MEMDUMP_IDLE)
set_bit(QCA_SSR_TRIGGERED, &qca->flags);
if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
qca_send_crashbuffer(hu);
else
bt_dev_info(hdev, "Dump collection is in process");
qca_wait_for_dump_collection(hdev);
} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
/* Let us wait here until memory dump collected or
* memory dump timer expired.
*/
bt_dev_info(hdev, "waiting for dump to complete");
qca_wait_for_dump_collection(hdev);
}
mutex_lock(&qca->hci_memdump_lock);
if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
/* Inject hw error event to reset the device
* and driver.
*/
hci_reset_dev(hu->hdev);
}
}
mutex_unlock(&qca->hci_memdump_lock);
}
static int qca_wcn3990_init(struct hci_uart *hu)
......@@ -1641,11 +1668,15 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "setting up %s",
qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
qca->memdump_state = QCA_MEMDUMP_IDLE;
retry:
ret = qca_power_on(hdev);
if (ret)
return ret;
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
if (qca_is_wcn399x(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
......@@ -1788,9 +1819,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
hu->hdev->hw_error = NULL;
hu->hdev->cmd_timeout = NULL;
/* Non-serdev device usually is powered by external power
* and don't need additional action in driver for power down
*/
......@@ -1812,6 +1840,9 @@ static int qca_power_off(struct hci_dev *hdev)
struct qca_data *qca = hu->priv;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
hu->hdev->hw_error = NULL;
hu->hdev->cmd_timeout = NULL;
/* Stop sending shutdown command if soc crashes. */
if (soc_type != QCA_ROME
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
......@@ -1819,7 +1850,6 @@ static int qca_power_off(struct hci_dev *hdev)
usleep_range(8000, 10000);
}
qca->memdump_state = QCA_MEMDUMP_IDLE;
qca_power_shutdown(hu);
return 0;
}
......@@ -1962,9 +1992,10 @@ static int qca_serdev_probe(struct serdev_device *serdev)
}
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (!qcadev->susclk) {
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
} else {
return PTR_ERR(qcadev->susclk);
}
err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
if (err)
return err;
......@@ -1972,7 +2003,6 @@ static int qca_serdev_probe(struct serdev_device *serdev)
err = clk_prepare_enable(qcadev->susclk);
if (err)
return err;
}
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
......@@ -2050,6 +2080,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
struct hci_uart *hu = &qcadev->serdev_hu;
struct qca_data *qca = hu->priv;
unsigned long flags;
bool tx_pending = false;
int ret = 0;
u8 cmd;
......@@ -2068,7 +2099,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
switch (qca->tx_ibs_state) {
case HCI_IBS_TX_WAKING:
del_timer(&qca->wake_retrans_timer);
/* Fall through */
fallthrough;
case HCI_IBS_TX_AWAKE:
del_timer(&qca->tx_idle_timer);
......@@ -2083,8 +2114,7 @@ static int __maybe_unused qca_suspend(struct device *dev)
qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
qca->ibs_sent_slps++;
qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
tx_pending = true;
break;
case HCI_IBS_TX_ASLEEP:
......@@ -2101,23 +2131,25 @@ static int __maybe_unused qca_suspend(struct device *dev)
if (ret < 0)
goto error;
if (tx_pending) {
serdev_device_wait_until_sent(hu->serdev,
msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
/* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going
* to sleep, so that the packet does not wake the system later.
*/
ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
if (ret == 0) {
ret = -ETIMEDOUT;
goto error;
}
if (ret > 0)
return 0;
if (ret == 0)
ret = -ETIMEDOUT;
error:
clear_bit(QCA_SUSPENDING, &qca->flags);
......
......@@ -355,6 +355,7 @@ void hci_uart_unregister_device(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
hci_free_dev(hdev);
......
......@@ -36,9 +36,9 @@
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
......
......@@ -41,6 +41,8 @@
#define BLUETOOTH_VER_1_1 1
#define BLUETOOTH_VER_1_2 2
#define BLUETOOTH_VER_2_0 3
#define BLUETOOTH_VER_2_1 4
#define BLUETOOTH_VER_4_0 6
/* Reserv for core and drivers use */
#define BT_SKB_RESERVE 8
......@@ -147,6 +149,10 @@ struct bt_voice {
#define BT_MODE_LE_FLOWCTL 0x03
#define BT_MODE_EXT_FLOWCTL 0x04
#define BT_PKT_STATUS 16
#define BT_SCM_PKT_STATUS 0x03
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
......@@ -286,6 +292,7 @@ struct bt_sock {
struct sock *parent;
unsigned long flags;
void (*skb_msg_name)(struct sk_buff *, void *, int *);
void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *);
};
enum {
......@@ -335,6 +342,10 @@ struct l2cap_ctrl {
struct l2cap_chan *chan;
};
struct sco_ctrl {
u8 pkt_status;
};
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
......@@ -361,6 +372,7 @@ struct bt_skb_cb {
u8 incoming:1;
union {
struct l2cap_ctrl l2cap;
struct sco_ctrl sco;
struct hci_ctrl hci;
};
};
......
......@@ -227,6 +227,17 @@ enum {
* supported.
*/
HCI_QUIRK_VALID_LE_STATES,
/* When this quirk is set, then erroneous data reporting
* is ignored. This is mainly due to the fact that the HCI
* Read Default Erroneous Data Reporting command is advertised,
* but not supported; these controllers often reply with unknown
* command and tend to lock up randomly. Needing a hard reset.
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
};
/* HCI device flags */
......@@ -307,6 +318,7 @@ enum {
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
HCI_LL_RPA_RESOLUTION,
HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
......@@ -1637,6 +1649,8 @@ struct hci_rp_le_read_resolv_list_size {
#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
#define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
......@@ -2270,6 +2284,8 @@ struct hci_ev_le_conn_complete {
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02
#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03
#define HCI_EV_LE_ADVERTISING_REPORT 0x02
struct hci_ev_le_advertising_info {
......@@ -2516,4 +2532,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
/* le24 support */
static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
{
dst[0] = val & 0xff;
dst[1] = (val & 0xff00) >> 8;
dst[2] = (val & 0xff0000) >> 16;
}
#endif /* __HCI_H */
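A quick illustration of the new le24 helper above (the value is chosen only for the example): the least significant byte is stored first.

	__u8 dst[3];

	hci_cpu_to_le24(0x123456, dst);
	/* dst[0] == 0x56, dst[1] == 0x34, dst[2] == 0x12 */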
......@@ -25,6 +25,7 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
......@@ -136,6 +137,23 @@ struct bdaddr_list_with_irk {
u8 local_irk[16];
};
struct bdaddr_list_with_flags {
struct list_head list;
bdaddr_t bdaddr;
u8 bdaddr_type;
u32 current_flags;
};
enum hci_conn_flags {
HCI_CONN_FLAG_REMOTE_WAKEUP,
HCI_CONN_FLAG_MAX
};
#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr))
/* Make sure number of flags doesn't exceed sizeof(current_flags) */
static_assert(HCI_CONN_FLAG_MAX < 32);
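/* Illustration only, not part of this change: testing the remote-wakeup
 * flag on a whitelist entry with the helper above; "entry" is a
 * hypothetical struct bdaddr_list_with_flags pointer.
 *
 *	if (hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
 *			       entry->current_flags))
 *		allow the peer to wake the host from suspend;
 */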
struct bt_uuid {
struct list_head list;
u8 uuid[16];
......@@ -220,6 +238,24 @@ struct adv_info {
#define HCI_MAX_ADV_INSTANCES 5
#define HCI_DEFAULT_ADV_DURATION 2
struct adv_pattern {
struct list_head list;
__u8 ad_type;
__u8 offset;
__u8 length;
__u8 value[HCI_MAX_AD_LENGTH];
};
struct adv_monitor {
struct list_head patterns;
bool active;
__u16 handle;
};
#define HCI_MIN_ADV_MONITOR_HANDLE 1
#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
#define HCI_MAX_SHORT_NAME_LENGTH 10
/* Min encryption key size to match with SMP */
......@@ -295,6 +331,14 @@ struct hci_dev {
__u8 le_scan_type;
__u16 le_scan_interval;
__u16 le_scan_window;
__u16 le_scan_int_suspend;
__u16 le_scan_window_suspend;
__u16 le_scan_int_discovery;
__u16 le_scan_window_discovery;
__u16 le_scan_int_adv_monitor;
__u16 le_scan_window_adv_monitor;
__u16 le_scan_int_connect;
__u16 le_scan_window_connect;
__u16 le_conn_min_interval;
__u16 le_conn_max_interval;
__u16 le_conn_latency;
......@@ -323,6 +367,17 @@ struct hci_dev {
__u16 devid_product;
__u16 devid_version;
__u8 def_page_scan_type;
__u16 def_page_scan_int;
__u16 def_page_scan_window;
__u8 def_inq_scan_type;
__u16 def_inq_scan_int;
__u16 def_inq_scan_window;
__u16 def_br_lsto;
__u16 def_page_timeout;
__u16 def_multi_adv_rotation_duration;
__u16 def_le_autoconnect_timeout;
__u16 pkt_type;
__u16 esco_type;
__u16 link_policy;
......@@ -438,7 +493,6 @@ struct hci_dev {
struct list_head mgmt_pending;
struct list_head blacklist;
struct list_head whitelist;
struct list_head wakeable;
struct list_head uuids;
struct list_head link_keys;
struct list_head long_term_keys;
......@@ -477,6 +531,9 @@ struct hci_dev {
__u16 adv_instance_timeout;
struct delayed_work adv_instance_expire;
struct idr adv_monitors_idr;
unsigned int adv_monitors_cnt;
__u8 irk[16];
__u32 rpa_timeout;
struct delayed_work rpa_expired;
......@@ -508,6 +565,12 @@ struct hci_dev {
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
enum conn_reasons {
CONN_REASON_PAIR_DEVICE,
CONN_REASON_L2CAP_CHAN,
CONN_REASON_SCO_CONNECT,
};
struct hci_conn {
struct list_head list;
......@@ -559,6 +622,8 @@ struct hci_conn {
__s8 max_tx_power;
unsigned long flags;
enum conn_reasons conn_reason;
__u32 clock;
__u16 clock_accuracy;
......@@ -626,7 +691,7 @@ struct hci_conn_params {
struct hci_conn *conn;
bool explicit_connect;
bool wakeable;
u32 current_flags;
};
extern struct list_head hci_dev_list;
......@@ -984,12 +1049,14 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level,
u16 conn_timeout);
u16 conn_timeout,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type);
u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
......@@ -1151,12 +1218,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
struct list_head *list, bdaddr_t *bdaddr,
u8 type);
struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
u8 type, u8 *peer_irk, u8 *local_irk);
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type, u32 flags);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
......@@ -1217,6 +1291,12 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct adv_monitor *monitor);
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle);
bool hci_is_adv_monitoring(struct hci_dev *hdev);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
void hci_init_sysfs(struct hci_dev *hdev);
......@@ -1279,6 +1359,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
/* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
......@@ -1387,7 +1470,7 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
__u8 encrypt;
if (conn->state == BT_CONFIG) {
if (status)
if (!status)
conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status);
......@@ -1402,11 +1485,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
else
encrypt = 0x01;
if (!status) {
if (conn->sec_level == BT_SECURITY_SDP)
conn->sec_level = BT_SECURITY_LOW;
if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level;
}
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
......
......@@ -31,8 +31,8 @@
#define HCI_TIME_STAMP 3
/* CMSG flags */
#define HCI_CMSG_DIR 0x0001
#define HCI_CMSG_TSTAMP 0x0002
#define HCI_CMSG_DIR 0x01
#define HCI_CMSG_TSTAMP 0x02
struct sockaddr_hci {
sa_family_t hci_family;
......
......@@ -52,6 +52,12 @@ struct mgmt_hdr {
__le16 len;
} __packed;
struct mgmt_tlv {
__le16 type;
__u8 length;
__u8 value[];
} __packed;
struct mgmt_addr_info {
bdaddr_t bdaddr;
__u8 type;
......@@ -702,6 +708,78 @@ struct mgmt_rp_set_exp_feature {
__le32 flags;
} __packed;
#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b
#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0
#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c
#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0
#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d
#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0
#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e
#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0
#define MGMT_OP_GET_DEVICE_FLAGS 0x004F
#define MGMT_GET_DEVICE_FLAGS_SIZE 7
struct mgmt_cp_get_device_flags {
struct mgmt_addr_info addr;
} __packed;
struct mgmt_rp_get_device_flags {
struct mgmt_addr_info addr;
__le32 supported_flags;
__le32 current_flags;
} __packed;
#define MGMT_OP_SET_DEVICE_FLAGS 0x0050
#define MGMT_SET_DEVICE_FLAGS_SIZE 11
struct mgmt_cp_set_device_flags {
struct mgmt_addr_info addr;
__le32 current_flags;
} __packed;
struct mgmt_rp_set_device_flags {
struct mgmt_addr_info addr;
} __packed;
#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0)
#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051
#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0
struct mgmt_rp_read_adv_monitor_features {
__le32 supported_features;
__le32 enabled_features;
__le16 max_num_handles;
__u8 max_num_patterns;
__le16 num_handles;
__le16 handles[];
} __packed;
struct mgmt_adv_pattern {
__u8 ad_type;
__u8 offset;
__u8 length;
__u8 value[31];
} __packed;
#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
struct mgmt_cp_add_adv_patterns_monitor {
__u8 pattern_count;
struct mgmt_adv_pattern patterns[];
} __packed;
#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1
struct mgmt_rp_add_adv_patterns_monitor {
__le16 monitor_handle;
} __packed;
#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053
struct mgmt_cp_remove_adv_monitor {
__le16 monitor_handle;
} __packed;
#define MGMT_REMOVE_ADV_MONITOR_SIZE 2
struct mgmt_rp_remove_adv_monitor {
__le16 monitor_handle;
} __packed;
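For reference, a hedged sketch of the payload a management client could build for the Add Advertisement Patterns Monitor command defined above: a single pattern matching AD type 0xff (manufacturer specific data) whose two bytes at offset 0 are 0x4c 0x00. The AD type, offset and values are purely illustrative.

struct {
	__u8 pattern_count;
	struct mgmt_adv_pattern patterns[1];
} __packed cp = {
	.pattern_count = 1,
	.patterns = {
		{
			.ad_type = 0xff,	/* manufacturer specific data */
			.offset  = 0,
			.length  = 2,
			.value   = { 0x4c, 0x00 },
		},
	},
};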
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
......@@ -933,3 +1011,20 @@ struct mgmt_ev_exp_feature_changed {
__u8 uuid[16];
__le32 flags;
} __packed;
#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a
struct mgmt_ev_device_flags_changed {
struct mgmt_addr_info addr;
__le32 supported_flags;
__le32 current_flags;
} __packed;
#define MGMT_EV_ADV_MONITOR_ADDED 0x002b
struct mgmt_ev_adv_monitor_added {
__le16 monitor_handle;
} __packed;
#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c
struct mgmt_ev_adv_monitor_removed {
__le16 monitor_handle;
} __packed;
......@@ -46,4 +46,6 @@ struct sco_conninfo {
__u8 dev_class[3];
};
#define SCO_CMSG_PKT_STATUS 0x01
#endif /* __SCO_H */
......@@ -50,6 +50,7 @@ static bool enable_6lowpan;
/* We are listening incoming connections via this channel
*/
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);
struct lowpan_peer {
struct list_head list;
......@@ -1078,12 +1079,14 @@ static void do_enable_set(struct work_struct *work)
enable_6lowpan = set_enable->flag;
mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
}
listen_chan = bt_6lowpan_listen();
mutex_unlock(&set_lock);
kfree(set_enable);
}
......@@ -1135,11 +1138,13 @@ static ssize_t lowpan_control_write(struct file *fp,
if (ret == -EINVAL)
return ret;
mutex_lock(&set_lock);
if (listen_chan) {
l2cap_chan_close(listen_chan, 0);
l2cap_chan_put(listen_chan);
listen_chan = NULL;
}
mutex_unlock(&set_lock);
if (conn) {
struct lowpan_peer *peer;
......
......@@ -21,7 +21,7 @@ menuconfig BT
It was designed as a replacement for cables and other short-range
technologies like IrDA. Bluetooth operates in personal area range
that typically extends up to 10 meters. More information about
Bluetooth can be found at <http://www.bluetooth.com/>.
Bluetooth can be found at <https://www.bluetooth.com/>.
Linux Bluetooth subsystem consist of several layers:
Bluetooth Core
......
......@@ -14,7 +14,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o hci_request.o mgmt_util.o
ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
......
......@@ -286,6 +286,9 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (msg->msg_name && bt_sk(sk)->skb_msg_name)
bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
&msg->msg_namelen);
if (bt_sk(sk)->skb_put_cmsg)
bt_sk(sk)->skb_put_cmsg(skb, msg, sk);
}
skb_free_datagram(sk, skb);
......@@ -453,8 +456,6 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask = 0;
BT_DBG("sock %p, sk %p", sock, sk);
poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == BT_LISTEN)
......
......@@ -789,11 +789,8 @@ static void set_ext_conn_params(struct hci_conn *conn,
memset(p, 0, sizeof(*p));
/* Set window to be the same value as the interval to
* enable continuous scanning.
*/
p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
p->scan_window = p->scan_interval;
p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
p->conn_latency = cpu_to_le16(conn->le_conn_latency);
......@@ -875,11 +872,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
memset(&cp, 0, sizeof(cp));
/* Set window to be the same value as the interval to enable
* continuous scanning.
*/
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
cp.scan_window = cp.scan_interval;
cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
......@@ -937,7 +931,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
* So it is required to remove adv set for handle 0x00. since we use
* instance 0 for directed adv.
*/
hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle);
__hci_req_remove_ext_adv_instance(req, cp.handle);
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
......@@ -1009,6 +1003,11 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_request req;
int err;
/* This ensures that during disable le_scan address resolution
* will not be disabled if it is followed by le_create_conn
*/
bool rpa_le_conn = true;
/* Let's make sure that le is enabled.*/
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (lmp_le_capable(hdev))
......@@ -1109,7 +1108,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* state.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(&req);
hci_req_add_le_scan_disable(&req, rpa_le_conn);
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
......@@ -1180,7 +1179,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level,
u16 conn_timeout)
u16 conn_timeout,
enum conn_reasons conn_reason)
{
struct hci_conn *conn;
......@@ -1225,6 +1225,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW;
conn->pending_sec_level = sec_level;
conn->conn_timeout = conn_timeout;
conn->conn_reason = conn_reason;
hci_update_background_scan(hdev);
......@@ -1234,7 +1235,8 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
}
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type)
u8 sec_level, u8 auth_type,
enum conn_reasons conn_reason)
{
struct hci_conn *acl;
......@@ -1254,6 +1256,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
hci_conn_hold(acl);
acl->conn_reason = conn_reason;
if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
......@@ -1270,7 +1273,8 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
struct hci_conn *acl;
struct hci_conn *sco;
acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
CONN_REASON_SCO_CONNECT);
if (IS_ERR(acl))
return acl;
......@@ -1323,6 +1327,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
return 0;
}
/* AES encryption is required for Level 4:
*
* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
* page 1319:
*
* 128-bit equivalent strength for link and encryption keys
* required using FIPS approved algorithms (E0 not allowed,
* SAFER+ not allowed, and P-192 not allowed; encryption key
* not shortened)
*/
if (conn->sec_level == BT_SECURITY_FIPS &&
!test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
bt_dev_err(conn->hdev,
"Invalid security: Missing AES-CCM usage");
return 0;
}
if (hci_conn_ssp_enabled(conn) &&
!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0;
......
......@@ -2296,6 +2296,22 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
if (!conn)
return;
/* When using controller based address resolution, then the new
* address types 0x02 and 0x03 are used. These types need to be
* converted back into either public address or random address type
*/
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
switch (own_address_type) {
case ADDR_LE_DEV_PUBLIC_RESOLVED:
own_address_type = ADDR_LE_DEV_PUBLIC;
break;
case ADDR_LE_DEV_RANDOM_RESOLVED:
own_address_type = ADDR_LE_DEV_RANDOM;
break;
}
}
/* Store the initiator and responder address information which
* is needed for SMP. These values will not change during the
* lifetime of the connection.
......@@ -2517,7 +2533,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
......@@ -2697,7 +2713,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/
if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
!hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
return;
......@@ -2825,7 +2841,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_AUTO_CONN_LINK_LOSS:
if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
break;
/* Fall through */
fallthrough;
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
......@@ -3065,23 +3081,19 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Check link security requirements are met */
if (!hci_conn_check_link_mode(conn))
ev->status = HCI_ERROR_AUTH_FAILURE;
if (ev->status && conn->state == BT_CONNECTED) {
if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_drop(conn);
goto unlock;
}
/* In Secure Connections Only mode, do not allow any connections
* that are not encrypted with AES-CCM using a P-256 authenticated
* combination key.
/* Notify upper layers so they can cleanup before
* disconnecting.
*/
if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
(!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
hci_encrypt_cfm(conn, ev->status);
hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_drop(conn);
goto unlock;
}
......@@ -4163,6 +4175,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) {
u32 flags;
......@@ -4184,6 +4199,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
if (skb->len < num_rsp * sizeof(*info) + 1)
goto unlock;
for (; num_rsp; num_rsp--, info++) {
u32 flags;
......@@ -4204,6 +4222,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
}
}
unlock:
hci_dev_unlock(hdev);
}
......@@ -4324,7 +4343,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
if (hci_setup_sync(conn, conn->link->handle))
goto unlock;
}
/* fall through */
fallthrough;
default:
conn->state = BT_CLOSED;
......@@ -4379,7 +4398,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
......@@ -5209,6 +5228,11 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout));
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
hci_req_disable_address_resolution(hdev);
}
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
......@@ -5319,7 +5343,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
direct_rpa);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
......@@ -5447,14 +5471,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
/* Passive scanning shouldn't trigger any device found events,
* except for devices marked as CONN_REPORT for which we do send
* device found events.
* device found events, or advertisement monitoring requested.
*/
if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
if (type == LE_ADV_DIRECT_IND)
return;
if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
bdaddr, bdaddr_type))
bdaddr, bdaddr_type) &&
idr_is_empty(&hdev->adv_monitors_idr))
return;
if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
......
......@@ -65,11 +65,12 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
void __hci_req_update_name(struct hci_request *req);
void __hci_req_update_eir(struct hci_request *req);
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
void hci_req_disable_address_resolution(struct hci_dev *hdev);
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
......@@ -86,6 +87,8 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance);
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance);
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance);
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance);
void __hci_req_clear_ext_adv_sets(struct hci_request *req);
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bool use_rpa, struct adv_info *adv_instance,
......
......@@ -52,7 +52,7 @@ struct hci_pinfo {
struct bt_sock bt;
struct hci_dev *hdev;
struct hci_filter filter;
__u32 cmsg_mask;
__u8 cmsg_mask;
unsigned short channel;
unsigned long flags;
__u32 cookie;
......@@ -443,8 +443,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
case HCI_DEV_SETUP:
if (hdev->manufacturer == 0xffff)
return NULL;
/* fall through */
fallthrough;
case HCI_DEV_UP:
skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
......@@ -1399,7 +1398,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
__u8 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming;
......
......@@ -666,8 +666,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
l2cap_seq_list_free(&chan->srej_list);
l2cap_seq_list_free(&chan->retrans_list);
/* fall through */
fallthrough;
case L2CAP_MODE_STREAMING:
skb_queue_purge(&chan->tx_q);
......@@ -872,7 +871,8 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
else
return HCI_AT_NO_BONDING;
}
/* fall through */
fallthrough;
default:
switch (chan->sec_level) {
case BT_SECURITY_HIGH:
......@@ -2983,8 +2983,7 @@ static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
break;
case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
l2cap_process_reqseq(chan, control->reqseq);
/* Fall through */
fallthrough;
case L2CAP_EV_RECV_FBIT:
if (control && control->final) {
......@@ -3311,7 +3310,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
case L2CAP_MODE_ERTM:
if (l2cap_mode_supported(mode, remote_feat_mask))
return mode;
/* fall through */
fallthrough;
default:
return L2CAP_MODE_BASIC;
}
......@@ -3447,7 +3446,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data
if (__l2cap_efs_supported(chan->conn))
set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* fall through */
fallthrough;
default:
chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
break;
......@@ -4539,7 +4538,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done;
break;
}
/* fall through */
fallthrough;
default:
l2cap_chan_set_err(chan, ECONNRESET);
......@@ -7719,7 +7718,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->mtu = hcon->hdev->le_mtu;
break;
}
/* fall through */
fallthrough;
default:
conn->mtu = hcon->hdev->acl_mtu;
break;
......@@ -7841,7 +7840,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
fallthrough;
default:
err = -EOPNOTSUPP;
goto done;
......@@ -7893,11 +7892,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level,
HCI_LE_CONN_TIMEOUT);
HCI_LE_CONN_TIMEOUT,
CONN_REASON_L2CAP_CHAN);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
CONN_REASON_L2CAP_CHAN);
}
if (IS_ERR(hcon)) {
......
......@@ -284,7 +284,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
fallthrough;
default:
err = -EOPNOTSUPP;
goto done;
......@@ -760,7 +760,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
break;
/* fall through */
fallthrough;
default:
err = -EINVAL;
break;
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Google Corporation
*/
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "mgmt_util.h"
#include "mgmt_config.h"
#define HDEV_PARAM_U16(_param_code_, _param_name_) \
{ \
{ cpu_to_le16(_param_code_), sizeof(__u16) }, \
{ cpu_to_le16(hdev->_param_name_) } \
}
#define HDEV_PARAM_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \
{ \
{ cpu_to_le16(_param_code_), sizeof(__u16) }, \
{ cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) } \
}
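/*
 * Illustration, not part of the file: HDEV_PARAM_U16(0x000d, le_scan_interval)
 * expands to one params[] entry of the form
 *
 *	{
 *		{ cpu_to_le16(0x000d), sizeof(__u16) },
 *		{ cpu_to_le16(hdev->le_scan_interval) }
 *	}
 *
 * i.e. an mgmt_tlv header (little-endian type plus a 2-byte length)
 * immediately followed by the little-endian value.
 */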
int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct {
struct mgmt_tlv entry;
union {
/* This is a simplification for now since all values
* are 16 bits. In the future, this code may need
* refactoring to account for variable length values
* and properly calculate the required buffer size.
*/
__le16 value;
};
} __packed params[] = {
/* Please see mgmt-api.txt for documentation of these values */
HDEV_PARAM_U16(0x0000, def_page_scan_type),
HDEV_PARAM_U16(0x0001, def_page_scan_int),
HDEV_PARAM_U16(0x0002, def_page_scan_window),
HDEV_PARAM_U16(0x0003, def_inq_scan_type),
HDEV_PARAM_U16(0x0004, def_inq_scan_int),
HDEV_PARAM_U16(0x0005, def_inq_scan_window),
HDEV_PARAM_U16(0x0006, def_br_lsto),
HDEV_PARAM_U16(0x0007, def_page_timeout),
HDEV_PARAM_U16(0x0008, sniff_min_interval),
HDEV_PARAM_U16(0x0009, sniff_max_interval),
HDEV_PARAM_U16(0x000a, le_adv_min_interval),
HDEV_PARAM_U16(0x000b, le_adv_max_interval),
HDEV_PARAM_U16(0x000c, def_multi_adv_rotation_duration),
HDEV_PARAM_U16(0x000d, le_scan_interval),
HDEV_PARAM_U16(0x000e, le_scan_window),
HDEV_PARAM_U16(0x000f, le_scan_int_suspend),
HDEV_PARAM_U16(0x0010, le_scan_window_suspend),
HDEV_PARAM_U16(0x0011, le_scan_int_discovery),
HDEV_PARAM_U16(0x0012, le_scan_window_discovery),
HDEV_PARAM_U16(0x0013, le_scan_int_adv_monitor),
HDEV_PARAM_U16(0x0014, le_scan_window_adv_monitor),
HDEV_PARAM_U16(0x0015, le_scan_int_connect),
HDEV_PARAM_U16(0x0016, le_scan_window_connect),
HDEV_PARAM_U16(0x0017, le_conn_min_interval),
HDEV_PARAM_U16(0x0018, le_conn_max_interval),
HDEV_PARAM_U16(0x0019, le_conn_latency),
HDEV_PARAM_U16(0x001a, le_supv_timeout),
HDEV_PARAM_U16_JIFFIES_TO_MSECS(0x001b,
def_le_autoconnect_timeout),
};
struct mgmt_rp_read_def_system_config *rp = (void *)params;
bt_dev_dbg(hdev, "sock %p", sk);
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_READ_DEF_SYSTEM_CONFIG,
0, rp, sizeof(params));
}
#define TO_TLV(x) ((struct mgmt_tlv *)(x))
#define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value)))
int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
u16 buffer_left = data_len;
u8 *buffer = data;
if (buffer_left < sizeof(struct mgmt_tlv)) {
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
/* First pass to validate the tlv */
while (buffer_left >= sizeof(struct mgmt_tlv)) {
const u8 len = TO_TLV(buffer)->length;
const u16 exp_len = sizeof(struct mgmt_tlv) +
len;
const u16 type = le16_to_cpu(TO_TLV(buffer)->type);
if (buffer_left < exp_len) {
bt_dev_warn(hdev, "invalid len left %d, exp >= %d",
buffer_left, exp_len);
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
/* Please see mgmt-api.txt for documentation of these values */
switch (type) {
case 0x0000:
case 0x0001:
case 0x0002:
case 0x0003:
case 0x0004:
case 0x0005:
case 0x0006:
case 0x0007:
case 0x0008:
case 0x0009:
case 0x000a:
case 0x000b:
case 0x000c:
case 0x000d:
case 0x000e:
case 0x000f:
case 0x0010:
case 0x0011:
case 0x0012:
case 0x0013:
case 0x0014:
case 0x0015:
case 0x0016:
case 0x0017:
case 0x0018:
case 0x0019:
case 0x001a:
case 0x001b:
if (len != sizeof(u16)) {
bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d",
len, sizeof(u16), type);
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
break;
default:
bt_dev_warn(hdev, "unsupported parameter %u", type);
break;
}
buffer_left -= exp_len;
buffer += exp_len;
}
buffer_left = data_len;
buffer = data;
while (buffer_left >= sizeof(struct mgmt_tlv)) {
const u8 len = TO_TLV(buffer)->length;
const u16 exp_len = sizeof(struct mgmt_tlv) +
len;
const u16 type = le16_to_cpu(TO_TLV(buffer)->type);
switch (type) {
case 0x0000:
hdev->def_page_scan_type = TLV_GET_LE16(buffer);
break;
case 0x0001:
hdev->def_page_scan_int = TLV_GET_LE16(buffer);
break;
case 0x0002:
hdev->def_page_scan_window = TLV_GET_LE16(buffer);
break;
case 0x0003:
hdev->def_inq_scan_type = TLV_GET_LE16(buffer);
break;
case 0x0004:
hdev->def_inq_scan_int = TLV_GET_LE16(buffer);
break;
case 0x0005:
hdev->def_inq_scan_window = TLV_GET_LE16(buffer);
break;
case 0x0006:
hdev->def_br_lsto = TLV_GET_LE16(buffer);
break;
case 0x0007:
hdev->def_page_timeout = TLV_GET_LE16(buffer);
break;
case 0x0008:
hdev->sniff_min_interval = TLV_GET_LE16(buffer);
break;
case 0x0009:
hdev->sniff_max_interval = TLV_GET_LE16(buffer);
break;
case 0x000a:
hdev->le_adv_min_interval = TLV_GET_LE16(buffer);
break;
case 0x000b:
hdev->le_adv_max_interval = TLV_GET_LE16(buffer);
break;
case 0x000c:
hdev->def_multi_adv_rotation_duration =
TLV_GET_LE16(buffer);
break;
case 0x000d:
hdev->le_scan_interval = TLV_GET_LE16(buffer);
break;
case 0x000e:
hdev->le_scan_window = TLV_GET_LE16(buffer);
break;
case 0x000f:
hdev->le_scan_int_suspend = TLV_GET_LE16(buffer);
break;
case 0x0010:
hdev->le_scan_window_suspend = TLV_GET_LE16(buffer);
break;
case 0x0011:
hdev->le_scan_int_discovery = TLV_GET_LE16(buffer);
break;
case 0x00012:
hdev->le_scan_window_discovery = TLV_GET_LE16(buffer);
break;
case 0x00013:
hdev->le_scan_int_adv_monitor = TLV_GET_LE16(buffer);
break;
case 0x00014:
hdev->le_scan_window_adv_monitor = TLV_GET_LE16(buffer);
break;
case 0x00015:
hdev->le_scan_int_connect = TLV_GET_LE16(buffer);
break;
case 0x00016:
hdev->le_scan_window_connect = TLV_GET_LE16(buffer);
break;
case 0x00017:
hdev->le_conn_min_interval = TLV_GET_LE16(buffer);
break;
case 0x00018:
hdev->le_conn_max_interval = TLV_GET_LE16(buffer);
break;
case 0x00019:
hdev->le_conn_latency = TLV_GET_LE16(buffer);
break;
case 0x0001a:
hdev->le_supv_timeout = TLV_GET_LE16(buffer);
break;
case 0x0001b:
hdev->def_le_autoconnect_timeout =
msecs_to_jiffies(TLV_GET_LE16(buffer));
break;
default:
bt_dev_warn(hdev, "unsupported parameter %u", type);
break;
}
buffer_left -= exp_len;
buffer += exp_len;
}
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_SET_DEF_SYSTEM_CONFIG, 0, NULL, 0);
}
int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
bt_dev_dbg(hdev, "sock %p", sk);
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_READ_DEF_RUNTIME_CONFIG, 0, NULL, 0);
}
int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
bt_dev_dbg(hdev, "sock %p", sk);
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
}
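To complement the TLV parser above, a hedged userspace-style sketch of how a single parameter for the Set Default System Configuration command could be laid out in memory. Parameter 0x0017 (le_conn_min_interval) and its value are illustrative; both fields are little-endian on the wire, hence the htole16() calls.

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct mgmt_tlv_u16 {
	uint16_t type;		/* parameter code, little-endian */
	uint8_t  length;	/* sizeof(uint16_t) */
	uint16_t value;		/* parameter value, little-endian */
} __attribute__((packed));

static size_t build_one_param(uint8_t *buf)
{
	struct mgmt_tlv_u16 tlv = {
		.type   = htole16(0x0017),	/* le_conn_min_interval */
		.length = sizeof(uint16_t),
		.value  = htole16(0x0018),	/* 24 * 1.25 ms = 30 ms */
	};

	memcpy(buf, &tlv, sizeof(tlv));
	return sizeof(tlv);			/* 5 bytes: type + length + value */
}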
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Google Corporation
*/
int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
......@@ -139,3 +139,10 @@ void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb)
bt_dev_dbg(hdev, "MSFT vendor event %u", event);
}
__u64 msft_get_features(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
return msft ? msft->features : 0;
}
......@@ -3,16 +3,25 @@
* Copyright (C) 2020 Google Corporation
*/
#define MSFT_FEATURE_MASK_BREDR_RSSI_MONITOR BIT(0)
#define MSFT_FEATURE_MASK_LE_CONN_RSSI_MONITOR BIT(1)
#define MSFT_FEATURE_MASK_LE_ADV_RSSI_MONITOR BIT(2)
#define MSFT_FEATURE_MASK_LE_ADV_MONITOR BIT(3)
#define MSFT_FEATURE_MASK_CURVE_VALIDITY BIT(4)
#define MSFT_FEATURE_MASK_CONCURRENT_ADV_MONITOR BIT(5)
#if IS_ENABLED(CONFIG_BT_MSFTEXT)
void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
__u64 msft_get_features(struct hci_dev *hdev);
#else
static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
#endif
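A short hedged example of how the new msft_get_features() helper and the feature bits above might be used, for instance to gate advertisement monitor offload; the call site is an assumption, not part of this commit.

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		bt_dev_dbg(hdev, "controller supports offloaded adv monitoring");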
......@@ -479,7 +479,7 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
/* if closing a dlc in a session that hasn't been started,
* just close and unlink the dlc
*/
/* fall through */
fallthrough;
default:
rfcomm_dlc_clear_timer(d);
......
......@@ -218,7 +218,7 @@ static void __rfcomm_sock_close(struct sock *sk)
case BT_CONFIG:
case BT_CONNECTED:
rfcomm_dlc_close(d, 0);
/* fall through */
fallthrough;
default:
sock_set_flag(sk, SOCK_ZAPPED);
......
......@@ -66,6 +66,7 @@ struct sco_pinfo {
bdaddr_t dst;
__u32 flags;
__u16 setting;
__u8 cmsg_mask;
struct sco_conn *conn;
};
......@@ -449,6 +450,15 @@ static void sco_sock_close(struct sock *sk)
sco_sock_kill(sk);
}
static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg,
struct sock *sk)
{
if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS)
put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS,
sizeof(bt_cb(skb)->sco.pkt_status),
&bt_cb(skb)->sco.pkt_status);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
......@@ -457,6 +467,8 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
sk->sk_type = parent->sk_type;
bt_sk(sk)->flags = bt_sk(parent)->flags;
security_sk_clone(parent, sk);
} else {
bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg;
}
}
......@@ -846,6 +858,18 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sco_pi(sk)->setting = voice.setting;
break;
case BT_PKT_STATUS:
if (get_user(opt, (u32 __user *)optval)) {
err = -EFAULT;
break;
}
if (opt)
sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
else
sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS;
break;
default:
err = -ENOPROTOOPT;
break;
......@@ -923,6 +947,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
int len, err = 0;
struct bt_voice voice;
u32 phys;
int pkt_status;
BT_DBG("sk %p", sk);
......@@ -969,6 +994,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
err = -EFAULT;
break;
case BT_PKT_STATUS:
pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS);
if (put_user(pkt_status, (int __user *)optval))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
......
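To round out the SCO changes above, a hedged userspace sketch of consuming the new per-packet status: enable BT_PKT_STATUS on the socket, then read the BT_SCM_PKT_STATUS control message on each recvmsg(). The constants mirror the values added in this series; connection setup and most error handling are omitted.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define SOL_BLUETOOTH		274
#define BT_PKT_STATUS		16
#define BT_SCM_PKT_STATUS	0x03

static ssize_t read_sco_with_status(int sk, uint8_t *buf, size_t len,
				    uint8_t *pkt_status)
{
	uint32_t enable = 1;
	char cbuf[CMSG_SPACE(sizeof(uint8_t))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t ret;

	/* Ask the kernel to attach the packet status to each SCO frame. */
	setsockopt(sk, SOL_BLUETOOTH, BT_PKT_STATUS, &enable, sizeof(enable));

	ret = recvmsg(sk, &msg, 0);
	if (ret < 0)
		return ret;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_BLUETOOTH &&
		    cmsg->cmsg_type == BT_SCM_PKT_STATUS)
			memcpy(pkt_status, CMSG_DATA(cmsg), sizeof(*pkt_status));
	}

	return ret;
}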
......@@ -205,7 +205,7 @@ static int __init test_ecdh(void)
calltime = ktime_get();
tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0);
tfm = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(tfm)) {
BT_ERR("Unable to create ECDH crypto context");
err = PTR_ERR(tfm);
......
......@@ -1387,7 +1387,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
goto zfree_smp;
}
smp->tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0);
smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(smp->tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context");
goto free_shash;
......@@ -1654,7 +1654,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
memset(smp->tk, 0, sizeof(smp->tk));
BT_DBG("PassKey: %d", value);
put_unaligned_le32(value, smp->tk);
/* Fall Through */
fallthrough;
case MGMT_OP_USER_CONFIRM_REPLY:
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
break;
......@@ -3282,7 +3282,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
return ERR_CAST(tfm_cmac);
}
tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0);
tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context");
crypto_free_shash(tfm_cmac);
......@@ -3847,7 +3847,7 @@ int __init bt_selftest_smp(void)
return PTR_ERR(tfm_cmac);
}
tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0);
tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
if (IS_ERR(tfm_ecdh)) {
BT_ERR("Unable to create ECDH crypto context");
crypto_free_shash(tfm_cmac);
......