Commit e2dfb94f authored by Jakub Kicinski

Merge tag 'for-net-next-2021-12-29' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

 - Add support for Foxconn MT7922A
 - Add support for Realtek RTL8852AE
 - Rework HCI event handling to use skb_pull_data

* tag 'for-net-next-2021-12-29' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next: (62 commits)
  Bluetooth: MGMT: Fix spelling mistake "simultanous" -> "simultaneous"
  Bluetooth: vhci: Set HCI_QUIRK_VALID_LE_STATES
  Bluetooth: MGMT: Fix LE simultaneous roles UUID if not supported
  Bluetooth: hci_sync: Add check simultaneous roles support
  Bluetooth: hci_sync: Wait for proper events when connecting LE
  Bluetooth: hci_sync: Add support for waiting specific LE subevents
  Bluetooth: hci_sync: Add hci_le_create_conn_sync
  Bluetooth: hci_event: Use skb_pull_data when processing inquiry results
  Bluetooth: hci_sync: Push sync command cancellation to workqueue
  Bluetooth: hci_qca: Stop IBS timer during BT OFF
  Bluetooth: btusb: Add support for Foxconn MT7922A
  Bluetooth: btintel: Add missing quirks and msft ext for legacy bootloader
  Bluetooth: btusb: Add two more Bluetooth parts for WCN6855
  Bluetooth: L2CAP: Fix using wrong mode
  Bluetooth: hci_sync: Fix not always pausing advertising when necessary
  Bluetooth: mgmt: Make use of mgmt_send_event_skb in MGMT_EV_DEVICE_CONNECTED
  Bluetooth: mgmt: Make use of mgmt_send_event_skb in MGMT_EV_DEVICE_FOUND
  Bluetooth: mgmt: Introduce mgmt_alloc_skb and mgmt_send_event_skb
  Bluetooth: btusb: Return error code when getting patch status failed
  Bluetooth: btusb: Handle download_firmware failure cases
  ...
====================

Link: https://lore.kernel.org/r/20211229211258.2290966-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents cfcad56b 5d1dd2e5
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/firmware.h>
+#include <linux/dmi.h>
 #include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
@@ -343,6 +344,40 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
         return skb;
 }
+static const struct dmi_system_id disable_broken_read_transmit_power[] = {
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,1"),
+                },
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"),
+                },
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,4"),
+                },
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,1"),
+                },
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,2"),
+                },
+        },
+        { }
+};
 static int btbcm_read_info(struct hci_dev *hdev)
 {
         struct sk_buff *skb;
@@ -363,6 +398,10 @@ static int btbcm_read_info(struct hci_dev *hdev)
         bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
         kfree_skb(skb);
+        /* Read DMI and disable broken Read LE Min/Max Tx Power */
+        if (dmi_first_match(disable_broken_read_transmit_power))
+                set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
         return 0;
 }
...
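For context, the quirk set above is consumed later by the core's LE init sequence, which skips the broken command and leaves the cached values at HCI_TX_POWER_INVALID (as documented with the quirk in hci.h further down). A minimal sketch of that consumer side, with a hypothetical helper name and the response parsing omitted (the real code also fills hdev->min/max_le_tx_power from the reply):

        /* Sketch only: gate HCI_OP_LE_READ_TRANSMIT_POWER on the quirk. */
        static int le_read_tx_power_if_supported(struct hci_dev *hdev)
        {
                if (test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
                        return 0;       /* keep HCI_TX_POWER_INVALID defaults */

                return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
                                             0, NULL, HCI_CMD_TIMEOUT);
        }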
@@ -2498,10 +2498,14 @@ static int btintel_setup_combined(struct hci_dev *hdev)
         case 0x12: /* ThP */
         case 0x13: /* HrP */
         case 0x14: /* CcP */
-                /* Some legacy bootloader devices from JfP supports both old
-                 * and TLV based HCI_Intel_Read_Version command. But we don't
-                 * want to use the TLV based setup routines for those legacy
-                 * bootloader device.
+                /* Some legacy bootloader devices starting from JfP,
+                 * the operational firmware supports both old and TLV based
+                 * HCI_Intel_Read_Version command based on the command
+                 * parameter.
+                 *
+                 * For upgrading firmware case, the TLV based version cannot
+                 * be used because the firmware filename for legacy bootloader
+                 * is based on the old format.
                  *
                  * Also, it is not easy to convert TLV based version from the
                  * legacy version format.
@@ -2513,6 +2517,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
                 err = btintel_read_version(hdev, &ver);
                 if (err)
                         return err;
+                /* Apply the device specific HCI quirks
+                 *
+                 * All Legacy bootloader devices support WBS
+                 */
+                set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+                /* Valid LE States quirk for JfP/ThP familiy */
+                if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
+                        set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+                /* Setup MSFT Extension support */
+                btintel_set_msft_opcode(hdev, ver.hw_variant);
                 err = btintel_bootloader_setup(hdev, &ver);
                 break;
         case 0x17:
...
@@ -121,6 +121,7 @@ int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
                 } else {
                         bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
                                    status);
+                        err = -EIO;
                         goto err_release_fw;
                 }
         }
...
@@ -98,6 +98,8 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
 #define MTK_SDIO_BLOCK_SIZE 256
 #define BTMTKSDIO_TX_WAIT_VND_EVT 1
+#define BTMTKSDIO_HW_TX_READY 2
+#define BTMTKSDIO_FUNC_ENABLED 3
 struct mtkbtsdio_hdr {
         __le16 len;
@@ -113,7 +115,6 @@ struct btmtksdio_dev {
         struct work_struct txrx_work;
         unsigned long tx_state;
         struct sk_buff_head txq;
-        bool hw_tx_ready;
         struct sk_buff *evt_skb;
@@ -254,7 +255,7 @@ static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
         sdio_hdr->reserved = cpu_to_le16(0);
         sdio_hdr->bt_type = hci_skb_pkt_type(skb);
-        bdev->hw_tx_ready = false;
+        clear_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
         err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
                            round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
         if (err < 0)
@@ -324,8 +325,29 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
         return err;
 }
+static int btmtksdio_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
+{
+        struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+        u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
+        switch (handle) {
+        case 0xfc6f:
+                /* Firmware dump from device: when the firmware hangs, the
+                 * device can no longer suspend and thus disable auto-suspend.
+                 */
+                pm_runtime_forbid(bdev->dev);
+                fallthrough;
+        case 0x05ff:
+        case 0x05fe:
+                /* Firmware debug logging */
+                return hci_recv_diag(hdev, skb);
+        }
+        return hci_recv_frame(hdev, skb);
+}
 static const struct h4_recv_pkt mtk_recv_pkts[] = {
-        { H4_RECV_ACL,      .recv = hci_recv_frame },
+        { H4_RECV_ACL,      .recv = btmtksdio_recv_acl },
         { H4_RECV_SCO,      .recv = hci_recv_frame },
         { H4_RECV_EVENT,    .recv = btmtksdio_recv_event },
 };
@@ -463,11 +485,12 @@ static void btmtksdio_txrx_work(struct work_struct *work)
                         bt_dev_dbg(bdev->hdev, "Get fw own back");
                 if (int_status & TX_EMPTY)
-                        bdev->hw_tx_ready = true;
+                        set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
                 else if (unlikely(int_status & TX_FIFO_OVERFLOW))
                         bt_dev_warn(bdev->hdev, "Tx fifo overflow");
-                if (bdev->hw_tx_ready) {
+                if (test_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state)) {
                         skb = skb_dequeue(&bdev->txq);
                         if (skb) {
                                 err = btmtksdio_tx_packet(bdev, skb);
@@ -517,6 +540,8 @@ static int btmtksdio_open(struct hci_dev *hdev)
         if (err < 0)
                 goto err_release_host;
+        set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
         /* Get ownership from the device */
         sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
         if (err < 0)
@@ -618,6 +643,7 @@ static int btmtksdio_close(struct hci_dev *hdev)
         if (err < 0)
                 bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+        clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
         sdio_disable_func(bdev->func);
         sdio_release_host(bdev->func);
@@ -765,6 +791,9 @@ static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
                 return err;
         }
+        hci_set_msft_opcode(hdev, 0xFD30);
+        hci_set_aosp_capable(hdev);
         return err;
 }
@@ -811,7 +840,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
         u32 fw_version = 0;
         calltime = ktime_get();
-        bdev->hw_tx_ready = true;
+        set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
         switch (bdev->data->chipid) {
         case 0x7921:
@@ -1036,6 +1065,11 @@ static int btmtksdio_runtime_suspend(struct device *dev)
         if (!bdev)
                 return 0;
+        if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+                return 0;
+        sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
         sdio_claim_host(bdev->func);
         sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
@@ -1063,6 +1097,9 @@ static int btmtksdio_runtime_resume(struct device *dev)
         if (!bdev)
                 return 0;
+        if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+                return 0;
         sdio_claim_host(bdev->func);
         sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
...
This diff is collapsed.
@@ -1928,6 +1928,9 @@ static int qca_power_off(struct hci_dev *hdev)
         hu->hdev->hw_error = NULL;
         hu->hdev->cmd_timeout = NULL;
+        del_timer_sync(&qca->wake_retrans_timer);
+        del_timer_sync(&qca->tx_idle_timer);
         /* Stop sending shutdown command if soc crashes. */
         if (soc_type != QCA_ROME
             && qca->memdump_state == QCA_MEMDUMP_IDLE) {
...
@@ -331,6 +331,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
         if (opcode & 0x80)
                 set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+        set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
         if (hci_register_dev(hdev) < 0) {
                 BT_ERR("Can't register HCI device");
                 hci_free_dev(hdev);
...
@@ -2376,6 +2376,8 @@ static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
         return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 }
+void *skb_pull_data(struct sk_buff *skb, size_t len);
 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
 static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
...
@@ -390,6 +390,11 @@ struct hci_ctrl {
         };
 };
+struct mgmt_ctrl {
+        struct hci_dev *hdev;
+        u16 opcode;
+};
 struct bt_skb_cb {
         u8 pkt_type;
         u8 force_active;
@@ -399,6 +404,7 @@ struct bt_skb_cb {
                 struct l2cap_ctrl l2cap;
                 struct sco_ctrl sco;
                 struct hci_ctrl hci;
+                struct mgmt_ctrl mgmt;
         };
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -406,6 +412,7 @@ struct bt_skb_cb {
 #define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
 #define hci_skb_expect(skb) bt_cb((skb))->expect
 #define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
+#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
 #define hci_skb_sk(skb) bt_cb((skb))->hci.sk
 static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
...
@@ -246,6 +246,15 @@ enum {
          * HCI after resume.
          */
         HCI_QUIRK_NO_SUSPEND_NOTIFIER,
+        /*
+         * When this quirk is set, LE tx power is not queried on startup
+         * and the min/max tx power values default to HCI_TX_POWER_INVALID.
+         *
+         * This quirk can be set before hci_register_dev is called or
+         * during the hdev->setup vendor callback.
+         */
+        HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,
 };
 /* HCI device flags */
@@ -332,6 +341,7 @@ enum {
         HCI_FORCE_NO_MITM,
         HCI_QUALITY_REPORT,
         HCI_OFFLOAD_CODECS_ENABLED,
+        HCI_LE_SIMULTANEOUS_ROLES,
         __HCI_NUM_FLAGS,
 };
@@ -1047,8 +1057,8 @@ struct hci_cp_read_stored_link_key {
 } __packed;
 struct hci_rp_read_stored_link_key {
         __u8 status;
-        __u8 max_keys;
-        __u8 num_keys;
+        __le16 max_keys;
+        __le16 num_keys;
 } __packed;
 #define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
@@ -1058,7 +1068,7 @@ struct hci_cp_delete_stored_link_key {
 } __packed;
 struct hci_rp_delete_stored_link_key {
         __u8 status;
-        __u8 num_keys;
+        __le16 num_keys;
 } __packed;
 #define HCI_MAX_NAME_LENGTH 248
@@ -1931,6 +1941,16 @@ struct hci_rp_le_read_transmit_power {
         __s8 max_le_tx_power;
 } __packed;
+#define HCI_NETWORK_PRIVACY 0x00
+#define HCI_DEVICE_PRIVACY 0x01
+#define HCI_OP_LE_SET_PRIVACY_MODE 0x204e
+struct hci_cp_le_set_privacy_mode {
+        __u8 bdaddr_type;
+        bdaddr_t bdaddr;
+        __u8 mode;
+} __packed;
 #define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
 struct hci_rp_le_read_buffer_size_v2 {
         __u8 status;
@@ -2012,6 +2032,10 @@ struct hci_cp_le_reject_cis {
 } __packed;
 /* ---- HCI Events ---- */
+struct hci_ev_status {
+        __u8 status;
+} __packed;
 #define HCI_EV_INQUIRY_COMPLETE 0x01
 #define HCI_EV_INQUIRY_RESULT 0x02
@@ -2024,6 +2048,11 @@ struct inquiry_info {
         __le16 clock_offset;
 } __packed;
+struct hci_ev_inquiry_result {
+        __u8 num;
+        struct inquiry_info info[];
+};
 #define HCI_EV_CONN_COMPLETE 0x03
 struct hci_ev_conn_complete {
         __u8 status;
@@ -2135,7 +2164,7 @@ struct hci_comp_pkts_info {
 } __packed;
 struct hci_ev_num_comp_pkts {
-        __u8 num_hndl;
+        __u8 num;
         struct hci_comp_pkts_info handles[];
 } __packed;
@@ -2185,7 +2214,7 @@ struct hci_ev_pscan_rep_mode {
 } __packed;
 #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22
-struct inquiry_info_with_rssi {
+struct inquiry_info_rssi {
         bdaddr_t bdaddr;
         __u8 pscan_rep_mode;
         __u8 pscan_period_mode;
@@ -2193,7 +2222,7 @@ struct inquiry_info_with_rssi {
         __le16 clock_offset;
         __s8 rssi;
 } __packed;
-struct inquiry_info_with_rssi_and_pscan_mode {
+struct inquiry_info_rssi_pscan {
         bdaddr_t bdaddr;
         __u8 pscan_rep_mode;
         __u8 pscan_period_mode;
@@ -2202,6 +2231,14 @@ struct inquiry_info_with_rssi_and_pscan_mode {
         __le16 clock_offset;
         __s8 rssi;
 } __packed;
+struct hci_ev_inquiry_result_rssi {
+        __u8 num;
+        struct inquiry_info_rssi info[];
+} __packed;
+struct hci_ev_inquiry_result_rssi_pscan {
+        __u8 num;
+        struct inquiry_info_rssi_pscan info[];
+} __packed;
 #define HCI_EV_REMOTE_EXT_FEATURES 0x23
 struct hci_ev_remote_ext_features {
@@ -2256,6 +2293,11 @@ struct extended_inquiry_info {
         __u8 data[240];
 } __packed;
+struct hci_ev_ext_inquiry_result {
+        __u8 num;
+        struct extended_inquiry_info info[];
+} __packed;
 #define HCI_EV_KEY_REFRESH_COMPLETE 0x30
 struct hci_ev_key_refresh_complete {
         __u8 status;
@@ -2423,13 +2465,18 @@ struct hci_ev_le_conn_complete {
 #define HCI_EV_LE_ADVERTISING_REPORT 0x02
 struct hci_ev_le_advertising_info {
-        __u8 evt_type;
+        __u8 type;
         __u8 bdaddr_type;
         bdaddr_t bdaddr;
         __u8 length;
         __u8 data[];
 } __packed;
+struct hci_ev_le_advertising_report {
+        __u8 num;
+        struct hci_ev_le_advertising_info info[];
+} __packed;
 #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
 struct hci_ev_le_conn_update_complete {
         __u8 status;
@@ -2473,7 +2520,7 @@ struct hci_ev_le_data_len_change {
 #define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
 struct hci_ev_le_direct_adv_info {
-        __u8 evt_type;
+        __u8 type;
         __u8 bdaddr_type;
         bdaddr_t bdaddr;
         __u8 direct_addr_type;
@@ -2481,6 +2528,11 @@ struct hci_ev_le_direct_adv_info {
         __s8 rssi;
 } __packed;
+struct hci_ev_le_direct_adv_report {
+        __u8 num;
+        struct hci_ev_le_direct_adv_info info[];
+} __packed;
 #define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
 struct hci_ev_le_phy_update_complete {
         __u8 status;
@@ -2490,8 +2542,8 @@ struct hci_ev_le_phy_update_complete {
 } __packed;
 #define HCI_EV_LE_EXT_ADV_REPORT 0x0d
-struct hci_ev_le_ext_adv_report {
-        __le16 evt_type;
+struct hci_ev_le_ext_adv_info {
+        __le16 type;
         __u8 bdaddr_type;
         bdaddr_t bdaddr;
         __u8 primary_phy;
@@ -2499,11 +2551,16 @@ struct hci_ev_le_ext_adv_report {
         __u8 sid;
         __u8 tx_power;
         __s8 rssi;
         __le16 interval;
         __u8 direct_addr_type;
         bdaddr_t direct_addr;
         __u8 length;
         __u8 data[];
+} __packed;
+struct hci_ev_le_ext_adv_report {
+        __u8 num;
+        struct hci_ev_le_ext_adv_info info[];
 } __packed;
 #define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
...
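The HCI_OP_LE_SET_PRIVACY_MODE definition added above pairs naturally with the hci_sync helpers declared later in this series. A hedged sketch of issuing the command for a single device (the function name is illustrative, not part of this commit; HCI_CMD_TIMEOUT and __hci_cmd_sync_status come from the existing hci_core/hci_sync headers):

        static int le_set_device_privacy(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 bdaddr_type)
        {
                struct hci_cp_le_set_privacy_mode cp;

                memset(&cp, 0, sizeof(cp));
                cp.bdaddr_type = bdaddr_type;
                bacpy(&cp.bdaddr, bdaddr);
                cp.mode = HCI_DEVICE_PRIVACY;   /* accept unresolved RPAs */

                return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
                                             sizeof(cp), &cp, HCI_CMD_TIMEOUT);
        }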
@@ -88,6 +88,7 @@ struct discovery_state {
         u8 (*uuids)[16];
         unsigned long scan_start;
         unsigned long scan_duration;
+        unsigned long name_resolve_timeout;
 };
 #define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -151,22 +152,22 @@ struct bdaddr_list_with_irk {
         u8 local_irk[16];
 };
-struct bdaddr_list_with_flags {
-        struct list_head list;
-        bdaddr_t bdaddr;
-        u8 bdaddr_type;
-        u32 current_flags;
-};
 enum hci_conn_flags {
         HCI_CONN_FLAG_REMOTE_WAKEUP,
-        HCI_CONN_FLAG_MAX
-};
-#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr))
+        HCI_CONN_FLAG_DEVICE_PRIVACY,
+        __HCI_CONN_NUM_FLAGS,
+};
 /* Make sure number of flags doesn't exceed sizeof(current_flags) */
-static_assert(HCI_CONN_FLAG_MAX < 32);
+static_assert(__HCI_CONN_NUM_FLAGS < 32);
+struct bdaddr_list_with_flags {
+        struct list_head list;
+        bdaddr_t bdaddr;
+        u8 bdaddr_type;
+        DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
+};
 struct bt_uuid {
         struct list_head list;
@@ -352,8 +353,8 @@ struct hci_dev {
         __u16 lmp_subver;
         __u16 voice_setting;
         __u8 num_iac;
-        __u8 stored_max_keys;
-        __u8 stored_num_keys;
+        __u16 stored_max_keys;
+        __u16 stored_num_keys;
         __u8 io_capability;
         __s8 inq_tx_power;
         __u8 err_data_reporting;
@@ -479,6 +480,7 @@ struct hci_dev {
         struct work_struct cmd_sync_work;
         struct list_head cmd_sync_work_list;
         struct mutex cmd_sync_work_lock;
+        struct work_struct cmd_sync_cancel_work;
         __u16 discov_timeout;
         struct delayed_work discov_off;
@@ -559,6 +561,7 @@ struct hci_dev {
         struct rfkill *rfkill;
         DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
+        DECLARE_BITMAP(conn_flags, __HCI_CONN_NUM_FLAGS);
         __s8 adv_tx_power;
         __u8 adv_data[HCI_MAX_EXT_AD_LENGTH];
@@ -754,7 +757,8 @@ struct hci_conn_params {
         struct hci_conn *conn;
         bool explicit_connect;
-        u32 current_flags;
+        DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
+        u8 privacy_mode;
 };
 extern struct list_head hci_dev_list;
@@ -779,6 +783,12 @@ extern struct mutex hci_cb_list_lock;
                 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
         } while (0)
+#define hci_dev_le_state_simultaneous(hdev) \
+        (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
+         (hdev->le_states[4] & 0x08) && /* Central */ \
+         (hdev->le_states[4] & 0x40) && /* Peripheral */ \
+         (hdev->le_states[3] & 0x10))  /* Simultaneous */
 /* ----- HCI interface to upper protocols ----- */
 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int l2cap_disconn_ind(struct hci_conn *hcon);
@@ -1117,8 +1127,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
                                      enum conn_reasons conn_reason);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                 u8 dst_type, bool dst_resolved, u8 sec_level,
-                                u16 conn_timeout, u8 role,
-                                bdaddr_t *direct_rpa);
+                                u16 conn_timeout, u8 role);
 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                                  u8 sec_level, u8 auth_type,
                                  enum conn_reasons conn_reason);
@@ -1465,6 +1474,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
                              hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
+#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
+                                   (hdev->commands[39] & 0x04))
 /* Use enhanced synchronous connection if command is supported */
 #define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
@@ -1759,6 +1771,8 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
 #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */
 #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
+#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */
 void mgmt_fill_version_info(void *ver);
 int mgmt_new_settings(struct hci_dev *hdev);
 void mgmt_index_added(struct hci_dev *hdev);
...
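The hci_dev_le_state_simultaneous() macro added above is what the "Add check simultaneous roles support" patch keys off together with the new HCI_LE_SIMULTANEOUS_ROLES flag. A condensed sketch of the expected setup-time usage (details of where this runs in hci_sync may differ from the actual series):

        /* After the controller's supported LE states have been read, record
         * whether central and peripheral roles can run at the same time.
         */
        if (hci_dev_le_state_simultaneous(hdev))
                hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
        else
                hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);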
@@ -37,6 +37,8 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
 void hci_cmd_sync_init(struct hci_dev *hdev);
 void hci_cmd_sync_clear(struct hci_dev *hdev);
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                        void *data, hci_cmd_sync_work_destroy_t destroy);
@@ -100,3 +102,7 @@ int hci_stop_discovery_sync(struct hci_dev *hdev);
 int hci_suspend_sync(struct hci_dev *hdev);
 int hci_resume_sync(struct hci_dev *hdev);
+struct hci_conn;
+int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
@@ -936,10 +936,11 @@ struct mgmt_ev_auth_failed {
         __u8 status;
 } __packed;
 #define MGMT_DEV_FOUND_CONFIRM_NAME 0x01
 #define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02
 #define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
 #define MGMT_DEV_FOUND_INITIATED_CONN 0x08
+#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED 0x10
 #define MGMT_EV_DEVICE_FOUND 0x0012
 struct mgmt_ev_device_found {
...
This diff is collapsed.
@@ -2153,7 +2153,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
         bacpy(&entry->bdaddr, bdaddr);
         entry->bdaddr_type = type;
-        entry->current_flags = flags;
+        bitmap_from_u64(entry->flags, flags);
         list_add(&entry->list, list);
@@ -2629,6 +2629,12 @@ int hci_register_dev(struct hci_dev *hdev)
         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
+        /* Mark Remote Wakeup connection flag as supported if driver has wakeup
+         * callback.
+         */
+        if (hdev->wakeup)
+                set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
         hci_sock_dev_event(hdev, HCI_DEV_REG);
         hci_dev_hold(hdev);
@@ -2906,7 +2912,7 @@ int hci_unregister_cb(struct hci_cb *cb)
 }
 EXPORT_SYMBOL(hci_unregister_cb);
-static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
         int err;
@@ -2929,14 +2935,17 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
                 kfree_skb(skb);
-                return;
+                return -EINVAL;
         }
         err = hdev->send(hdev, skb);
         if (err < 0) {
                 bt_dev_err(hdev, "sending frame failed (%d)", err);
                 kfree_skb(skb);
+                return err;
         }
+        return 0;
 }
 /* Send HCI command */
@@ -3843,10 +3852,15 @@ static void hci_cmd_work(struct work_struct *work)
                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                 if (hdev->sent_cmd) {
+                        int res;
                         if (hci_req_status_pend(hdev))
                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
                         atomic_dec(&hdev->cmd_cnt);
-                        hci_send_frame(hdev, skb);
+                        res = hci_send_frame(hdev, skb);
+                        if (res < 0)
+                                __hci_cmd_sync_cancel(hdev, -res);
                         if (test_bit(HCI_RESET, &hdev->flags))
                                 cancel_delayed_work(&hdev->cmd_timer);
                         else
...
This diff is collapsed.
@@ -111,17 +111,6 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
         }
 }
-void hci_req_sync_cancel(struct hci_dev *hdev, int err)
-{
-        bt_dev_dbg(hdev, "err 0x%2.2x", err);
-        if (hdev->req_status == HCI_REQ_PEND) {
-                hdev->req_result = err;
-                hdev->req_status = HCI_REQ_CANCELED;
-                wake_up_interruptible(&hdev->req_wait_q);
-        }
-}
 /* Execute request and wait for completion. */
 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                      unsigned long opt),
@@ -492,8 +481,8 @@ static int add_to_accept_list(struct hci_request *req,
         }
         /* During suspend, only wakeable devices can be in accept list */
-        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
-                                                   params->current_flags))
+        if (hdev->suspended &&
+            !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
                 return 0;
         *num_entries += 1;
@@ -829,56 +818,6 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
         }
 }
-/* This function requires the caller holds hdev->lock */
-void __hci_req_pause_adv_instances(struct hci_request *req)
-{
-        bt_dev_dbg(req->hdev, "Pausing advertising instances");
-        /* Call to disable any advertisements active on the controller.
-         * This will succeed even if no advertisements are configured.
-         */
-        __hci_req_disable_advertising(req);
-        /* If we are using software rotation, pause the loop */
-        if (!ext_adv_capable(req->hdev))
-                cancel_adv_timeout(req->hdev);
-}
-/* This function requires the caller holds hdev->lock */
-static void __hci_req_resume_adv_instances(struct hci_request *req)
-{
-        struct adv_info *adv;
-        bt_dev_dbg(req->hdev, "Resuming advertising instances");
-        if (ext_adv_capable(req->hdev)) {
-                /* Call for each tracked instance to be re-enabled */
-                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
-                        __hci_req_enable_ext_advertising(req,
-                                                         adv->instance);
-                }
-        } else {
-                /* Schedule for most recent instance to be restarted and begin
-                 * the software rotation loop
-                 */
-                __hci_req_schedule_adv_instance(req,
-                                                req->hdev->cur_adv_instance,
-                                                true);
-        }
-}
-/* This function requires the caller holds hdev->lock */
-int hci_req_resume_adv_instances(struct hci_dev *hdev)
-{
-        struct hci_request req;
-        hci_req_init(&req, hdev);
-        __hci_req_resume_adv_instances(&req);
-        return hci_req_run(&req, NULL);
-}
 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
 {
         return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
@@ -2703,7 +2642,7 @@ void hci_request_setup(struct hci_dev *hdev)
 void hci_request_cancel_all(struct hci_dev *hdev)
 {
-        hci_req_sync_cancel(hdev, ENODEV);
+        __hci_cmd_sync_cancel(hdev, ENODEV);
         cancel_work_sync(&hdev->discov_update);
         cancel_work_sync(&hdev->scan_update);
...
@@ -64,7 +64,6 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                      unsigned long opt),
                    unsigned long opt, u32 timeout, u8 *hci_status);
-void hci_req_sync_cancel(struct hci_dev *hdev, int err);
 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                 const void *param);
@@ -81,8 +80,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req);
 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
 void hci_req_disable_address_resolution(struct hci_dev *hdev);
-void __hci_req_pause_adv_instances(struct hci_request *req);
-int hci_req_resume_adv_instances(struct hci_dev *hdev);
 void hci_req_reenable_advertising(struct hci_dev *hdev);
 void __hci_req_enable_advertising(struct hci_request *req);
 void __hci_req_disable_advertising(struct hci_request *req);
...
This diff is collapsed.
@@ -7905,7 +7905,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                 hcon = hci_connect_le(hdev, dst, dst_type, false,
                                       chan->sec_level,
                                       HCI_LE_CONN_TIMEOUT,
-                                      HCI_ROLE_SLAVE, NULL);
+                                      HCI_ROLE_SLAVE);
         else
                 hcon = hci_connect_le_scan(hdev, dst, dst_type,
                                            chan->sec_level,
...
@@ -161,7 +161,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                 break;
         }
-        if (chan->psm && bdaddr_type_is_le(chan->src_type))
+        /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
+         * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
+         */
+        if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
+            chan->mode != L2CAP_MODE_EXT_FLOWCTL)
                 chan->mode = L2CAP_MODE_LE_FLOWCTL;
         chan->state = BT_BOUND;
@@ -255,7 +259,11 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
                 return -EINVAL;
         }
-        if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
+        /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
+         * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
+         */
+        if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
+            chan->mode != L2CAP_MODE_EXT_FLOWCTL)
                 chan->mode = L2CAP_MODE_LE_FLOWCTL;
         l2cap_sock_init_pid(sk);
...
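The mode these checks preserve is one userspace asks for explicitly before bind()/connect(). A hedged userspace sketch of requesting ECRED channels with the BT_MODE socket option (constant names follow the kernel's net/bluetooth headers and recent BlueZ lib/bluetooth.h; older installed headers may not expose them):

        #include <stdint.h>
        #include <stdio.h>
        #include <sys/socket.h>
        #include <bluetooth/bluetooth.h>
        #include <bluetooth/l2cap.h>

        /* Open an L2CAP socket and request extended flow control (ECRED)
         * mode; with the fix above, bind()/connect() keep this mode instead
         * of silently reverting to L2CAP_MODE_LE_FLOWCTL.
         */
        static int open_ecred_socket(void)
        {
                int fd = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
                uint8_t mode = BT_MODE_EXT_FLOWCTL;

                if (fd < 0)
                        return -1;
                if (setsockopt(fd, SOL_BLUETOOTH, BT_MODE, &mode, sizeof(mode)) < 0)
                        perror("BT_MODE");
                return fd;
        }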
This diff is collapsed.
@@ -56,40 +56,72 @@ static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
         return skb;
 }
-int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
-                    void *data, u16 data_len, int flag, struct sock *skip_sk)
+struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
+                               unsigned int size)
 {
         struct sk_buff *skb;
-        struct mgmt_hdr *hdr;
-        skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+        skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL);
         if (!skb)
-                return -ENOMEM;
-        hdr = skb_put(skb, sizeof(*hdr));
-        hdr->opcode = cpu_to_le16(event);
-        if (hdev)
-                hdr->index = cpu_to_le16(hdev->id);
-        else
-                hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
-        hdr->len = cpu_to_le16(data_len);
-        if (data)
-                skb_put_data(skb, data, data_len);
+                return skb;
+        skb_reserve(skb, sizeof(struct mgmt_hdr));
+        bt_cb(skb)->mgmt.hdev = hdev;
+        bt_cb(skb)->mgmt.opcode = opcode;
+        return skb;
+}
+int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
+                        struct sock *skip_sk)
+{
+        struct hci_dev *hdev;
+        struct mgmt_hdr *hdr;
+        int len = skb->len;
+        if (!skb)
+                return -EINVAL;
+        hdev = bt_cb(skb)->mgmt.hdev;
         /* Time stamp */
         __net_timestamp(skb);
-        hci_send_to_channel(channel, skb, flag, skip_sk);
+        /* Send just the data, without headers, to the monitor */
         if (channel == HCI_CHANNEL_CONTROL)
-                hci_send_monitor_ctrl_event(hdev, event, data, data_len,
+                hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode,
+                                            skb->data, skb->len,
                                             skb_get_ktime(skb), flag, skip_sk);
+        hdr = skb_push(skb, sizeof(*hdr));
+        hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode);
+        if (hdev)
+                hdr->index = cpu_to_le16(hdev->id);
+        else
+                hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+        hdr->len = cpu_to_le16(len);
+        hci_send_to_channel(channel, skb, flag, skip_sk);
         kfree_skb(skb);
         return 0;
 }
+int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
+                    void *data, u16 data_len, int flag, struct sock *skip_sk)
+{
+        struct sk_buff *skb;
+        skb = mgmt_alloc_skb(hdev, event, data_len);
+        if (!skb)
+                return -ENOMEM;
+        if (data)
+                skb_put_data(skb, data, data_len);
+        return mgmt_send_event_skb(channel, skb, flag, skip_sk);
+}
 int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
         struct sk_buff *skb, *mskb;
...
@@ -32,6 +32,10 @@ struct mgmt_pending_cmd {
         int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
 };
+struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
+                               unsigned int size);
+int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
+                        struct sock *skip_sk);
 int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
                     void *data, u16 data_len, int flag, struct sock *skip_sk);
 int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status);
...
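The point of the two helpers declared above is to let callers build variable-length events directly in the skb instead of through a temporary buffer, which is what the MGMT_EV_DEVICE_CONNECTED and MGMT_EV_DEVICE_FOUND conversions in this pull use. A condensed sketch of the intended call pattern (most event fields and error paths omitted; helper names follow net/bluetooth/mgmt.c conventions):

        static void sketch_device_connected(struct hci_dev *hdev,
                                            struct hci_conn *conn,
                                            const u8 *eir, u16 eir_len)
        {
                struct mgmt_ev_device_connected *ev;
                struct sk_buff *skb;

                skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
                                     sizeof(*ev) + eir_len);
                if (!skb)
                        return;

                ev = skb_put(skb, sizeof(*ev));         /* fixed header... */
                bacpy(&ev->addr.bdaddr, &conn->dst);
                ev->eir_len = cpu_to_le16(eir_len);
                skb_put_data(skb, eir, eir_len);        /* ...then EIR data */

                mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
                                    NULL);
        }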
@@ -590,7 +590,7 @@ void msft_unregister(struct hci_dev *hdev)
         kfree(msft);
 }
-void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb)
+void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
 {
         struct msft_data *msft = hdev->msft_data;
         u8 event;
...
@@ -17,7 +17,7 @@ void msft_register(struct hci_dev *hdev);
 void msft_unregister(struct hci_dev *hdev);
 void msft_do_open(struct hci_dev *hdev);
 void msft_do_close(struct hci_dev *hdev);
-void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
+void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
 __u64 msft_get_features(struct hci_dev *hdev);
 int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor);
 int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
@@ -39,7 +39,8 @@ static inline void msft_register(struct hci_dev *hdev) {}
 static inline void msft_unregister(struct hci_dev *hdev) {}
 static inline void msft_do_open(struct hci_dev *hdev) {}
 static inline void msft_do_close(struct hci_dev *hdev) {}
-static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
+static inline void msft_vendor_evt(struct hci_dev *hdev, void *data,
+                                   struct sk_buff *skb) {}
 static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
 static inline int msft_add_monitor_pattern(struct hci_dev *hdev,
                                            struct adv_monitor *monitor)
...
@@ -2023,6 +2023,30 @@ void *skb_pull(struct sk_buff *skb, unsigned int len)
 }
 EXPORT_SYMBOL(skb_pull);
+/**
+ *      skb_pull_data - remove data from the start of a buffer returning its
+ *      original position.
+ *      @skb: buffer to use
+ *      @len: amount of data to remove
+ *
+ *      This function removes data from the start of a buffer, returning
+ *      the memory to the headroom. A pointer to the original data in the buffer
+ *      is returned after checking if there is enough data to pull. Once the
+ *      data has been pulled future pushes will overwrite the old data.
+ */
+void *skb_pull_data(struct sk_buff *skb, size_t len)
+{
+        void *data = skb->data;
+        if (skb->len < len)
+                return NULL;
+        skb_pull(skb, len);
+        return data;
+}
+EXPORT_SYMBOL(skb_pull_data);
 /**
  *      skb_trim - remove end from a buffer
  *      @skb: buffer to alter
...
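This helper is what the "Rework HCI event handling to use skb_pull_data" patches in this pull build on: pull a fixed-size header and get back either a pointer to it or NULL when the packet is too short. A minimal caller-side sketch (illustrative handler, not taken from the series):

        static void sketch_handle_status_event(struct hci_dev *hdev,
                                               struct sk_buff *skb)
        {
                struct hci_ev_status *ev;

                /* NULL means the event was shorter than expected; drop it. */
                ev = skb_pull_data(skb, sizeof(*ev));
                if (!ev)
                        return;

                if (ev->status)
                        bt_dev_err(hdev, "event failed (0x%2.2x)", ev->status);
        }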