Commit 5148371a authored by David S. Miller's avatar David S. Miller

Merge branch 'for-upstream' of...

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-12-11

Here's another set of Bluetooth & 802.15.4 patches for the 4.5 kernel:

 - 6LoWPAN debugfs support
 - New 802.15.4 driver for ADF7242 MAC IEEE802154
 - Initial code for 6LoWPAN Generic Header Compression (GHC) support
 - Refactor Bluetooth LE scan & advertising behind dedicated workqueue
 - Cleanups to Bluetooth H:5 HCI driver
 - Support for Toshiba Broadcom based Bluetooth controllers
 - Use continuous scanning when establishing Bluetooth LE connections

Please let me know if there are any issues pulling. Thanks.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 7e074af2 7302b9d9
* ADF7242 IEEE 802.15.4 *
Required properties:
- compatible: should be "adi,adf7242"
- spi-max-frequency: maximal bus speed (12.5 MHz)
- reg: the chipselect index
- interrupts: the interrupt generated by the device via pin IRQ1.
IRQ_TYPE_LEVEL_HIGH (4) or IRQ_TYPE_EDGE_FALLING (2)
Example:
adf7242@0 {
compatible = "adi,adf7242";
spi-max-frequency = <10000000>;
reg = <0>;
interrupts = <98 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpio3>;
};
......@@ -371,6 +371,15 @@ ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
M: Jiri Kosina <jikos@kernel.org>
S: Maintained
ADF7242 IEEE 802.15.4 RADIO DRIVER
M: Michael Hennerich <michael.hennerich@analog.com>
W: https://wiki.analog.com/ADF7242
W: http://ez.analog.com/community/linux-device-drivers
L: linux-wpan@vger.kernel.org
S: Supported
F: drivers/net/ieee802154/adf7242.c
F: Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
ADM1025 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: lm-sensors@lm-sensors.org
......
......@@ -73,6 +73,48 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
/* Put the controller into Intel manufacturer mode (vendor command 0xfc11)
 * so that firmware patches and configuration parameters may be written.
 * Returns 0 on success or a negative errno on command failure.
 */
int btintel_enter_mfg(struct hci_dev *hdev)
{
	static const u8 mfg_on[] = { 0x01, 0x00 };
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_on), mfg_on,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_enter_mfg);
/* Leave Intel manufacturer mode. @reset requests a controller reset on
 * exit; @patched selects whether downloaded patches stay active across
 * that reset. Returns 0 on success or a negative errno.
 */
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
	u8 param[2] = { 0x00, 0x00 };
	struct sk_buff *skb;

	/* Second command parameter encodes the exit method:
	 * 0x00: plain disable of manufacturing mode, no reset
	 * 0x01: disable, then reset with patches deactivated
	 * 0x02: disable, then reset with patches activated
	 */
	if (reset)
		param[1] |= patched ? 0x02 : 0x01;

	skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(param), param,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_exit_mfg);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
......@@ -126,37 +168,19 @@ EXPORT_SYMBOL_GPL(btintel_set_diag);
/* Toggle vendor diagnostic support while in manufacturer mode, since the
 * diag setting is only writable there. The shown span is merged-diff
 * residue (old inline 0xfc11 sequences interleaved with the new helper
 * calls, `param` used without a declaration); this is the coherent
 * post-patch implementation built on btintel_enter_mfg()/btintel_exit_mfg().
 *
 * Returns 0 on success or a negative errno. A failure to leave
 * manufacturer mode takes precedence over the diag result itself, so the
 * controller is never silently left in manufacturer mode.
 */
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	int err, ret;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_diag(hdev, enable);

	/* Leave manufacturer mode without resetting the controller. */
	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
......@@ -309,39 +333,46 @@ EXPORT_SYMBOL_GPL(btintel_set_event_mask);
/* Update the Intel event mask while in manufacturer mode. The shown span
 * is merged-diff residue (duplicated btintel_set_event_mask() calls and
 * leftover inline 0xfc11 sequences); this is the coherent post-patch
 * implementation using the enter/exit helpers.
 *
 * Returns 0 on success or a negative errno; an exit-mfg failure takes
 * precedence over the event-mask result so the controller is not left in
 * manufacturer mode unnoticed.
 */
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	int err, ret;

	err = btintel_enter_mfg(hdev);
	if (err)
		return err;

	ret = btintel_set_event_mask(hdev, debug);

	/* Leave manufacturer mode without resetting the controller. */
	err = btintel_exit_mfg(hdev, false, false);
	if (err)
		return err;

	return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
/* Read the Intel version information (vendor command 0xfc05) into @ver.
 * The shown span is merged-diff residue (stray old lines, `err` used
 * without a declaration, a duplicated EXPORT of the previous symbol);
 * this is the coherent post-patch implementation.
 *
 * Returns 0 on success, the PTR_ERR of the failed command, or -EILSEQ
 * when the event payload size does not match struct intel_version.
 */
int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	if (skb->len != sizeof(*ver)) {
		bt_dev_err(hdev, "Intel version event size mismatch");
		kfree_skb(skb);
		return -EILSEQ;
	}

	memcpy(ver, skb->data, sizeof(*ver));
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_version);
/* ------- REGMAP IBT SUPPORT ------- */
......
......@@ -72,6 +72,8 @@ struct intel_secure_send_result {
#if IS_ENABLED(CONFIG_BT_INTEL)
int btintel_check_bdaddr(struct hci_dev *hdev);
int btintel_enter_mfg(struct hci_dev *hdev);
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btintel_set_diag(struct hci_dev *hdev, bool enable);
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
......@@ -83,6 +85,7 @@ int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);
int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver);
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write);
......@@ -94,6 +97,16 @@ static inline int btintel_check_bdaddr(struct hci_dev *hdev)
return -EOPNOTSUPP;
}
/* Stub when CONFIG_BT_INTEL is disabled: manufacturer mode unsupported. */
static inline int btintel_enter_mfg(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
/* Stub when CONFIG_BT_INTEL is disabled: manufacturer mode unsupported. */
static inline int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
return -EOPNOTSUPP;
}
static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
return -EOPNOTSUPP;
......@@ -140,6 +153,12 @@ static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
return -EOPNOTSUPP;
}
/* Stub when CONFIG_BT_INTEL is disabled: version read unsupported. */
static inline int btintel_read_version(struct hci_dev *hdev,
struct intel_version *ver)
{
return -EOPNOTSUPP;
}
static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
u16 opcode_read,
u16 opcode_write)
......
......@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Toshiba Corp - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
......@@ -1642,13 +1646,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
struct sk_buff *skb;
const struct firmware *fw;
const u8 *fw_ptr;
int disable_patch;
struct intel_version *ver;
const u8 mfg_enable[] = { 0x01, 0x00 };
const u8 mfg_disable[] = { 0x00, 0x00 };
const u8 mfg_reset_deactivate[] = { 0x00, 0x01 };
const u8 mfg_reset_activate[] = { 0x00, 0x02 };
int disable_patch, err;
struct intel_version ver;
BT_DBG("%s", hdev->name);
......@@ -1674,35 +1673,22 @@ static int btusb_setup_intel(struct hci_dev *hdev)
* The returned information are hardware variant and revision plus
* firmware variant, revision and build number.
*/
skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s reading Intel fw version command failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
BT_ERR("%s Intel version event length mismatch", hdev->name);
kfree_skb(skb);
return -EIO;
}
ver = (struct intel_version *)skb->data;
err = btintel_read_version(hdev, &ver);
if (err)
return err;
BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
hdev->name, ver->hw_platform, ver->hw_variant,
ver->hw_revision, ver->fw_variant, ver->fw_revision,
ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy,
ver->fw_patch_num);
hdev->name, ver.hw_platform, ver.hw_variant, ver.hw_revision,
ver.fw_variant, ver.fw_revision, ver.fw_build_num,
ver.fw_build_ww, ver.fw_build_yy, ver.fw_patch_num);
/* fw_patch_num indicates the version of patch the device currently
* have. If there is no patch data in the device, it is always 0x00.
* So, if it is other than 0x00, no need to patch the device again.
*/
if (ver->fw_patch_num) {
if (ver.fw_patch_num) {
BT_INFO("%s: Intel device is already patched. patch num: %02x",
hdev->name, ver->fw_patch_num);
kfree_skb(skb);
hdev->name, ver.fw_patch_num);
goto complete;
}
......@@ -1712,31 +1698,21 @@ static int btusb_setup_intel(struct hci_dev *hdev)
* If no patch file is found, allow the device to operate without
* a patch.
*/
fw = btusb_setup_intel_get_fw(hdev, ver);
if (!fw) {
kfree_skb(skb);
fw = btusb_setup_intel_get_fw(hdev, &ver);
if (!fw)
goto complete;
}
fw_ptr = fw->data;
kfree_skb(skb);
/* This Intel specific command enables the manufacturer mode of the
* controller.
*
/* Enable the manufacturer mode of the controller.
* Only while this mode is enabled, the driver can download the
* firmware patch data and configuration parameters.
*/
skb = __hci_cmd_sync(hdev, 0xfc11, 2, mfg_enable, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
err = btintel_enter_mfg(hdev);
if (err) {
release_firmware(fw);
return PTR_ERR(skb);
return err;
}
kfree_skb(skb);
disable_patch = 1;
/* The firmware data file consists of list of Intel specific HCI
......@@ -1776,14 +1752,9 @@ static int btusb_setup_intel(struct hci_dev *hdev)
/* Patching completed successfully and disable the manufacturer mode
* with reset and activate the downloaded firmware patches.
*/
skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_activate),
mfg_reset_activate, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
err = btintel_exit_mfg(hdev, true, true);
if (err)
return err;
BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
hdev->name);
......@@ -1792,14 +1763,9 @@ static int btusb_setup_intel(struct hci_dev *hdev)
exit_mfg_disable:
/* Disable the manufacturer mode without reset */
skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_disable), mfg_disable,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
err = btintel_exit_mfg(hdev, false, false);
if (err)
return err;
BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
......@@ -1811,14 +1777,9 @@ static int btusb_setup_intel(struct hci_dev *hdev)
/* Patching failed. Disable the manufacturer mode with reset and
* deactivate the downloaded firmware patches.
*/
skb = __hci_cmd_sync(hdev, 0xfc11, sizeof(mfg_reset_deactivate),
mfg_reset_deactivate, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
err = btintel_exit_mfg(hdev, true, false);
if (err)
return err;
BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
hdev->name);
......@@ -2005,7 +1966,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
0x00, 0x08, 0x04, 0x00 };
struct btusb_data *data = hci_get_drvdata(hdev);
struct sk_buff *skb;
struct intel_version *ver;
struct intel_version ver;
struct intel_boot_params *params;
const struct firmware *fw;
const u8 *fw_ptr;
......@@ -2023,28 +1984,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* is in bootloader mode or if it already has operational firmware
* loaded.
*/
skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: Reading Intel version information failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
BT_ERR("%s: Intel version event size mismatch", hdev->name);
kfree_skb(skb);
return -EILSEQ;
}
ver = (struct intel_version *)skb->data;
err = btintel_read_version(hdev, &ver);
if (err)
return err;
/* The hardware platform number has a fixed value of 0x37 and
* for now only accept this single value.
*/
if (ver->hw_platform != 0x37) {
if (ver.hw_platform != 0x37) {
BT_ERR("%s: Unsupported Intel hardware platform (%u)",
hdev->name, ver->hw_platform);
kfree_skb(skb);
hdev->name, ver.hw_platform);
return -EINVAL;
}
......@@ -2053,14 +2002,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* put in place to ensure correct forward compatibility options
* when newer hardware variants come along.
*/
if (ver->hw_variant != 0x0b) {
if (ver.hw_variant != 0x0b) {
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver->hw_variant);
kfree_skb(skb);
hdev->name, ver.hw_variant);
return -EINVAL;
}
btintel_version_info(hdev, ver);
btintel_version_info(hdev, &ver);
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
......@@ -2075,8 +2023,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* It is not possible to use the Secure Boot Parameters in this
* case since that command is only available in bootloader mode.
*/
if (ver->fw_variant == 0x23) {
kfree_skb(skb);
if (ver.fw_variant == 0x23) {
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btintel_check_bdaddr(hdev);
return 0;
......@@ -2085,15 +2032,12 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* If the device is not in bootloader mode, then the only possible
* choice is to return an error and abort the device initialization.
*/
if (ver->fw_variant != 0x06) {
if (ver.fw_variant != 0x06) {
BT_ERR("%s: Unsupported Intel firmware variant (%u)",
hdev->name, ver->fw_variant);
kfree_skb(skb);
hdev->name, ver.fw_variant);
return -ENODEV;
}
kfree_skb(skb);
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
......
......@@ -116,18 +116,14 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
/* Build the H5 CONFIG field: the sliding window size occupies the low
 * three bits, all other bits are zero. The shown span is merged-diff
 * residue with two return paths (old accumulator plus new direct
 * return); both compute the same value, collapsed here to the new form.
 */
static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}
static void h5_timed_event(unsigned long arg)
{
const unsigned char sync_req[] = { 0x01, 0x7e };
unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
unsigned char conf_req[3] = { 0x03, 0xfc };
struct hci_uart *hu = (struct hci_uart *)arg;
struct h5 *h5 = hu->priv;
struct sk_buff *skb;
......@@ -285,7 +281,7 @@ static void h5_handle_internal_rx(struct hci_uart *hu)
struct h5 *h5 = hu->priv;
const unsigned char sync_req[] = { 0x01, 0x7e };
const unsigned char sync_rsp[] = { 0x02, 0x7d };
unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
unsigned char conf_req[3] = { 0x03, 0xfc };
const unsigned char conf_rsp[] = { 0x04, 0x7b };
const unsigned char wakeup_req[] = { 0x05, 0xfa };
const unsigned char woken_req[] = { 0x06, 0xf9 };
......@@ -317,7 +313,7 @@ static void h5_handle_internal_rx(struct hci_uart *hu)
h5_link_control(hu, conf_req, 3);
} else if (memcmp(data, conf_rsp, 2) == 0) {
if (H5_HDR_LEN(hdr) > 2)
h5->tx_win = (data[2] & 7);
h5->tx_win = (data[2] & 0x07);
BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
h5->state = H5_ACTIVE;
hci_uart_init_ready(hu);
......
......@@ -502,7 +502,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
/* Device will not accept speed change if Intel version has not been
* previously requested.
*/
skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
PTR_ERR(skb));
......@@ -542,7 +542,7 @@ static int intel_setup(struct hci_uart *hu)
struct intel_device *idev = NULL;
struct hci_dev *hdev = hu->hdev;
struct sk_buff *skb;
struct intel_version *ver;
struct intel_version ver;
struct intel_boot_params *params;
struct list_head *p;
const struct firmware *fw;
......@@ -590,35 +590,16 @@ static int intel_setup(struct hci_uart *hu)
* is in bootloader mode or if it already has operational firmware
* loaded.
*/
skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;
}
ver = (struct intel_version *)skb->data;
if (ver->status) {
bt_dev_err(hdev, "Intel version command failure (%02x)",
ver->status);
err = -bt_to_errno(ver->status);
kfree_skb(skb);
err = btintel_read_version(hdev, &ver);
if (err)
return err;
}
/* The hardware platform number has a fixed value of 0x37 and
* for now only accept this single value.
*/
if (ver->hw_platform != 0x37) {
if (ver.hw_platform != 0x37) {
bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
ver->hw_platform);
kfree_skb(skb);
ver.hw_platform);
return -EINVAL;
}
......@@ -627,14 +608,13 @@ static int intel_setup(struct hci_uart *hu)
* put in place to ensure correct forward compatibility options
* when newer hardware variants come along.
*/
if (ver->hw_variant != 0x0b) {
if (ver.hw_variant != 0x0b) {
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
ver->hw_variant);
kfree_skb(skb);
ver.hw_variant);
return -EINVAL;
}
btintel_version_info(hdev, ver);
btintel_version_info(hdev, &ver);
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
......@@ -649,8 +629,7 @@ static int intel_setup(struct hci_uart *hu)
* It is not possible to use the Secure Boot Parameters in this
* case since that command is only available in bootloader mode.
*/
if (ver->fw_variant == 0x23) {
kfree_skb(skb);
if (ver.fw_variant == 0x23) {
clear_bit(STATE_BOOTLOADER, &intel->flags);
btintel_check_bdaddr(hdev);
return 0;
......@@ -659,19 +638,16 @@ static int intel_setup(struct hci_uart *hu)
/* If the device is not in bootloader mode, then the only possible
* choice is to return an error and abort the device initialization.
*/
if (ver->fw_variant != 0x06) {
if (ver.fw_variant != 0x06) {
bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
ver->fw_variant);
kfree_skb(skb);
ver.fw_variant);
return -ENODEV;
}
kfree_skb(skb);
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
PTR_ERR(skb));
......@@ -881,7 +857,7 @@ static int intel_setup(struct hci_uart *hu)
set_bit(STATE_BOOTING, &intel->flags);
skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
HCI_INIT_TIMEOUT);
HCI_CMD_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
......
......@@ -462,13 +462,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
/* Flush any pending characters in the driver and line discipline. */
/* FIXME: why is this needed. Note don't use ldisc_ref here as the
open path is before the ldisc is referencable */
if (tty->ldisc->ops->flush_buffer)
tty->ldisc->ops->flush_buffer(tty);
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
return 0;
......
......@@ -71,3 +71,14 @@ config IEEE802154_ATUSB
This driver can also be built as a module. To do so say M here.
The module will be called 'atusb'.
config IEEE802154_ADF7242
tristate "ADF7242 transceiver driver"
depends on IEEE802154_DRIVERS && MAC802154
depends on SPI
---help---
Say Y here to enable the ADF7242 SPI 802.15.4 wireless
controller.
This driver can also be built as a module. To do so, say M here.
The module will be called 'adf7242'.
......@@ -3,3 +3,4 @@ obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o
/*
* Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver
*
* Copyright 2009-2015 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*
* http://www.analog.com/ADF7242
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/firmware.h>
#include <linux/spi/spi.h>
#include <linux/skbuff.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/ieee802154.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>
#define FIRMWARE "adf7242_firmware.bin"
#define MAX_POLL_LOOPS 200
/* All Registers */
#define REG_EXT_CTRL 0x100 /* RW External LNA/PA and internal PA control */
#define REG_TX_FSK_TEST 0x101 /* RW TX FSK test mode configuration */
#define REG_CCA1 0x105 /* RW RSSI threshold for CCA */
#define REG_CCA2 0x106 /* RW CCA mode configuration */
#define REG_BUFFERCFG 0x107 /* RW RX_BUFFER overwrite control */
#define REG_PKT_CFG 0x108 /* RW FCS evaluation configuration */
#define REG_DELAYCFG0 0x109 /* RW RC_RX command to SFD or sync word delay */
#define REG_DELAYCFG1 0x10A /* RW RC_TX command to TX state */
#define REG_DELAYCFG2 0x10B /* RW Mac delay extension */
#define REG_SYNC_WORD0 0x10C /* RW sync word bits [7:0] of [23:0] */
#define REG_SYNC_WORD1 0x10D /* RW sync word bits [15:8] of [23:0] */
#define REG_SYNC_WORD2 0x10E /* RW sync word bits [23:16] of [23:0] */
#define REG_SYNC_CONFIG 0x10F /* RW sync word configuration */
#define REG_RC_CFG 0x13E /* RW RX / TX packet configuration */
#define REG_RC_VAR44 0x13F /* RW RESERVED */
#define REG_CH_FREQ0 0x300 /* RW Channel Frequency Settings - Low */
#define REG_CH_FREQ1 0x301 /* RW Channel Frequency Settings - Middle */
#define REG_CH_FREQ2 0x302 /* RW Channel Frequency Settings - High */
#define REG_TX_FD 0x304 /* RW TX Frequency Deviation Register */
#define REG_DM_CFG0 0x305 /* RW RX Discriminator BW Register */
#define REG_TX_M 0x306 /* RW TX Mode Register */
#define REG_RX_M 0x307 /* RW RX Mode Register */
#define REG_RRB 0x30C /* R RSSI Readback Register */
#define REG_LRB 0x30D /* R Link Quality Readback Register */
#define REG_DR0 0x30E /* RW bits [15:8] of [15:0] data rate setting */
#define REG_DR1 0x30F /* RW bits [7:0] of [15:0] data rate setting */
#define REG_PRAMPG 0x313 /* RW RESERVED */
#define REG_TXPB 0x314 /* RW TX Packet Storage Base Address */
#define REG_RXPB 0x315 /* RW RX Packet Storage Base Address */
#define REG_TMR_CFG0 0x316 /* RW Wake up Timer Conf Register - High */
#define REG_TMR_CFG1 0x317 /* RW Wake up Timer Conf Register - Low */
#define REG_TMR_RLD0 0x318 /* RW Wake up Timer Value Register - High */
#define REG_TMR_RLD1 0x319 /* RW Wake up Timer Value Register - Low */
#define REG_TMR_CTRL 0x31A /* RW Wake up Timer Timeout flag */
#define REG_PD_AUX 0x31E /* RW Battmon enable */
#define REG_GP_CFG 0x32C /* RW GPIO Configuration */
#define REG_GP_OUT 0x32D /* RW GPIO Configuration */
#define REG_GP_IN 0x32E /* R GPIO Configuration */
#define REG_SYNT 0x335 /* RW bandwidth calibration timers */
#define REG_CAL_CFG 0x33D /* RW Calibration Settings */
#define REG_PA_BIAS 0x36E /* RW PA BIAS */
#define REG_SYNT_CAL 0x371 /* RW Oscillator and Doubler Configuration */
#define REG_IIRF_CFG 0x389 /* RW BB Filter Decimation Rate */
#define REG_CDR_CFG 0x38A /* RW CDR kVCO */
#define REG_DM_CFG1 0x38B /* RW Postdemodulator Filter */
#define REG_AGCSTAT 0x38E /* R RXBB Ref Osc Calibration Engine Readback */
#define REG_RXCAL0 0x395 /* RW RX BB filter tuning, LSB */
#define REG_RXCAL1 0x396 /* RW RX BB filter tuning, MSB */
#define REG_RXFE_CFG 0x39B /* RW RXBB Ref Osc & RXFE Calibration */
#define REG_PA_RR 0x3A7 /* RW Set PA ramp rate */
#define REG_PA_CFG 0x3A8 /* RW PA enable */
#define REG_EXTPA_CFG 0x3A9 /* RW External PA BIAS DAC */
#define REG_EXTPA_MSC 0x3AA /* RW PA Bias Mode */
#define REG_ADC_RBK 0x3AE /* R Readback temp */
#define REG_AGC_CFG1 0x3B2 /* RW GC Parameters */
#define REG_AGC_MAX 0x3B4 /* RW Slew rate */
#define REG_AGC_CFG2 0x3B6 /* RW RSSI Parameters */
#define REG_AGC_CFG3 0x3B7 /* RW RSSI Parameters */
#define REG_AGC_CFG4 0x3B8 /* RW RSSI Parameters */
#define REG_AGC_CFG5 0x3B9 /* RW RSSI & NDEC Parameters */
#define REG_AGC_CFG6 0x3BA /* RW NDEC Parameters */
#define REG_OCL_CFG1 0x3C4 /* RW OCL System Parameters */
#define REG_IRQ1_EN0 0x3C7 /* RW Interrupt Mask set bits for IRQ1 */
#define REG_IRQ1_EN1 0x3C8 /* RW Interrupt Mask set bits for IRQ1 */
#define REG_IRQ2_EN0 0x3C9 /* RW Interrupt Mask set bits for IRQ2 */
#define REG_IRQ2_EN1 0x3CA /* RW Interrupt Mask set bits for IRQ2 */
#define REG_IRQ1_SRC0 0x3CB /* RW Interrupt Source bits for IRQ */
#define REG_IRQ1_SRC1 0x3CC /* RW Interrupt Source bits for IRQ */
#define REG_OCL_BW0 0x3D2 /* RW OCL System Parameters */
#define REG_OCL_BW1 0x3D3 /* RW OCL System Parameters */
#define REG_OCL_BW2 0x3D4 /* RW OCL System Parameters */
#define REG_OCL_BW3 0x3D5 /* RW OCL System Parameters */
#define REG_OCL_BW4 0x3D6 /* RW OCL System Parameters */
#define REG_OCL_BWS 0x3D7 /* RW OCL System Parameters */
#define REG_OCL_CFG13 0x3E0 /* RW OCL System Parameters */
#define REG_GP_DRV 0x3E3 /* RW I/O pads Configuration and bg trim */
#define REG_BM_CFG 0x3E6 /* RW Batt. Monitor Threshold Voltage setting */
#define REG_SFD_15_4 0x3F4 /* RW Option to set non standard SFD */
#define REG_AFC_CFG 0x3F7 /* RW AFC mode and polarity */
#define REG_AFC_KI_KP 0x3F8 /* RW AFC ki and kp */
#define REG_AFC_RANGE 0x3F9 /* RW AFC range */
#define REG_AFC_READ 0x3FA /* RW Readback frequency error */
/* REG_EXTPA_MSC */
#define PA_PWR(x) (((x) & 0xF) << 4)
#define EXTPA_BIAS_SRC BIT(3)
#define EXTPA_BIAS_MODE(x) (((x) & 0x7) << 0)
/* REG_PA_CFG */
#define PA_BRIDGE_DBIAS(x) (((x) & 0x1F) << 0)
#define PA_DBIAS_HIGH_POWER 21
#define PA_DBIAS_LOW_POWER 13
/* REG_PA_BIAS */
#define PA_BIAS_CTRL(x) (((x) & 0x1F) << 1)
#define REG_PA_BIAS_DFL BIT(0)
#define PA_BIAS_HIGH_POWER 63
#define PA_BIAS_LOW_POWER 55
#define REG_PAN_ID0 0x112
#define REG_PAN_ID1 0x113
#define REG_SHORT_ADDR_0 0x114
#define REG_SHORT_ADDR_1 0x115
#define REG_IEEE_ADDR_0 0x116
#define REG_IEEE_ADDR_1 0x117
#define REG_IEEE_ADDR_2 0x118
#define REG_IEEE_ADDR_3 0x119
#define REG_IEEE_ADDR_4 0x11A
#define REG_IEEE_ADDR_5 0x11B
#define REG_IEEE_ADDR_6 0x11C
#define REG_IEEE_ADDR_7 0x11D
#define REG_FFILT_CFG 0x11E
#define REG_AUTO_CFG 0x11F
#define REG_AUTO_TX1 0x120
#define REG_AUTO_TX2 0x121
#define REG_AUTO_STATUS 0x122
/* REG_FFILT_CFG */
#define ACCEPT_BEACON_FRAMES BIT(0)
#define ACCEPT_DATA_FRAMES BIT(1)
#define ACCEPT_ACK_FRAMES BIT(2)
#define ACCEPT_MACCMD_FRAMES BIT(3)
#define ACCEPT_RESERVED_FRAMES BIT(4)
#define ACCEPT_ALL_ADDRESS BIT(5)
/* REG_AUTO_CFG */
#define AUTO_ACK_FRAMEPEND BIT(0)
#define IS_PANCOORD BIT(1)
#define RX_AUTO_ACK_EN BIT(3)
#define CSMA_CA_RX_TURNAROUND BIT(4)
/* REG_AUTO_TX1 */
#define MAX_FRAME_RETRIES(x) ((x) & 0xF)
#define MAX_CCA_RETRIES(x) (((x) & 0x7) << 4)
/* REG_AUTO_TX2 */
#define CSMA_MAX_BE(x) ((x) & 0xF)
#define CSMA_MIN_BE(x) (((x) & 0xF) << 4)
#define CMD_SPI_NOP 0xFF /* No operation. Use for dummy writes */
#define CMD_SPI_PKT_WR 0x10 /* Write telegram to the Packet RAM
* starting from the TX packet base address
* pointer tx_packet_base
*/
#define CMD_SPI_PKT_RD 0x30 /* Read telegram from the Packet RAM
* starting from RX packet base address
* pointer rxpb.rx_packet_base
*/
#define CMD_SPI_MEM_WR(x) (0x18 + (x >> 8)) /* Write data to MCR or
* Packet RAM sequentially
*/
#define CMD_SPI_MEM_RD(x) (0x38 + (x >> 8)) /* Read data from MCR or
* Packet RAM sequentially
*/
#define CMD_SPI_MEMR_WR(x) (0x08 + (x >> 8)) /* Write data to MCR or Packet
* RAM as random block
*/
#define CMD_SPI_MEMR_RD(x) (0x28 + (x >> 8)) /* Read data from MCR or
* Packet RAM random block
*/
#define CMD_SPI_PRAM_WR 0x1E /* Write data sequentially to current
* PRAM page selected
*/
#define CMD_SPI_PRAM_RD 0x3E /* Read data sequentially from current
* PRAM page selected
*/
#define CMD_RC_SLEEP 0xB1 /* Invoke transition of radio controller
* into SLEEP state
*/
#define CMD_RC_IDLE 0xB2 /* Invoke transition of radio controller
* into IDLE state
*/
#define CMD_RC_PHY_RDY 0xB3 /* Invoke transition of radio controller
* into PHY_RDY state
*/
#define CMD_RC_RX 0xB4 /* Invoke transition of radio controller
* into RX state
*/
#define CMD_RC_TX 0xB5 /* Invoke transition of radio controller
* into TX state
*/
#define CMD_RC_MEAS 0xB6 /* Invoke transition of radio controller
* into MEAS state
*/
#define CMD_RC_CCA 0xB7 /* Invoke Clear channel assessment */
#define CMD_RC_CSMACA 0xC1 /* initiates CSMA-CA channel access
* sequence and frame transmission
*/
#define CMD_RC_PC_RESET 0xC7 /* Program counter reset */
#define CMD_RC_RESET 0xC8 /* Resets the ADF7242 and puts it in
* the sleep state
*/
#define CMD_RC_PC_RESET_NO_WAIT (CMD_RC_PC_RESET | BIT(31))
/* STATUS */
#define STAT_SPI_READY BIT(7)
#define STAT_IRQ_STATUS BIT(6)
#define STAT_RC_READY BIT(5)
#define STAT_CCA_RESULT BIT(4)
#define RC_STATUS_IDLE 1
#define RC_STATUS_MEAS 2
#define RC_STATUS_PHY_RDY 3
#define RC_STATUS_RX 4
#define RC_STATUS_TX 5
#define RC_STATUS_MASK 0xF
/* AUTO_STATUS */
#define SUCCESS 0
#define SUCCESS_DATPEND 1
#define FAILURE_CSMACA 2
#define FAILURE_NOACK 3
#define AUTO_STATUS_MASK 0x3
#define PRAM_PAGESIZE 256
/* IRQ1 */
#define IRQ_CCA_COMPLETE BIT(0)
#define IRQ_SFD_RX BIT(1)
#define IRQ_SFD_TX BIT(2)
#define IRQ_RX_PKT_RCVD BIT(3)
#define IRQ_TX_PKT_SENT BIT(4)
#define IRQ_FRAME_VALID BIT(5)
#define IRQ_ADDRESS_VALID BIT(6)
#define IRQ_CSMA_CA BIT(7)
#define AUTO_TX_TURNAROUND BIT(3)
#define ADDON_EN BIT(4)
#define FLAG_XMIT 0
#define FLAG_START 1
#define ADF7242_REPORT_CSMA_CA_STAT 0 /* framework doesn't handle yet */
/* Per-device driver state for one ADF7242 transceiver instance. */
struct adf7242_local {
struct spi_device *spi; /* underlying SPI slave */
struct completion tx_complete; /* completed when a TX interrupt arrives */
struct ieee802154_hw *hw; /* mac802154 hardware handle */
struct mutex bmux; /* protect SPI messages */
struct spi_message stat_msg; /* pre-built message for status polls */
struct spi_transfer stat_xfer; /* single transfer used by stat_msg */
struct dentry *debugfs_root;
unsigned long flags; /* FLAG_XMIT / FLAG_START bits */
int tx_stat; /* result of the last transmission */
bool promiscuous;
s8 rssi;
/* CSMA-CA / retry parameters pushed to the AUTO_TX registers */
u8 max_frame_retries;
u8 max_cca_retries;
u8 max_be;
u8 min_be;
/* DMA (thus cache coherency maintenance) requires the
 * transfer buffers to live in their own cache lines.
 */
u8 buf[3] ____cacheline_aligned;
u8 buf_reg_tx[3]; /* register write: command + address + value */
u8 buf_read_tx[4]; /* register read: command bytes sent */
u8 buf_read_rx[4]; /* register read: bytes clocked back */
u8 buf_stat_rx; /* status byte received by stat_msg */
u8 buf_stat_tx; /* NOP byte sent to clock the status out */
u8 buf_cmd; /* single-byte radio-controller command */
};
static int adf7242_soft_reset(struct adf7242_local *lp, int line);
/* Clock one status byte out of the chip using the pre-built stat_msg
 * transfer. Stores the byte in *stat and returns the spi_sync() result.
 */
static int adf7242_status(struct adf7242_local *lp, u8 *stat)
{
	int ret;

	mutex_lock(&lp->bmux);
	ret = spi_sync(lp->spi, &lp->stat_msg);
	*stat = lp->buf_stat_rx;
	mutex_unlock(&lp->bmux);

	return ret;
}
/* Poll the chip's status byte until (stat & mask) == status or
 * MAX_POLL_LOOPS iterations elapse. On timeout, if the radio controller
 * reports not-ready, attempt a soft reset and re-check once; @line is the
 * caller's source line, used only for diagnostics. Returns 0 on success
 * or -ETIMEDOUT.
 */
static int adf7242_wait_status(struct adf7242_local *lp, unsigned status,
unsigned mask, int line)
{
int cnt = 0, ret = 0;
u8 stat;
do {
adf7242_status(lp, &stat);
cnt++;
} while (((stat & mask) != status) && (cnt < MAX_POLL_LOOPS));
if (cnt >= MAX_POLL_LOOPS) {
ret = -ETIMEDOUT;
/* RC not ready: recovery path - soft reset, then one re-check */
if (!(stat & STAT_RC_READY)) {
adf7242_soft_reset(lp, line);
adf7242_status(lp, &stat);
if ((stat & mask) == status)
ret = 0;
}
if (ret < 0)
dev_warn(&lp->spi->dev,
"%s:line %d Timeout status 0x%x (%d)\n",
__func__, line, stat, cnt);
}
dev_vdbg(&lp->spi->dev, "%s : loops=%d line %d\n", __func__, cnt, line);
return ret;
}
/* Wait until both the radio controller and the SPI interface report
 * ready.  'line' is the caller's __LINE__ for diagnostics.
 */
static int adf7242_wait_ready(struct adf7242_local *lp, int line)
{
	const unsigned int ready_bits = STAT_RC_READY | STAT_SPI_READY;

	return adf7242_wait_status(lp, ready_bits, ready_bits, line);
}
/* Write a frame into the transmit packet buffer.  One SPI message: a
 * two-byte header (CMD_SPI_PKT_WR, then the PHR set to len + 2 — the +2
 * presumably reserves room for the FCS the radio appends; TODO confirm
 * against the ADF7242 datasheet) followed by the payload itself.
 * Returns the spi_sync() result.
 */
static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
{
	u8 *buf = lp->buf;
	int status;
	struct spi_message msg;
	struct spi_transfer xfer_head = {
		.len = 2,
		.tx_buf = buf,
	};
	struct spi_transfer xfer_buf = {
		.len = len,
		.tx_buf = data,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfer_head, &msg);
	spi_message_add_tail(&xfer_buf, &msg);

	adf7242_wait_ready(lp, __LINE__);

	mutex_lock(&lp->bmux);
	buf[0] = CMD_SPI_PKT_WR;
	buf[1] = len + 2;

	status = spi_sync(lp->spi, &msg);
	mutex_unlock(&lp->bmux);

	return status;
}
/* Read 'len' bytes either from the receive packet buffer (packet_read)
 * or from the currently selected PRAM page (!packet_read).  A 3-byte
 * command header is clocked out (and back) through lp->buf, then the
 * payload lands in 'data'.  Returns the spi_sync() result.
 */
static int adf7242_read_fbuf(struct adf7242_local *lp,
			     u8 *data, size_t len, bool packet_read)
{
	u8 *buf = lp->buf;
	int status;
	struct spi_message msg;
	struct spi_transfer xfer_head = {
		.len = 3,
		.tx_buf = buf,
		.rx_buf = buf,
	};
	struct spi_transfer xfer_buf = {
		.len = len,
		.rx_buf = data,
	};

	spi_message_init(&msg);
	spi_message_add_tail(&xfer_head, &msg);
	spi_message_add_tail(&xfer_buf, &msg);

	adf7242_wait_ready(lp, __LINE__);

	mutex_lock(&lp->bmux);
	if (packet_read) {
		buf[0] = CMD_SPI_PKT_RD;
		buf[1] = CMD_SPI_NOP;
		buf[2] = 0;	/* PHR */
	} else {
		buf[0] = CMD_SPI_PRAM_RD;
		buf[1] = 0;
		buf[2] = CMD_SPI_NOP;
	}

	status = spi_sync(lp->spi, &msg);
	mutex_unlock(&lp->bmux);

	return status;
}
/* Read a single register: 2-byte MEM_RD command + address, then two NOPs
 * while the value is clocked back in byte 3.  *data is updated only on
 * success.  NOTE(review): the dev_vdbg below dereferences *data even on
 * the failure path, so it may print the caller's uninitialized value.
 */
static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data)
{
	int status;
	struct spi_message msg;
	struct spi_transfer xfer = {
		.len = 4,
		.tx_buf = lp->buf_read_tx,
		.rx_buf = lp->buf_read_rx,
	};

	adf7242_wait_ready(lp, __LINE__);

	mutex_lock(&lp->bmux);
	lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr);
	lp->buf_read_tx[1] = addr;
	lp->buf_read_tx[2] = CMD_SPI_NOP;
	lp->buf_read_tx[3] = CMD_SPI_NOP;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	status = spi_sync(lp->spi, &msg);
	if (msg.status)
		status = msg.status;

	if (!status)
		*data = lp->buf_read_rx[3];

	mutex_unlock(&lp->bmux);

	dev_vdbg(&lp->spi->dev, "%s : REG 0x%X, VAL 0x%X\n", __func__,
		 addr, *data);

	return status;
}
/* Write a single register: MEM_WR command byte, address low byte, value.
 * Returns the spi_write() result.
 */
static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data)
{
	int status;

	adf7242_wait_ready(lp, __LINE__);

	mutex_lock(&lp->bmux);
	lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr);
	lp->buf_reg_tx[1] = addr;
	lp->buf_reg_tx[2] = data;
	status = spi_write(lp->spi, lp->buf_reg_tx, 3);
	mutex_unlock(&lp->bmux);

	dev_vdbg(&lp->spi->dev, "%s : REG 0x%X, VAL 0x%X\n",
		 __func__, addr, data);

	return status;
}
/* Issue a one-byte radio-controller command.  CMD_RC_PC_RESET_NO_WAIT
 * skips the ready poll (used while the RC may be wedged); its BIT(31)
 * marker is stripped implicitly because buf_cmd is a u8.
 */
static int adf7242_cmd(struct adf7242_local *lp, unsigned cmd)
{
	int status;

	dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd);

	if (cmd != CMD_RC_PC_RESET_NO_WAIT)
		adf7242_wait_ready(lp, __LINE__);

	mutex_lock(&lp->bmux);
	lp->buf_cmd = cmd;
	status = spi_write(lp->spi, &lp->buf_cmd, 1);
	mutex_unlock(&lp->bmux);

	return status;
}
/* Download the add-on firmware into PRAM, one PRAM_PAGESIZE page per SPI
 * message (2-byte PRAM_WR header + page payload), selecting each page via
 * REG_PRAMPG.  Returns 0 on success or the first failing spi_sync() code.
 *
 * Fixes vs. the original: the loop ran while i >= 0, which issued a
 * trailing zero-length SPI transfer whenever len was an exact multiple of
 * PRAM_PAGESIZE; per-page errors were also ignored (only the last
 * transfer's status was returned).
 */
static int adf7242_upload_firmware(struct adf7242_local *lp, u8 *data, u16 len)
{
	struct spi_message msg;
	struct spi_transfer xfer_buf = { };
	int status = 0, i, page = 0;
	u8 *buf = lp->buf;

	struct spi_transfer xfer_head = {
		.len = 2,
		.tx_buf = buf,
	};

	buf[0] = CMD_SPI_PRAM_WR;
	buf[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer_head, &msg);
	spi_message_add_tail(&xfer_buf, &msg);

	for (i = len; i > 0; i -= PRAM_PAGESIZE) {
		adf7242_write_reg(lp, REG_PRAMPG, page);

		xfer_buf.len = (i >= PRAM_PAGESIZE) ? PRAM_PAGESIZE : i;
		xfer_buf.tx_buf = &data[page * PRAM_PAGESIZE];

		mutex_lock(&lp->bmux);
		status = spi_sync(lp->spi, &msg);
		mutex_unlock(&lp->bmux);
		if (status)
			return status;	/* abort on the first failed page */
		page++;
	}

	return status;
}
/* Read the firmware back page by page and compare against the image.
 * Compiled to a no-op unless DEBUG is defined.  Returns 0 on match (or
 * when verification is compiled out), -ENOMEM or -EIO otherwise.
 *
 * Fix vs. the original: the loop ran while i >= 0, performing a spurious
 * zero-length page read when len was an exact multiple of PRAM_PAGESIZE.
 */
static int adf7242_verify_firmware(struct adf7242_local *lp,
				   const u8 *data, size_t len)
{
#ifdef DEBUG
	int i, j;
	unsigned int page;
	u8 *buf = kmalloc(PRAM_PAGESIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	for (page = 0, i = len; i > 0; i -= PRAM_PAGESIZE, page++) {
		size_t nb = (i >= PRAM_PAGESIZE) ? PRAM_PAGESIZE : i;

		adf7242_write_reg(lp, REG_PRAMPG, page);
		adf7242_read_fbuf(lp, buf, nb, false);

		for (j = 0; j < nb; j++) {
			if (buf[j] != data[page * PRAM_PAGESIZE + j]) {
				kfree(buf);
				return -EIO;
			}
		}
	}
	kfree(buf);
#endif
	return 0;
}
/* Set transmit power.  'mbm' is in mBm (100 * dBm); accepted range is
 * -26..+5 dBm.  The dB value is mapped onto the 4-bit PA_PWR code and the
 * PA bias/dbias pair is switched between low- and high-power settings
 * around mid scale.  Returns 0 or a negative error from the register write.
 */
static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
{
	struct adf7242_local *lp = hw->priv;
	u8 pwr, bias_ctrl, dbias, tmp;
	int db = mbm / 100;

	dev_vdbg(&lp->spi->dev, "%s : Power %d dB\n", __func__, db);

	if (db > 5 || db < -26)
		return -EINVAL;

	/* map -26..+5 dB onto the code range ~2..17, then clamp to 3..15 */
	db = DIV_ROUND_CLOSEST(db + 29, 2);

	if (db > 15) {
		dbias = PA_DBIAS_HIGH_POWER;
		bias_ctrl = PA_BIAS_HIGH_POWER;
	} else {
		dbias = PA_DBIAS_LOW_POWER;
		bias_ctrl = PA_BIAS_LOW_POWER;
	}

	pwr = clamp_t(u8, db, 3, 15);

	/* read-modify-write the three PA configuration fields */
	adf7242_read_reg(lp, REG_PA_CFG, &tmp);
	tmp &= ~PA_BRIDGE_DBIAS(~0);
	tmp |= PA_BRIDGE_DBIAS(dbias);
	adf7242_write_reg(lp, REG_PA_CFG, tmp);

	adf7242_read_reg(lp, REG_PA_BIAS, &tmp);
	tmp &= ~PA_BIAS_CTRL(~0);
	tmp |= PA_BIAS_CTRL(bias_ctrl);
	adf7242_write_reg(lp, REG_PA_BIAS, tmp);

	adf7242_read_reg(lp, REG_EXTPA_MSC, &tmp);
	tmp &= ~PA_PWR(~0);
	tmp |= PA_PWR(pwr);

	return adf7242_write_reg(lp, REG_EXTPA_MSC, tmp);
}
/* Configure CSMA-CA parameters: backoff exponent bounds and CCA retry
 * count.  AUTO_TX1 is rewritten with the cached frame-retry limit because
 * the register packs both fields.  The new values are cached only after
 * the first register write succeeds.
 */
static int adf7242_set_csma_params(struct ieee802154_hw *hw, u8 min_be,
				   u8 max_be, u8 retries)
{
	struct adf7242_local *lp = hw->priv;
	int ret;

	dev_vdbg(&lp->spi->dev, "%s : min_be=%d max_be=%d retries=%d\n",
		 __func__, min_be, max_be, retries);

	if (min_be > max_be || max_be > 8 || retries > 5)
		return -EINVAL;

	ret = adf7242_write_reg(lp, REG_AUTO_TX1,
				MAX_FRAME_RETRIES(lp->max_frame_retries) |
				MAX_CCA_RETRIES(retries));
	if (ret)
		return ret;

	lp->max_cca_retries = retries;
	lp->max_be = max_be;
	lp->min_be = min_be;

	return adf7242_write_reg(lp, REG_AUTO_TX2, CSMA_MAX_BE(max_be) |
				 CSMA_MIN_BE(min_be));
}
/* Set the MAC frame retransmission limit (-1..15; the register is only
 * written for retries >= 0).  NOTE(review): a negative value is still
 * cached in the u8 max_frame_retries field (wrapping to 255), which
 * set_csma_params() later feeds back into AUTO_TX1 — confirm intended.
 */
static int adf7242_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
	struct adf7242_local *lp = hw->priv;
	int ret = 0;

	dev_vdbg(&lp->spi->dev, "%s : Retries = %d\n", __func__, retries);

	if (retries < -1 || retries > 15)
		return -EINVAL;

	if (retries >= 0)
		ret = adf7242_write_reg(lp, REG_AUTO_TX1,
					MAX_FRAME_RETRIES(retries) |
					MAX_CCA_RETRIES(lp->max_cca_retries));

	lp->max_frame_retries = retries;

	return ret;
}
/* Energy-detect callback: reports the RSSI cached by the most recent
 * reception (no measurement is triggered here).  Always returns 0.
 */
static int adf7242_ed(struct ieee802154_hw *hw, u8 *level)
{
	struct adf7242_local *lp = hw->priv;
	u8 cached_rssi = lp->rssi;

	*level = cached_rssi;

	dev_vdbg(&lp->spi->dev, "%s :Exit level=%d\n",
		 __func__, cached_rssi);

	return 0;
}
/* Bring the interface up: enter PHY-ready, clear any stale IRQ1 sources,
 * re-enable the (probe-disabled) interrupt, mark started and enter RX.
 */
static int adf7242_start(struct ieee802154_hw *hw)
{
	struct adf7242_local *lp = hw->priv;

	adf7242_cmd(lp, CMD_RC_PHY_RDY);
	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);	/* ack everything */
	enable_irq(lp->spi->irq);
	set_bit(FLAG_START, &lp->flags);

	return adf7242_cmd(lp, CMD_RC_RX);
}
/* Take the interface down: idle the radio, mark stopped, mask the IRQ
 * and acknowledge any pending IRQ1 sources.
 */
static void adf7242_stop(struct ieee802154_hw *hw)
{
	struct adf7242_local *lp = hw->priv;

	adf7242_cmd(lp, CMD_RC_IDLE);
	clear_bit(FLAG_START, &lp->flags);
	disable_irq(lp->spi->irq);
	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);	/* ack everything */
}
/* Tune to a 2.4 GHz channel (page 0, channels 11-26).  The frequency is
 * programmed in units of 10 kHz (e.g. 240500 for channel 11's 2405 MHz),
 * split LSB-first across the three CH_FREQ byte registers.
 */
static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct adf7242_local *lp = hw->priv;
	unsigned long freq;

	dev_dbg(&lp->spi->dev, "%s :Channel=%d\n", __func__, channel);

	might_sleep();

	WARN_ON(page != 0);
	WARN_ON(channel < 11);
	WARN_ON(channel > 26);

	freq = (2405 + 5 * (channel - 11)) * 100;
	adf7242_cmd(lp, CMD_RC_PHY_RDY);	/* must leave RX to retune */

	adf7242_write_reg(lp, REG_CH_FREQ0, freq);
	adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
	adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);

	return adf7242_cmd(lp, CMD_RC_RX);
}
/* Program the hardware address filter: extended (IEEE) address, short
 * address, PAN ID and the PAN-coordinator flag, each only when its bit is
 * set in 'changed'.  Multi-byte values are written LSB-first into
 * consecutive byte registers.  Always returns 0.
 */
static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
				    struct ieee802154_hw_addr_filt *filt,
				    unsigned long changed)
{
	struct adf7242_local *lp = hw->priv;
	u8 reg;

	dev_dbg(&lp->spi->dev, "%s :Changed=0x%lX\n", __func__, changed);

	might_sleep();

	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
		u8 addr[8], i;

		memcpy(addr, &filt->ieee_addr, 8);

		for (i = 0; i < 8; i++)
			adf7242_write_reg(lp, REG_IEEE_ADDR_0 + i, addr[i]);
	}

	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
		u16 saddr = le16_to_cpu(filt->short_addr);

		adf7242_write_reg(lp, REG_SHORT_ADDR_0, saddr);
		adf7242_write_reg(lp, REG_SHORT_ADDR_1, saddr >> 8);
	}

	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
		u16 pan_id = le16_to_cpu(filt->pan_id);

		adf7242_write_reg(lp, REG_PAN_ID0, pan_id);
		adf7242_write_reg(lp, REG_PAN_ID1, pan_id >> 8);
	}

	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
		/* read-modify-write just the IS_PANCOORD bit */
		adf7242_read_reg(lp, REG_AUTO_CFG, &reg);
		if (filt->pan_coord)
			reg |= IS_PANCOORD;
		else
			reg &= ~IS_PANCOORD;
		adf7242_write_reg(lp, REG_AUTO_CFG, reg);
	}

	return 0;
}
/* Toggle promiscuous mode.  On: disable all automatic processing
 * (AUTO_CFG = 0, so no auto-ACK) and accept every frame type and address.
 * Off: restore the normal frame filter and re-enable automatic RX ACKs.
 * The setting is cached so adf7242_soft_reset() can reapply it.
 */
static int adf7242_set_promiscuous_mode(struct ieee802154_hw *hw, bool on)
{
	struct adf7242_local *lp = hw->priv;

	dev_dbg(&lp->spi->dev, "%s : mode %d\n", __func__, on);

	lp->promiscuous = on;

	if (on) {
		adf7242_write_reg(lp, REG_AUTO_CFG, 0);
		return adf7242_write_reg(lp, REG_FFILT_CFG,
					 ACCEPT_BEACON_FRAMES |
					 ACCEPT_DATA_FRAMES |
					 ACCEPT_MACCMD_FRAMES |
					 ACCEPT_ALL_ADDRESS |
					 ACCEPT_ACK_FRAMES |
					 ACCEPT_RESERVED_FRAMES);
	} else {
		adf7242_write_reg(lp, REG_FFILT_CFG,
				  ACCEPT_BEACON_FRAMES |
				  ACCEPT_DATA_FRAMES |
				  ACCEPT_MACCMD_FRAMES |
				  ACCEPT_RESERVED_FRAMES);

		return adf7242_write_reg(lp, REG_AUTO_CFG, RX_AUTO_ACK_EN);
	}
}
/* Program the CCA energy-detect threshold.  'mbm' is in mBm; the CCA1
 * register takes a signed dBm value, so convert and clamp to s8 range.
 */
static int adf7242_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
	struct adf7242_local *lp = hw->priv;
	s32 dbm = mbm / 100;
	s8 threshold = clamp_t(s8, dbm, S8_MIN, S8_MAX);

	dev_dbg(&lp->spi->dev, "%s : level %d\n", __func__, threshold);

	return adf7242_write_reg(lp, REG_CCA1, threshold);
}
/* Synchronous transmit: load the frame into the packet buffer, kick the
 * CSMA-CA auto-transmit state machine and wait (up to HZ/10) for the ISR
 * to complete tx_complete.  lp->tx_stat, set by the ISR, reports the
 * CSMA-CA outcome.  The radio is always returned to RX on exit.
 */
static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	struct adf7242_local *lp = hw->priv;
	int ret;

	set_bit(FLAG_XMIT, &lp->flags);	/* tells the ISR this IRQ is a TX */
	reinit_completion(&lp->tx_complete);
	adf7242_cmd(lp, CMD_RC_PHY_RDY);

	ret = adf7242_write_fbuf(lp, skb->data, skb->len);
	if (ret)
		goto err;

	ret = adf7242_cmd(lp, CMD_RC_CSMACA);
	if (ret)
		goto err;

	ret = wait_for_completion_interruptible_timeout(&lp->tx_complete,
							HZ / 10);
	if (ret < 0)
		goto err;
	if (ret == 0) {
		dev_dbg(&lp->spi->dev, "Timeout waiting for TX interrupt\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	if (lp->tx_stat != SUCCESS) {
		dev_dbg(&lp->spi->dev,
			"Error xmit: Retry count exceeded Status=0x%x\n",
			lp->tx_stat);
		ret = -ECOMM;
	} else {
		ret = 0;
	}

err:
	clear_bit(FLAG_XMIT, &lp->flags);
	adf7242_cmd(lp, CMD_RC_RX);

	return ret;
}
/* Fetch a received frame from the packet buffer and hand it to mac802154.
 * Memory address 0 holds the PHR (frame length); the last two read bytes
 * are LQI and RSSI, which are stripped from the skb before delivery.
 * The radio is put back into RX on every path.
 */
static int adf7242_rx(struct adf7242_local *lp)
{
	struct sk_buff *skb;
	size_t len;
	int ret;
	u8 lqi, len_u8, *data;

	adf7242_read_reg(lp, 0, &len_u8);	/* PHR = frame length */

	len = len_u8;

	if (!ieee802154_is_valid_psdu_len(len)) {
		dev_dbg(&lp->spi->dev,
			"corrupted frame received len %d\n", (int)len);
		len = IEEE802154_MTU;	/* read a full-size frame instead */
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		adf7242_cmd(lp, CMD_RC_RX);
		return -ENOMEM;
	}

	data = skb_put(skb, len);
	ret = adf7242_read_fbuf(lp, data, len, true);
	if (ret < 0) {
		kfree_skb(skb);
		adf7242_cmd(lp, CMD_RC_RX);
		return ret;
	}

	lqi = data[len - 2];
	lp->rssi = data[len - 1];	/* cached for adf7242_ed() */

	adf7242_cmd(lp, CMD_RC_RX);

	skb_trim(skb, len - 2);	/* Don't put RSSI/LQI or CRC into the frame */

	ieee802154_rx_irqsafe(lp->hw, skb, lqi);

	dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n",
		__func__, ret, (int)len, (int)lqi, lp->rssi);

	return 0;
}
/* mac802154 driver operations implemented by this driver */
static struct ieee802154_ops adf7242_ops = {
	.owner = THIS_MODULE,
	.xmit_sync = adf7242_xmit,
	.ed = adf7242_ed,
	.set_channel = adf7242_channel,
	.set_hw_addr_filt = adf7242_set_hw_addr_filt,
	.start = adf7242_start,
	.stop = adf7242_stop,
	.set_csma_params = adf7242_set_csma_params,
	.set_frame_retries = adf7242_set_frame_retries,
	.set_txpower = adf7242_set_txpower,
	.set_promiscuous_mode = adf7242_set_promiscuous_mode,
	.set_cca_ed_level = adf7242_set_cca_ed_level,
};
/* Dump the IRQ1 flags and radio-controller status when built with DEBUG;
 * compiles away otherwise.
 *
 * Fix vs. the original: the function body referenced an undeclared 'lp'
 * and the closing braces were mismatched inside the #ifdef, so any DEBUG
 * build failed to compile.  The driver state is now passed in explicitly
 * and the brace structure repaired.
 */
static void adf7242_debug(struct adf7242_local *lp, u8 irq1)
{
#ifdef DEBUG
	u8 stat;

	adf7242_status(lp, &stat);

	dev_dbg(&lp->spi->dev, "%s IRQ1 = %X:\n%s%s%s%s%s%s%s%s\n",
		__func__, irq1,
		irq1 & IRQ_CCA_COMPLETE ? "IRQ_CCA_COMPLETE\n" : "",
		irq1 & IRQ_SFD_RX ? "IRQ_SFD_RX\n" : "",
		irq1 & IRQ_SFD_TX ? "IRQ_SFD_TX\n" : "",
		irq1 & IRQ_RX_PKT_RCVD ? "IRQ_RX_PKT_RCVD\n" : "",
		irq1 & IRQ_TX_PKT_SENT ? "IRQ_TX_PKT_SENT\n" : "",
		irq1 & IRQ_CSMA_CA ? "IRQ_CSMA_CA\n" : "",
		irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
		irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");

	dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n",
		__func__, stat,
		stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
		(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
		(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
		(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
		(stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "",
		(stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : "");
#endif
}

/* Threaded interrupt handler: acknowledge IRQ1 sources and dispatch —
 * TX completion (CSMA-CA done), frame reception, or error recovery.
 */
static irqreturn_t adf7242_isr(int irq, void *data)
{
	struct adf7242_local *lp = data;
	unsigned xmit;
	u8 irq1;

	adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);

	adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
	adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1);	/* ack what we saw */

	if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
		dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n",
			__func__, irq1);

	adf7242_debug(lp, irq1);

	xmit = test_bit(FLAG_XMIT, &lp->flags);

	if (xmit && (irq1 & IRQ_CSMA_CA)) {
		if (ADF7242_REPORT_CSMA_CA_STAT) {
			u8 astat;

			adf7242_read_reg(lp, REG_AUTO_STATUS, &astat);
			astat &= AUTO_STATUS_MASK;

			dev_dbg(&lp->spi->dev, "AUTO_STATUS = %X:\n%s%s%s%s\n",
				astat,
				astat == SUCCESS ? "SUCCESS" : "",
				astat ==
				SUCCESS_DATPEND ? "SUCCESS_DATPEND" : "",
				astat == FAILURE_CSMACA ? "FAILURE_CSMACA" : "",
				astat == FAILURE_NOACK ? "FAILURE_NOACK" : "");

			/* save CSMA-CA completion status */
			lp->tx_stat = astat;
		} else {
			lp->tx_stat = SUCCESS;
		}
		complete(&lp->tx_complete);
	} else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) &&
		   (irq1 & IRQ_FRAME_VALID)) {
		adf7242_rx(lp);
	} else if (!xmit && test_bit(FLAG_START, &lp->flags)) {
		/* Invalid packet received - drop it and restart */
		dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n",
			__func__, __LINE__, irq1);
		adf7242_cmd(lp, CMD_RC_PHY_RDY);
		adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
		adf7242_cmd(lp, CMD_RC_RX);
	} else {
		/* This can only be xmit without IRQ, likely a RX packet.
		 * we get an TX IRQ shortly - do nothing or let the xmit
		 * timeout handle this
		 */
		dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n",
			__func__, __LINE__, irq1, xmit);
		complete(&lp->tx_complete);
	}

	return IRQ_HANDLED;
}
/* Recover a wedged radio controller: program-counter reset without the
 * ready poll, then reapply the cached configuration (packet config,
 * promiscuous setting, CSMA parameters).  If the interface was up, the
 * IRQ is re-enabled and RX resumed.  'line' identifies the caller for
 * the warning.
 */
static int adf7242_soft_reset(struct adf7242_local *lp, int line)
{
	dev_warn(&lp->spi->dev, "%s (line %d)\n", __func__, line);

	if (test_bit(FLAG_START, &lp->flags))
		disable_irq_nosync(lp->spi->irq);

	adf7242_cmd(lp, CMD_RC_PC_RESET_NO_WAIT);
	usleep_range(200, 250);	/* let the reset complete */
	adf7242_write_reg(lp, REG_PKT_CFG, ADDON_EN | BIT(2));
	adf7242_cmd(lp, CMD_RC_PHY_RDY);
	adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous);
	adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be,
				lp->max_cca_retries);
	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);

	if (test_bit(FLAG_START, &lp->flags)) {
		enable_irq(lp->spi->irq);
		return adf7242_cmd(lp, CMD_RC_RX);
	}

	return 0;
}
/* One-time hardware bring-up: full chip reset, download and verify the
 * add-on firmware, then program the default frame filter, auto-ACK,
 * analog front-end and IRQ configuration, leaving the radio idle.
 *
 * Fix vs. the original: the firmware was leaked (release_firmware() never
 * called) when the upload or the verification failed.
 */
static int adf7242_hw_init(struct adf7242_local *lp)
{
	int ret;
	const struct firmware *fw;

	adf7242_cmd(lp, CMD_RC_RESET);
	adf7242_cmd(lp, CMD_RC_IDLE);

	/* get ADF7242 addon firmware
	 * build this driver as module
	 * and place under /lib/firmware/adf7242_firmware.bin
	 * or compile firmware into the kernel.
	 */
	ret = request_firmware(&fw, FIRMWARE, &lp->spi->dev);
	if (ret) {
		dev_err(&lp->spi->dev,
			"request_firmware() failed with %d\n", ret);
		return ret;
	}

	ret = adf7242_upload_firmware(lp, (u8 *)fw->data, fw->size);
	if (ret) {
		dev_err(&lp->spi->dev,
			"upload firmware failed with %d\n", ret);
		release_firmware(fw);
		return ret;
	}

	ret = adf7242_verify_firmware(lp, (u8 *)fw->data, fw->size);
	if (ret) {
		dev_err(&lp->spi->dev,
			"verify firmware failed with %d\n", ret);
		release_firmware(fw);
		return ret;
	}

	adf7242_cmd(lp, CMD_RC_PC_RESET);

	release_firmware(fw);

	/* default filter: everything except ACKs and "accept all" */
	adf7242_write_reg(lp, REG_FFILT_CFG,
			  ACCEPT_BEACON_FRAMES |
			  ACCEPT_DATA_FRAMES |
			  ACCEPT_MACCMD_FRAMES |
			  ACCEPT_RESERVED_FRAMES);

	adf7242_write_reg(lp, REG_AUTO_CFG, RX_AUTO_ACK_EN);

	adf7242_write_reg(lp, REG_PKT_CFG, ADDON_EN | BIT(2));

	adf7242_write_reg(lp, REG_EXTPA_MSC, 0xF1);
	adf7242_write_reg(lp, REG_RXFE_CFG, 0x1D);

	/* only RX-complete and CSMA-CA-complete raise IRQ1 */
	adf7242_write_reg(lp, REG_IRQ1_EN0, 0);
	adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA);

	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
	adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF);

	adf7242_cmd(lp, CMD_RC_IDLE);

	return 0;
}
/* debugfs "status" seq_file: decode and print the pending IRQ1 sources,
 * the radio-controller status word and the last cached RSSI.
 */
static int adf7242_stats_show(struct seq_file *file, void *offset)
{
	struct adf7242_local *lp = spi_get_drvdata(file->private);
	u8 stat, irq1;

	adf7242_status(lp, &stat);
	adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);

	seq_printf(file, "IRQ1 = %X:\n%s%s%s%s%s%s%s%s\n", irq1,
		   irq1 & IRQ_CCA_COMPLETE ? "IRQ_CCA_COMPLETE\n" : "",
		   irq1 & IRQ_SFD_RX ? "IRQ_SFD_RX\n" : "",
		   irq1 & IRQ_SFD_TX ? "IRQ_SFD_TX\n" : "",
		   irq1 & IRQ_RX_PKT_RCVD ? "IRQ_RX_PKT_RCVD\n" : "",
		   irq1 & IRQ_TX_PKT_SENT ? "IRQ_TX_PKT_SENT\n" : "",
		   irq1 & IRQ_CSMA_CA ? "IRQ_CSMA_CA\n" : "",
		   irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
		   irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");

	seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat,
		   stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
		   (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
		   (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
		   (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
		   (stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "",
		   (stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : "");

	seq_printf(file, "RSSI = %d\n", lp->rssi);

	return 0;
}
/* Create the per-device debugfs directory ("adf7242-<dev>") with a
 * "status" seq_file.  Failure is non-fatal to the driver (probe ignores
 * the return value).
 *
 * Fixes vs. the original: strncat() with n = DNAME_INLINE_LEN could write
 * up to 8 bytes ("adf7242-") past the end of the buffer — snprintf() is
 * bounded and always NUL-terminates; the unreachable trailing
 * "return 0;" is dropped.
 */
static int adf7242_debugfs_init(struct adf7242_local *lp)
{
	char debugfs_dir_name[DNAME_INLINE_LEN + 1];
	struct dentry *stats;

	snprintf(debugfs_dir_name, sizeof(debugfs_dir_name), "adf7242-%s",
		 dev_name(&lp->spi->dev));

	lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
	if (IS_ERR_OR_NULL(lp->debugfs_root))
		return PTR_ERR_OR_ZERO(lp->debugfs_root);

	stats = debugfs_create_devm_seqfile(&lp->spi->dev, "status",
					    lp->debugfs_root,
					    adf7242_stats_show);
	return PTR_ERR_OR_ZERO(stats);
}
/* Supported TX power levels in mBm: +5 dBm down to -26 dBm in 1 dB steps */
static const s32 adf7242_powers[] = {
	500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
	-800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
	-1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
};

/* Supported CCA energy-detect thresholds in mBm, -90..-30 dBm.
 * NOTE(review): -3300 is absent between -3400 and -3200 — looks like a
 * typo rather than a hardware restriction; verify against the datasheet.
 */
static const s32 adf7242_ed_levels[] = {
	-9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100,
	-8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100,
	-7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100,
	-6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100,
	-5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100,
	-4000, -3900, -3800, -3700, -3600, -3500, -3400, -3200, -3100, -3000
};
/* SPI probe: allocate the ieee802154 device, advertise capabilities,
 * initialize the hardware (including firmware download), request the
 * threaded IRQ (left disabled until start), register with mac802154 and
 * create the debugfs entries.
 */
static int adf7242_probe(struct spi_device *spi)
{
	struct ieee802154_hw *hw;
	struct adf7242_local *lp;
	int ret, irq_type;

	if (!spi->irq) {
		dev_err(&spi->dev, "no IRQ specified\n");
		return -EINVAL;
	}

	hw = ieee802154_alloc_hw(sizeof(*lp), &adf7242_ops);
	if (!hw)
		return -ENOMEM;

	lp = hw->priv;
	lp->hw = hw;
	lp->spi = spi;

	hw->priv = lp;	/* NOTE(review): redundant — lp came from hw->priv */
	hw->parent = &spi->dev;
	hw->extra_tx_headroom = 0;

	/* We support only 2.4 Ghz */
	hw->phy->supported.channels[0] = 0x7FFF800;

	hw->flags = IEEE802154_HW_OMIT_CKSUM |
		    IEEE802154_HW_CSMA_PARAMS |
		    IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
		    IEEE802154_HW_PROMISCUOUS;

	hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
			 WPAN_PHY_FLAG_CCA_ED_LEVEL |
			 WPAN_PHY_FLAG_CCA_MODE;

	hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY);

	hw->phy->supported.cca_ed_levels = adf7242_ed_levels;
	hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(adf7242_ed_levels);

	hw->phy->cca.mode = NL802154_CCA_ENERGY;

	hw->phy->supported.tx_powers = adf7242_powers;
	hw->phy->supported.tx_powers_size = ARRAY_SIZE(adf7242_powers);

	hw->phy->supported.min_minbe = 0;
	hw->phy->supported.max_minbe = 8;

	hw->phy->supported.min_maxbe = 3;
	hw->phy->supported.max_maxbe = 8;

	hw->phy->supported.min_frame_retries = 0;
	hw->phy->supported.max_frame_retries = 15;

	hw->phy->supported.min_csma_backoffs = 0;
	hw->phy->supported.max_csma_backoffs = 5;

	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);

	mutex_init(&lp->bmux);
	init_completion(&lp->tx_complete);

	/* Setup Status Message */
	lp->stat_xfer.len = 1;
	lp->stat_xfer.tx_buf = &lp->buf_stat_tx;
	lp->stat_xfer.rx_buf = &lp->buf_stat_rx;
	lp->buf_stat_tx = CMD_SPI_NOP;

	spi_message_init(&lp->stat_msg);
	spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);

	spi_set_drvdata(spi, lp);

	ret = adf7242_hw_init(lp);
	if (ret)
		goto err_hw_init;

	/* fall back to level-high trigger if the firmware/DT gave none */
	irq_type = irq_get_trigger_type(spi->irq);
	if (!irq_type)
		irq_type = IRQF_TRIGGER_HIGH;

	ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL, adf7242_isr,
					irq_type | IRQF_ONESHOT,
					dev_name(&spi->dev), lp);
	if (ret)
		goto err_hw_init;

	/* kept masked until adf7242_start() */
	disable_irq(spi->irq);

	ret = ieee802154_register_hw(lp->hw);
	if (ret)
		goto err_hw_init;

	dev_set_drvdata(&spi->dev, lp);

	adf7242_debugfs_init(lp);	/* best effort, errors ignored */

	dev_info(&spi->dev, "mac802154 IRQ-%d registered\n", spi->irq);

	return ret;

err_hw_init:
	mutex_destroy(&lp->bmux);
	ieee802154_free_hw(lp->hw);

	return ret;
}
/* SPI remove: tear down debugfs, unregister from mac802154 and free the
 * device (the IRQ is devm-managed and released automatically).
 */
static int adf7242_remove(struct spi_device *spi)
{
	struct adf7242_local *lp = spi_get_drvdata(spi);

	if (!IS_ERR_OR_NULL(lp->debugfs_root))
		debugfs_remove_recursive(lp->debugfs_root);

	ieee802154_unregister_hw(lp->hw);
	mutex_destroy(&lp->bmux);
	ieee802154_free_hw(lp->hw);

	return 0;
}
/* Device-tree match table ("adi,adf7242") */
static const struct of_device_id adf7242_of_match[] = {
	{ .compatible = "adi,adf7242", },
	{ },
};
MODULE_DEVICE_TABLE(of, adf7242_of_match);

/* Legacy (non-DT) SPI device-id match */
static const struct spi_device_id adf7242_device_id[] = {
	{ .name = "adf7242", },
	{ },
};
MODULE_DEVICE_TABLE(spi, adf7242_device_id);

static struct spi_driver adf7242_driver = {
	.id_table = adf7242_device_id,
	.driver = {
		.of_match_table = of_match_ptr(adf7242_of_match),
		.name = "adf7242",
		.owner = THIS_MODULE,
	},
	.probe = adf7242_probe,
	.remove = adf7242_remove,
};

module_spi_driver(adf7242_driver);

MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADF7242 IEEE802.15.4 Transceiver Driver");
MODULE_LICENSE("GPL");
......@@ -310,7 +310,6 @@ static void atusb_free_urbs(struct atusb *atusb)
urb = usb_get_from_anchor(&atusb->idle_urbs);
if (!urb)
break;
if (urb->context)
kfree_skb(urb->context);
usb_free_urb(urb);
}
......
......@@ -53,6 +53,8 @@
#ifndef __6LOWPAN_H__
#define __6LOWPAN_H__
#include <linux/debugfs.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
......@@ -98,6 +100,7 @@ enum lowpan_lltypes {
struct lowpan_priv {
enum lowpan_lltypes lltype;
struct dentry *iface_debugfs;
/* must be last */
u8 priv[0] __aligned(sizeof(void *));
......@@ -185,7 +188,12 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
*hc_ptr += len;
}
void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
int lowpan_register_netdevice(struct net_device *dev,
enum lowpan_lltypes lltype);
int lowpan_register_netdev(struct net_device *dev,
enum lowpan_lltypes lltype);
void lowpan_unregister_netdevice(struct net_device *dev);
void lowpan_unregister_netdev(struct net_device *dev);
/**
* lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
......
......@@ -239,7 +239,6 @@ enum {
HCI_LE_ENABLED,
HCI_ADVERTISING,
HCI_ADVERTISING_CONNECTABLE,
HCI_ADVERTISING_INSTANCE,
HCI_CONNECTABLE,
HCI_DISCOVERABLE,
HCI_LIMITED_DISCOVERABLE,
......
......@@ -329,6 +329,9 @@ struct hci_dev {
struct work_struct discov_update;
struct work_struct bg_scan_update;
struct work_struct scan_update;
struct work_struct connectable_update;
struct work_struct discoverable_update;
struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart;
......@@ -1432,10 +1435,8 @@ int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_update_adv_data(struct hci_dev *hdev);
void mgmt_discoverable_timeout(struct hci_dev *hdev);
void mgmt_adv_timeout_expired(struct hci_dev *hdev);
void mgmt_power_on(struct hci_dev *hdev, int err);
void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
......@@ -1490,8 +1491,15 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 bdaddr_type, u8 store_hint, u16 min_interval,
u16 max_interval, u16 latency, u16 timeout);
void mgmt_reenable_advertising(struct hci_dev *hdev);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
......
......@@ -401,6 +401,21 @@ static inline void ipv6_addr_prefix(struct in6_addr *pfx,
pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}
/* Copy the leading 'plen' bits of 'pfx' into 'addr' while preserving the
 * remaining host bits of 'addr'.
 */
static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
					 const struct in6_addr *pfx,
					 int plen)
{
	/* caller must guarantee 0 <= plen <= 128 */
	int o = plen >> 3,	/* whole prefix bytes */
	    b = plen & 0x7;	/* remaining bits in the partial byte */

	memcpy(addr->s6_addr, pfx, o);
	if (b != 0) {
		/* clear the top b bits, then copy them in from the prefix */
		addr->s6_addr[o] &= ~(0xff00 >> b);
		addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
	}
}
static inline void __ipv6_addr_set_half(__be32 *addr,
__be32 wh, __be32 wl)
{
......
#ifndef __6LOWPAN_I_H
#define __6LOWPAN_I_H
#include <linux/netdevice.h>
#ifdef CONFIG_6LOWPAN_DEBUGFS
int lowpan_dev_debugfs_init(struct net_device *dev);
void lowpan_dev_debugfs_exit(struct net_device *dev);
int __init lowpan_debugfs_init(void);
void lowpan_debugfs_exit(void);
#else
static inline int lowpan_dev_debugfs_init(struct net_device *dev)
{
return 0;
}
static inline void lowpan_dev_debugfs_exit(struct net_device *dev) { }
static inline int __init lowpan_debugfs_init(void)
{
return 0;
}
static inline void lowpan_debugfs_exit(void) { }
#endif /* CONFIG_6LOWPAN_DEBUGFS */
#endif /* __6LOWPAN_I_H */
......@@ -5,12 +5,21 @@ menuconfig 6LOWPAN
This enables IPv6 over Low power Wireless Personal Area Network -
"6LoWPAN" which is supported by IEEE 802.15.4 or Bluetooth stacks.
config 6LOWPAN_DEBUGFS
bool "6LoWPAN debugfs support"
depends on 6LOWPAN
depends on DEBUG_FS
---help---
This enables 6LoWPAN debugfs support. For example to manipulate
IPHC context information at runtime.
menuconfig 6LOWPAN_NHC
tristate "Next Header Compression Support"
tristate "Next Header and Generic Header Compression Support"
depends on 6LOWPAN
default y
---help---
Support for next header compression.
Support for next header and generic header compression defined in
RFC6282 and RFC7400.
if 6LOWPAN_NHC
......@@ -58,4 +67,38 @@ config 6LOWPAN_NHC_UDP
---help---
6LoWPAN IPv6 UDP Header compression according to RFC6282.
config 6LOWPAN_GHC_EXT_HDR_HOP
tristate "GHC Hop-by-Hop Options Header Support"
---help---
6LoWPAN IPv6 Hop-by-Hop option generic header compression according
to RFC7400.
config 6LOWPAN_GHC_UDP
tristate "GHC UDP Support"
---help---
6LoWPAN IPv6 UDP generic header compression according to RFC7400.
config 6LOWPAN_GHC_ICMPV6
tristate "GHC ICMPv6 Support"
---help---
6LoWPAN IPv6 ICMPv6 generic header compression according to RFC7400.
config 6LOWPAN_GHC_EXT_HDR_DEST
tristate "GHC Destination Options Header Support"
---help---
6LoWPAN IPv6 destination option generic header compression according
to RFC7400.
config 6LOWPAN_GHC_EXT_HDR_FRAG
tristate "GHC Fragmentation Options Header Support"
---help---
6LoWPAN IPv6 fragmentation option generic header compression
according to RFC7400.
config 6LOWPAN_GHC_EXT_HDR_ROUTE
tristate "GHC Routing Options Header Support"
---help---
6LoWPAN IPv6 routing option generic header compression according
to RFC7400.
endif
obj-$(CONFIG_6LOWPAN) += 6lowpan.o
6lowpan-y := core.o iphc.o nhc.o
6lowpan-$(CONFIG_6LOWPAN_DEBUGFS) += debugfs.o
#rfc6282 nhcs
obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
......@@ -10,3 +11,11 @@ obj-$(CONFIG_6LOWPAN_NHC_IPV6) += nhc_ipv6.o
obj-$(CONFIG_6LOWPAN_NHC_MOBILITY) += nhc_mobility.o
obj-$(CONFIG_6LOWPAN_NHC_ROUTING) += nhc_routing.o
obj-$(CONFIG_6LOWPAN_NHC_UDP) += nhc_udp.o
#rfc7400 ghcs
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_HOP) += nhc_ghc_ext_hop.o
obj-$(CONFIG_6LOWPAN_GHC_UDP) += nhc_ghc_udp.o
obj-$(CONFIG_6LOWPAN_GHC_ICMPV6) += nhc_ghc_icmpv6.o
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_DEST) += nhc_ghc_ext_dest.o
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG) += nhc_ghc_ext_frag.o
obj-$(CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE) += nhc_ghc_ext_route.o
......@@ -15,19 +15,67 @@
#include <net/6lowpan.h>
void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
#include "6lowpan_i.h"
int lowpan_register_netdevice(struct net_device *dev,
enum lowpan_lltypes lltype)
{
int ret;
dev->addr_len = EUI64_ADDR_LEN;
dev->type = ARPHRD_6LOWPAN;
dev->mtu = IPV6_MIN_MTU;
dev->priv_flags |= IFF_NO_QUEUE;
lowpan_priv(dev)->lltype = lltype;
ret = lowpan_dev_debugfs_init(dev);
if (ret < 0)
return ret;
ret = register_netdevice(dev);
if (ret < 0)
lowpan_dev_debugfs_exit(dev);
return ret;
}
EXPORT_SYMBOL(lowpan_netdev_setup);
EXPORT_SYMBOL(lowpan_register_netdevice);
int lowpan_register_netdev(struct net_device *dev,
enum lowpan_lltypes lltype)
{
int ret;
rtnl_lock();
ret = lowpan_register_netdevice(dev, lltype);
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL(lowpan_register_netdev);
void lowpan_unregister_netdevice(struct net_device *dev)
{
unregister_netdevice(dev);
lowpan_dev_debugfs_exit(dev);
}
EXPORT_SYMBOL(lowpan_unregister_netdevice);
void lowpan_unregister_netdev(struct net_device *dev)
{
rtnl_lock();
lowpan_unregister_netdevice(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(lowpan_unregister_netdev);
static int __init lowpan_module_init(void)
{
int ret;
ret = lowpan_debugfs_init();
if (ret < 0)
return ret;
request_module_nowait("ipv6");
request_module_nowait("nhc_dest");
......@@ -40,6 +88,13 @@ static int __init lowpan_module_init(void)
return 0;
}
static void __exit lowpan_module_exit(void)
{
lowpan_debugfs_exit();
}
module_init(lowpan_module_init);
module_exit(lowpan_module_exit);
MODULE_LICENSE("GPL");
/* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Authors:
* (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
* Copyright (c) 2015 Nordic Semiconductor. All Rights Reserved.
*/
#include <net/6lowpan.h>
#include "6lowpan_i.h"
static struct dentry *lowpan_debugfs;
/* Create the per-interface debugfs directory under the 6lowpan root.
 * Returns 0 on success, -EINVAL if the directory could not be created.
 * (The original used a pointless goto for a plain error return.)
 */
int lowpan_dev_debugfs_init(struct net_device *dev)
{
	struct lowpan_priv *lpriv = lowpan_priv(dev);

	/* creating the root */
	lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
	if (!lpriv->iface_debugfs)
		return -EINVAL;

	return 0;
}
/* Remove the per-interface debugfs directory and everything in it */
void lowpan_dev_debugfs_exit(struct net_device *dev)
{
	debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs);
}
/* Create the global "6lowpan" debugfs root; -EINVAL on failure */
int __init lowpan_debugfs_init(void)
{
	lowpan_debugfs = debugfs_create_dir("6lowpan", NULL);
	if (!lowpan_debugfs)
		return -EINVAL;

	return 0;
}
/* Tear down the global 6lowpan debugfs tree (module exit) */
void lowpan_debugfs_exit(void)
{
	debugfs_remove_recursive(lowpan_debugfs);
}
/*
* 6LoWPAN Extension Header compression according to RFC7400
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_DEST_IDLEN 1
#define LOWPAN_GHC_EXT_DEST_ID_0 0xb6
#define LOWPAN_GHC_EXT_DEST_MASK_0 0xfe
/* Set the one-byte GHC dispatch value (0xb6) and mask (0xfe) used to
 * match the RFC 7400 destination options extension header.
 */
static void dest_ghid_setup(struct lowpan_nhc *nhc)
{
	nhc->id[0] = LOWPAN_GHC_EXT_DEST_ID_0;
	nhc->idmask[0] = LOWPAN_GHC_EXT_DEST_MASK_0;
}
LOWPAN_NHC(ghc_ext_dest, "RFC7400 Destination Extension Header", NEXTHDR_DEST,
0, dest_ghid_setup, LOWPAN_GHC_EXT_DEST_IDLEN, NULL, NULL);
module_lowpan_nhc(ghc_ext_dest);
MODULE_DESCRIPTION("6LoWPAN generic header destination extension compression");
MODULE_LICENSE("GPL");
/*
* 6LoWPAN Extension Header compression according to RFC7400
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_FRAG_IDLEN 1
#define LOWPAN_GHC_EXT_FRAG_ID_0 0xb4
#define LOWPAN_GHC_EXT_FRAG_MASK_0 0xfe
/* Set the one-byte GHC dispatch value (0xb4) and mask (0xfe) used to
 * match the RFC 7400 fragmentation extension header.
 */
static void frag_ghid_setup(struct lowpan_nhc *nhc)
{
	nhc->id[0] = LOWPAN_GHC_EXT_FRAG_ID_0;
	nhc->idmask[0] = LOWPAN_GHC_EXT_FRAG_MASK_0;
}
LOWPAN_NHC(ghc_ext_frag, "RFC7400 Fragmentation Extension Header",
NEXTHDR_FRAGMENT, 0, frag_ghid_setup,
LOWPAN_GHC_EXT_FRAG_IDLEN, NULL, NULL);
module_lowpan_nhc(ghc_ext_frag);
MODULE_DESCRIPTION("6LoWPAN generic header fragmentation extension compression");
MODULE_LICENSE("GPL");
/*
* 6LoWPAN Extension Header compression according to RFC7400
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_HOP_IDLEN 1
#define LOWPAN_GHC_EXT_HOP_ID_0 0xb0
#define LOWPAN_GHC_EXT_HOP_MASK_0 0xfe
/* Set the one-byte GHC dispatch value (0xb0) and mask (0xfe) used to
 * match the RFC 7400 hop-by-hop options extension header.
 */
static void hop_ghid_setup(struct lowpan_nhc *nhc)
{
	nhc->id[0] = LOWPAN_GHC_EXT_HOP_ID_0;
	nhc->idmask[0] = LOWPAN_GHC_EXT_HOP_MASK_0;
}
LOWPAN_NHC(ghc_ext_hop, "RFC7400 Hop-by-Hop Extension Header", NEXTHDR_HOP, 0,
hop_ghid_setup, LOWPAN_GHC_EXT_HOP_IDLEN, NULL, NULL);
module_lowpan_nhc(ghc_ext_hop);
MODULE_DESCRIPTION("6LoWPAN generic header hop-by-hop extension compression");
MODULE_LICENSE("GPL");
/*
* 6LoWPAN Extension Header compression according to RFC7400
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "nhc.h"
#define LOWPAN_GHC_EXT_ROUTE_IDLEN 1
#define LOWPAN_GHC_EXT_ROUTE_ID_0 0xb2
#define LOWPAN_GHC_EXT_ROUTE_MASK_0 0xfe
/* Set the one-byte GHC dispatch value (0xb2) and mask (0xfe) used to
 * match the RFC 7400 routing extension header.
 */
static void route_ghid_setup(struct lowpan_nhc *nhc)
{
	nhc->id[0] = LOWPAN_GHC_EXT_ROUTE_ID_0;
	nhc->idmask[0] = LOWPAN_GHC_EXT_ROUTE_MASK_0;
}
LOWPAN_NHC(ghc_ext_route, "RFC7400 Routing Extension Header", NEXTHDR_ROUTING,
0, route_ghid_setup, LOWPAN_GHC_EXT_ROUTE_IDLEN, NULL, NULL);
module_lowpan_nhc(ghc_ext_route);
MODULE_DESCRIPTION("6LoWPAN generic header routing extension compression");
MODULE_LICENSE("GPL");
/*
* 6LoWPAN ICMPv6 compression according to RFC7400
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "nhc.h"
#define LOWPAN_GHC_ICMPV6_IDLEN 1
#define LOWPAN_GHC_ICMPV6_ID_0 0xdf
#define LOWPAN_GHC_ICMPV6_MASK_0 0xff
/* Set the one-byte GHC dispatch value (0xdf, exact match with mask
 * 0xff) used to match RFC 7400 compressed ICMPv6.
 */
static void icmpv6_ghid_setup(struct lowpan_nhc *nhc)
{
	nhc->id[0] = LOWPAN_GHC_ICMPV6_ID_0;
	nhc->idmask[0] = LOWPAN_GHC_ICMPV6_MASK_0;
}
LOWPAN_NHC(ghc_icmpv6, "RFC7400 ICMPv6", NEXTHDR_ICMP, 0,
icmpv6_ghid_setup, LOWPAN_GHC_ICMPV6_IDLEN, NULL, NULL);
module_lowpan_nhc(ghc_icmpv6);
MODULE_DESCRIPTION("6LoWPAN generic header ICMPv6 compression");
MODULE_LICENSE("GPL");
/*
* 6LoWPAN UDP compression according to RFC7400
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "nhc.h"
#define LOWPAN_GHC_UDP_IDLEN 1
#define LOWPAN_GHC_UDP_ID_0 0xd0
#define LOWPAN_GHC_UDP_MASK_0 0xf8
/* Set the one-byte GHC dispatch value (0xd0) and mask (0xf8, low three
 * bits are payload-specific) used to match RFC 7400 compressed UDP.
 */
static void udp_ghid_setup(struct lowpan_nhc *nhc)
{
	nhc->id[0] = LOWPAN_GHC_UDP_ID_0;
	nhc->idmask[0] = LOWPAN_GHC_UDP_MASK_0;
}
LOWPAN_NHC(ghc_udp, "RFC7400 UDP", NEXTHDR_UDP, 0,
udp_ghid_setup, LOWPAN_GHC_UDP_IDLEN, NULL, NULL);
module_lowpan_nhc(ghc_udp);
MODULE_DESCRIPTION("6LoWPAN generic header UDP compression");
MODULE_LICENSE("GPL");
......@@ -825,9 +825,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
spin_unlock(&devices_lock);
lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
err = register_netdev(netdev);
err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
if (err < 0) {
BT_INFO("register_netdev failed %d", err);
spin_lock(&devices_lock);
......@@ -890,7 +888,7 @@ static void delete_netdev(struct work_struct *work)
struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
delete_netdev);
unregister_netdev(entry->netdev);
lowpan_unregister_netdev(entry->netdev);
/* The entry pointer is deleted by the netdev destructor. */
}
......@@ -1348,7 +1346,7 @@ static void disconnect_devices(void)
ifdown(entry->netdev);
BT_DBG("Unregistering netdev %s %p",
entry->netdev->name, entry->netdev);
unregister_netdev(entry->netdev);
lowpan_unregister_netdev(entry->netdev);
kfree(entry);
}
}
......
......@@ -186,8 +186,8 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
release_sock(sk);
bt_accept_unlink(sk);
release_sock(sk);
continue;
}
......
......@@ -608,8 +608,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
s->msg.msg_flags = MSG_NOSIGNAL;
#ifdef CONFIG_BT_BNEP_MC_FILTER
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
/* Set default mc filter to not filter out any mc addresses
* as defined in the BNEP specification (revision 0.95a)
* http://grouper.ieee.org/groups/802/15/Bluetooth/BNEP.pdf
*/
s->mc_filter = ~0LL;
#endif
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
......
......@@ -668,8 +668,16 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
conn->state = BT_CLOSED;
mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
status);
/* If the status indicates successful cancellation of
* the attempt (i.e. Unkown Connection Id) there's no point of
* notifying failure since we'll go back to keep trying to
* connect. The only exception is explicit connect requests
* where a timeout + cancel does indicate an actual failure.
*/
if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
(params && params->explicit_connect))
mgmt_connect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
hci_connect_cfm(conn, status);
......@@ -683,7 +691,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
/* Re-enable advertising in case this was a failed connection
* attempt as a peripheral.
*/
mgmt_reenable_advertising(hdev);
hci_req_reenable_advertising(hdev);
}
static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
......@@ -726,8 +734,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
if (hci_update_random_address(req, false, &own_addr_type))
return;
/* Set window to be the same value as the interval to enable
* continuous scanning.
*/
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
cp.scan_window = cpu_to_le16(hdev->le_scan_window);
cp.scan_window = cp.scan_interval;
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
cp.own_address_type = own_addr_type;
......
......@@ -1399,10 +1399,10 @@ static int hci_dev_do_open(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT) &&
hdev->dev_type == HCI_BREDR) {
hci_dev_lock(hdev);
mgmt_powered(hdev, 1);
hci_dev_unlock(hdev);
ret = __hci_req_hci_power_on(hdev);
mgmt_power_on(hdev, ret);
}
} else {
/* Init failed, cleanup */
......@@ -1537,7 +1537,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
flush_work(&hdev->rx_work);
if (hdev->discov_timeout > 0) {
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
......@@ -1549,11 +1548,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_MGMT))
cancel_delayed_work_sync(&hdev->rpa_expired);
if (hdev->adv_instance_timeout) {
cancel_delayed_work_sync(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
/* Avoid potential lockdep warnings from the *_flush() calls by
* ensuring the workqueue is empty up front.
*/
......@@ -1565,8 +1559,9 @@ int hci_dev_do_close(struct hci_dev *hdev)
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
if (!auto_off && hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
if (!auto_off && hdev->dev_type == HCI_BREDR &&
hci_dev_test_flag(hdev, HCI_MGMT))
__mgmt_power_off(hdev);
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
......@@ -1774,7 +1769,7 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
mgmt_update_adv_data(hdev);
hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
mgmt_new_settings(hdev);
}
......@@ -2019,6 +2014,16 @@ static void hci_power_on(struct work_struct *work)
BT_DBG("%s", hdev->name);
if (test_bit(HCI_UP, &hdev->flags) &&
hci_dev_test_flag(hdev, HCI_MGMT) &&
hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
hci_req_sync_lock(hdev);
err = __hci_req_hci_power_on(hdev);
hci_req_sync_unlock(hdev);
mgmt_power_on(hdev, err);
return;
}
err = hci_dev_do_open(hdev);
if (err < 0) {
hci_dev_lock(hdev);
......@@ -2101,28 +2106,6 @@ static void hci_error_reset(struct work_struct *work)
hci_dev_do_open(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
struct hci_dev *hdev;
hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
mgmt_discoverable_timeout(hdev);
}
static void hci_adv_timeout_expire(struct work_struct *work)
{
struct hci_dev *hdev;
hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
BT_DBG("%s", hdev->name);
mgmt_adv_timeout_expired(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
{
struct bt_uuid *uuid, *tmp;
......@@ -2627,10 +2610,13 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
BT_DBG("%s removing %dMR", hdev->name, instance);
if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
if (hdev->cur_adv_instance == instance) {
if (hdev->adv_instance_timeout) {
cancel_delayed_work(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
hdev->cur_adv_instance = 0x00;
}
list_del(&adv_instance->list);
kfree(adv_instance);
......@@ -2656,6 +2642,7 @@ void hci_adv_instances_clear(struct hci_dev *hdev)
}
hdev->adv_instance_cnt = 0;
hdev->cur_adv_instance = 0x00;
}
/* This function requires the caller holds hdev->lock */
......@@ -3002,8 +2989,6 @@ struct hci_dev *hci_alloc_dev(void)
INIT_WORK(&hdev->error_reset, hci_error_reset);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
......
......@@ -1183,7 +1183,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
hdev->discovery.state == DISCOVERY_FINDING)
mgmt_reenable_advertising(hdev);
hci_req_reenable_advertising(hdev);
break;
......@@ -2176,7 +2176,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
sizeof(cp), &cp);
hci_update_page_scan(hdev);
hci_req_update_scan(hdev);
}
/* Set packet type for incoming connection */
......@@ -2362,7 +2362,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
hci_remove_link_key(hdev, &conn->dst);
hci_update_page_scan(hdev);
hci_req_update_scan(hdev);
}
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
......@@ -2401,7 +2401,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
* is timed out due to Directed Advertising."
*/
if (type == LE_LINK)
mgmt_reenable_advertising(hdev);
hci_req_reenable_advertising(hdev);
unlock:
hci_dev_unlock(hdev);
......
......@@ -21,8 +21,11 @@
SOFTWARE IS DISCLAIMED.
*/
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "smp.h"
#include "hci_request.h"
......@@ -346,6 +349,311 @@ void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
hci_req_add_ev(req, opcode, plen, param, 0);
}
/* Queue the commands that switch the BR/EDR page scan parameters
 * between "fast connectable" (interlaced scan, 160 ms interval) and the
 * default (standard scan, 1.28 s interval).
 *
 * Nothing is queued when BR/EDR is disabled, when the controller's HCI
 * version predates Bluetooth 1.2, or when the controller already uses
 * the requested values.
 */
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only queue commands whose values actually changed */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
/* This function controls the background scanning based on hdev->pend_le_conns
* list. If there are pending LE connection we start the background scanning,
* otherwise we stop it.
*
* This function requires the caller holds hdev->lock.
*/
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Bail out while the stack is not fully up or the controller is
	 * being set up, configured or torn down.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}
/* Queue a Write Local Name command carrying the current device name. */
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, req->hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
#define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR structure listing all registered 16-bit service UUIDs
 * to @data (at most @len bytes remaining). UUIDs below 0x1100 and the
 * PnP Information service are skipped. When not everything fits, the
 * structure is tagged as an incomplete list. Returns the new write
 * position.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	struct bt_uuid *uuid;
	u8 *pos = data;
	u8 *hdr = NULL;

	if (len < 4)
		return pos;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 val;

		if (uuid->size != 16)
			continue;

		val = get_unaligned_le16(&uuid->uuid[12]);
		if (val < 0x1100 || val == PNP_INFO_SVCLASS_ID)
			continue;

		if (!hdr) {
			/* Lay down the AD structure header first */
			hdr = pos;
			hdr[0] = 1;
			hdr[1] = EIR_UUID16_ALL;
			pos += 2;
		}

		/* Out of room: mark the list as partial and stop */
		if ((pos - data) + sizeof(u16) > len) {
			hdr[1] = EIR_UUID16_SOME;
			break;
		}

		*pos++ = val & 0x00ff;
		*pos++ = (val & 0xff00) >> 8;
		hdr[0] += sizeof(val);
	}

	return pos;
}
/* Append an EIR structure listing all registered 32-bit service UUIDs
 * to @data (at most @len bytes remaining). When not everything fits,
 * the structure is tagged as an incomplete list. Returns the new write
 * position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	struct bt_uuid *uuid;
	u8 *pos = data;
	u8 *hdr = NULL;

	if (len < 6)
		return pos;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!hdr) {
			/* Lay down the AD structure header first */
			hdr = pos;
			hdr[0] = 1;
			hdr[1] = EIR_UUID32_ALL;
			pos += 2;
		}

		/* Out of room: mark the list as partial and stop */
		if ((pos - data) + sizeof(u32) > len) {
			hdr[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(pos, &uuid->uuid[12], sizeof(u32));
		pos += sizeof(u32);
		hdr[0] += sizeof(u32);
	}

	return pos;
}
/* Append an EIR structure listing all registered 128-bit service UUIDs
 * to @data (at most @len bytes remaining). When not everything fits,
 * the structure is tagged as an incomplete list. Returns the new write
 * position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	struct bt_uuid *uuid;
	u8 *pos = data;
	u8 *hdr = NULL;

	if (len < 18)
		return pos;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!hdr) {
			/* Lay down the AD structure header first */
			hdr = pos;
			hdr[0] = 1;
			hdr[1] = EIR_UUID128_ALL;
			pos += 2;
		}

		/* Out of room: mark the list as partial and stop */
		if ((pos - data) + 16 > len) {
			hdr[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(pos, uuid->uuid, 16);
		pos += 16;
		hdr[0] += 16;
	}

	return pos;
}
/* Build the extended inquiry response payload into @data: the local
 * name first (tagged as shortened when longer than 48 bytes), then the
 * TX power and device ID fields when their values are valid, and
 * finally as many 16-, 32- and 128-bit service UUIDs as still fit in
 * HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
/* Queue a Write Extended Inquiry Response command when the EIR payload
 * changed. Skipped when the controller is powered off, lacks extended
 * inquiry support, has SSP disabled, or while the service cache flag is
 * set.
 */
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev) || !lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to send when the payload is unchanged */
	if (!memcmp(cp.data, hdev->eir, sizeof(cp.data)))
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void hci_req_add_le_scan_disable(struct hci_request *req)
{
struct hci_cp_le_set_scan_enable cp;
......@@ -455,56 +763,533 @@ static u8 update_white_list(struct hci_request *req)
return 0x01;
}
void hci_req_add_le_passive_scan(struct hci_request *req)
/* Queue the commands that configure and enable LE passive scanning:
 * random address update, white list maintenance, scan parameters and
 * finally scan enable — in that order, since the controller rejects
 * white list changes while scanning.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are send
	 * during passive scanning. Not using an non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
/* Return the scan response length of the currently active advertising
 * instance, or 0 for instance 0x00 or an unknown instance.
 */
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	struct adv_info *adv;

	/* Instance 0x00 carries no instance-specific scan response */
	if (!hdev->cur_adv_instance)
		return 0;

	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv->scan_rsp_len;
}
/* Queue the command that turns LE advertising off. */
void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
/* Return the MGMT advertising flags for @instance. Instance 0x00 is
 * synthesized from the global settings; other instances report their
 * stored flags, or 0 when the identifier is unknown.
 */
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;
	u32 flags;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);

		/* Return 0 when we got an invalid instance identifier. */
		return adv ? adv->flags : 0;
	}

	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
	 * corresponds to the "connectable" instance flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
		flags |= MGMT_ADV_FLAG_CONNECTABLE;

	return flags;
}
/* Queue the commands that configure and enable LE advertising based on
 * the current advertising instance and the global connectable setting.
 * Does nothing while an LE connection exists; re-queues a disable first
 * when advertising is already active so the parameters can be changed.
 */
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* Pick the advertising type: connectable, scannable (when a scan
	 * response exists) or plain non-connectable.
	 */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
/* Build the default scan response payload (just the local name, when
 * set) into @ptr and return the number of bytes written.
 */
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	size_t name_len = strlen(hdev->dev_name);
	u8 ad_len = 0;

	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncate and tag as a shortened name when it doesn't fit */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else {
			ptr[1] = EIR_NAME_COMPLETE;
		}

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
/* Copy the scan response data of @instance into @ptr and return its
 * length; 0 when the instance identifier is unknown.
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv = hci_find_adv_instance(hdev, instance);

	if (!adv)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv->scan_rsp_data, adv->scan_rsp_len);

	return adv->scan_rsp_len;
}
/* Queue an LE Set Scan Response Data command for @instance (0x00 means
 * the default scan response built from the device name). Skipped when
 * LE is disabled or the payload is unchanged.
 */
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Remember what was sent so the next update can be skipped when
	 * nothing changed.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
/* Build the advertising data payload for @instance into @ptr and return
 * its length. The "Flags" AD field is emitted first when required by
 * the instance flags, then the instance's own advertising data, and
 * finally the TX power when valid and requested. Instance 0x00 uses the
 * global settings only.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
/* Queue an LE Set Advertising Data command for @instance. Skipped when
 * LE is disabled or when the generated payload matches what was sent
 * previously.
 */
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Remember what was sent so the next update can be skipped when
	 * nothing changed.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
/* Build and submit a standalone request that updates the advertising
 * data of @instance. Returns the result of hci_req_run().
 */
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}
/* Completion callback for the advertising re-enable request; only logs
 * the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
/* Re-enable advertising (callers in this file use it after LE
 * connection events). Does nothing unless advertising is enabled or at
 * least one advertising instance exists. Reschedules the current
 * instance when one is set; otherwise re-enables instance 0x00
 * advertising with refreshed adv and scan response data.
 */
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}
/* Delayed work run when the current advertising instance's timeout
 * expires: clears that instance (force == false, so it is kept stored
 * unless its remaining lifetime is zero) and disables advertising when
 * no instances are left.
 */
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
bool force)
{
struct hci_cp_le_set_scan_param param_cp;
struct hci_cp_le_set_scan_enable enable_cp;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
u8 filter_policy;
struct adv_info *adv_instance = NULL;
u16 timeout;
/* Set require_privacy to false since no SCAN_REQ are send
* during passive scanning. Not using an non-resolvable address
* here is important so that peer devices using direct
* advertising with our address will be correctly reported
* by the controller.
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances))
return -EPERM;
if (hdev->adv_instance_timeout)
return -EBUSY;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return -ENOENT;
/* A zero timeout means unlimited advertising. As long as there is
* only one instance, duration should be ignored. We still set a timeout
* in case further instances are being added later on.
*
* If the remaining lifetime of the instance is more than the duration
* then the timeout corresponds to the duration, otherwise it will be
* reduced to the remaining instance lifetime.
*/
if (hci_update_random_address(req, false, &own_addr_type))
return;
if (adv_instance->timeout == 0 ||
adv_instance->duration <= adv_instance->remaining_time)
timeout = adv_instance->duration;
else
timeout = adv_instance->remaining_time;
/* Adding or removing entries from the white list must
* happen before enabling scanning. The controller does
* not allow white list modification while scanning.
/* The remaining time is being reduced unless the instance is being
* advertised without time limit.
*/
filter_policy = update_white_list(req);
if (adv_instance->timeout)
adv_instance->remaining_time =
adv_instance->remaining_time - timeout;
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
* for handling directed advertising.
hdev->adv_instance_timeout = timeout;
queue_delayed_work(hdev->req_workqueue,
&hdev->adv_instance_expire,
msecs_to_jiffies(timeout * 1000));
/* If we're just re-scheduling the same instance again then do not
* execute any HCI commands. This happens when a single instance is
* being advertised.
*/
if (!force && hdev->cur_adv_instance == instance &&
hci_dev_test_flag(hdev, HCI_LE_ADV))
return 0;
hdev->cur_adv_instance = instance;
__hci_req_update_adv_data(req, instance);
__hci_req_update_scan_rsp_data(req, instance);
__hci_req_enable_advertising(req);
return 0;
}
/* Stop a pending advertising-instance expiry, if one is armed. */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	/* Nothing to do when no instance timeout is pending. */
	if (!hdev->adv_instance_timeout)
		return;

	hdev->adv_instance_timeout = 0;
	cancel_delayed_work(&hdev->adv_instance_expire);
}
/* For a single instance:
* - force == true: The instance will be removed even when its remaining
* lifetime is not zero.
* - force == false: the instance will be deactivated but kept stored unless
* the remaining lifetime is zero.
*
* So instead of using filter polices 0x00 (no whitelist)
* and 0x01 (whitelist enabled) use the new filter policies
* 0x02 (no whitelist) and 0x03 (whitelist enabled).
* For instance == 0x00:
* - force == true: All instances will be removed regardless of their timeout
* setting.
* - force == false: Only instances that have a timeout will be removed.
*/
if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
/* Remove advertising instance(s) and cancel their expiry timeout.
 *
 * For a single instance:
 * - force == true: the instance is removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance is deactivated but kept stored unless
 *   its remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: all instances are removed regardless of their timeout
 *   setting.
 * - force == false: only instances that have a timeout are removed.
 *
 * When @req is NULL, the controller is powered off, or userspace-driven
 * advertising (HCI_ADVERTISING) is active, no follow-up instance is
 * scheduled.
 *
 * NOTE(review): the original span contained interleaved LE passive-scan
 * setup lines (param_cp/enable_cp) referencing variables not declared in
 * this scope — diff-extraction residue removed here.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
......@@ -637,7 +1422,7 @@ static bool disconnected_whitelist_entries(struct hci_dev *hdev)
return false;
}
void __hci_update_page_scan(struct hci_request *req)
void __hci_req_update_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
u8 scan;
......@@ -657,95 +1442,168 @@ void __hci_update_page_scan(struct hci_request *req)
else
scan = SCAN_DISABLED;
if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
return;
if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
scan |= SCAN_INQUIRY;
if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
return;
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
void hci_update_page_scan(struct hci_dev *hdev)
static int update_scan(struct hci_request *req, unsigned long opt)
{
struct hci_request req;
hci_dev_lock(req->hdev);
__hci_req_update_scan(req);
hci_dev_unlock(req->hdev);
return 0;
}
hci_req_init(&req, hdev);
__hci_update_page_scan(&req);
hci_req_run(&req, NULL);
/* Work item: synchronously run the update_scan request on the request
 * workqueue whenever the scan state needs refreshing.
 */
static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
/* This function controls the background scanning based on hdev->pend_le_conns
* list. If there are pending LE connection we start the background scanning,
* otherwise we stop it.
*
* This function requires the caller holds hdev->lock.
static int connectable_update(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
hci_dev_lock(hdev);
__hci_req_update_scan(req);
/* If BR/EDR is not enabled and we disable advertising as a
* by-product of disabling connectable, we need to update the
* advertising flags.
*/
static void __hci_update_background_scan(struct hci_request *req)
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
__hci_req_update_adv_data(req, hdev->cur_adv_instance);
/* Update the advertising parameters if necessary */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
!list_empty(&hdev->adv_instances))
__hci_req_enable_advertising(req);
__hci_update_background_scan(req);
hci_dev_unlock(hdev);
return 0;
}
/* Work item: apply a pending Connectable change and report the resulting
 * status back to the management interface.
 */
static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}
/* OR together the service-class hint bits of all registered UUIDs; the
 * result forms the service-class octet of the Class of Device.
 */
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
/* Queue a Write Class of Device command reflecting the current
 * major/minor class and the service-class bits derived from the
 * registered UUIDs. No-op when the controller is powered off, BR/EDR is
 * disabled, the service cache is active, or the class is already
 * up to date.
 *
 * NOTE(review): the original span was interleaved with lines from
 * __hci_update_background_scan (LE flag checks, pend_le_conns handling,
 * scan disable) — diff-extraction residue removed here.
 */
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* The limited discoverable bit lives in the major class octet. */
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	/* Nothing to do when the class is unchanged. */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
BT_DBG("%s stopping background scanning", hdev->name);
} else {
/* If there is at least one pending LE connection, we should
* keep the background scan running.
*/
/* Queue a Write Current IAC LAP command matching the discoverable mode:
 * LIAC + GIAC in limited discoverable mode, GIAC only otherwise.
 * Does nothing when the controller is not discoverable.
 *
 * NOTE(review): the original span was interleaved with background-scan
 * lines (hci_lookup_le_connect, passive scan) — diff-extraction residue
 * removed here.
 */
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
BT_DBG("%s starting background scanning", hdev->name);
/* Synchronous request handler: apply a change of the Discoverable
 * setting (IAC, scan mode, device class and advertising data).
 * Always returns 0.
 */
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_adv_data(req, 0x00);

	hci_dev_unlock(hdev);

	return 0;
}
/* Work item: apply a pending Discoverable change and report the
 * resulting status back to the management interface.
 */
static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
......@@ -1024,14 +1882,6 @@ static void le_scan_restart_work(struct work_struct *work)
hci_dev_unlock(hdev);
}
static void cancel_adv_timeout(struct hci_dev *hdev)
{
if (hdev->adv_instance_timeout) {
hdev->adv_instance_timeout = 0;
cancel_delayed_work(&hdev->adv_instance_expire);
}
}
static void disable_advertising(struct hci_request *req)
{
u8 enable = 0x00;
......@@ -1266,12 +2116,134 @@ static void discov_update(struct work_struct *work)
}
}
/* Delayed work: the discoverable timeout fired. Clear the discoverable
 * flags, re-sync the controller state and notify userspace of the new
 * settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
u8 link_sec;
hci_dev_lock(hdev);
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
!lmp_host_ssp_capable(hdev)) {
u8 mode = 0x01;
hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
u8 support = 0x01;
hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
sizeof(support), &support);
}
}
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
cp.le = 0x01;
cp.simul = 0x00;
/* Check first if we already have the right
* host state (host features set)
*/
if (cp.le != lmp_host_le_capable(hdev) ||
cp.simul != lmp_host_le_br_capable(hdev))
hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
sizeof(cp), &cp);
}
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
/* Make sure the controller has a good default for
* advertising data. This also applies to the case
* where BR/EDR was toggled during the AUTO_OFF phase.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances)) {
__hci_req_update_adv_data(req, 0x00);
__hci_req_update_scan_rsp_data(req, 0x00);
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
__hci_req_enable_advertising(req);
} else if (!list_empty(&hdev->adv_instances)) {
struct adv_info *adv_instance;
adv_instance = list_first_entry(&hdev->adv_instances,
struct adv_info, list);
__hci_req_schedule_adv_instance(req,
adv_instance->instance,
true);
}
}
link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
sizeof(link_sec), &link_sec);
if (lmp_bredr_capable(hdev)) {
if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
__hci_req_write_fast_connectable(req, true);
else
__hci_req_write_fast_connectable(req, false);
__hci_req_update_scan(req);
__hci_req_update_class(req);
__hci_req_update_name(req);
__hci_req_update_eir(req);
}
hci_dev_unlock(hdev);
return 0;
}
/* Finalize power-on: register the SMP channels and synchronize the
 * controller with the current host configuration. Returns the
 * __hci_req_sync() result.
 */
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
/* Initialize all work items and delayed works owned by the request layer. */
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
void hci_request_cancel_all(struct hci_dev *hdev)
......@@ -1280,6 +2252,15 @@ void hci_request_cancel_all(struct hci_dev *hdev)
cancel_work_sync(&hdev->discov_update);
cancel_work_sync(&hdev->bg_scan_update);
cancel_work_sync(&hdev->scan_update);
cancel_work_sync(&hdev->connectable_update);
cancel_work_sync(&hdev->discoverable_update);
cancel_delayed_work_sync(&hdev->discov_off);
cancel_delayed_work_sync(&hdev->le_scan_disable);
cancel_delayed_work_sync(&hdev->le_scan_restart);
if (hdev->adv_instance_timeout) {
cancel_delayed_work_sync(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
}
......@@ -55,14 +55,38 @@ void hci_req_sync_cancel(struct hci_dev *hdev, int err);
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
int __hci_req_hci_power_on(struct hci_dev *hdev);
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable);
void __hci_req_update_name(struct hci_request *req);
void __hci_req_update_eir(struct hci_request *req);
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
void __hci_req_update_adv_data(struct hci_request *req, u8 instance);
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance);
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
bool force);
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
u8 instance, bool force);
void __hci_req_update_class(struct hci_request *req);
/* Returns true if HCI commands were queued */
bool hci_req_stop_discovery(struct hci_request *req);
void hci_update_page_scan(struct hci_dev *hdev);
void __hci_update_page_scan(struct hci_request *req);
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
queue_work(hdev->req_workqueue, &hdev->scan_update);
}
void __hci_req_update_scan(struct hci_request *req);
int hci_update_random_address(struct hci_request *req, bool require_privacy,
u8 *own_addr_type);
......
......@@ -719,116 +719,6 @@ static u32 get_current_settings(struct hci_dev *hdev)
return settings;
}
#define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit service UUIDs.
 *
 * @data: current write position inside the EIR buffer
 * @len:  bytes remaining in the buffer
 *
 * Emits EIR_UUID16_ALL, downgraded to EIR_UUID16_SOME when the buffer
 * cannot hold every UUID. Returns the new write position.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit value lives at offset 12 of the 128-bit form. */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Skip values below the service-class range. */
		if (uuid16 < 0x1100)
			continue;

		/* PnP Information is carried in the Device ID field. */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
u8 *ptr = data, *uuids_start = NULL;
struct bt_uuid *uuid;
if (len < 6)
return ptr;
list_for_each_entry(uuid, &hdev->uuids, list) {
if (uuid->size != 32)
continue;
if (!uuids_start) {
uuids_start = ptr;
uuids_start[0] = 1;
uuids_start[1] = EIR_UUID32_ALL;
ptr += 2;
}
/* Stop if not enough space to put next UUID */
if ((ptr - data) + sizeof(u32) > len) {
uuids_start[1] = EIR_UUID32_SOME;
break;
}
memcpy(ptr, &uuid->uuid[12], sizeof(u32));
ptr += sizeof(u32);
uuids_start[0] += sizeof(u32);
}
return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
u8 *ptr = data, *uuids_start = NULL;
struct bt_uuid *uuid;
if (len < 18)
return ptr;
list_for_each_entry(uuid, &hdev->uuids, list) {
if (uuid->size != 128)
continue;
if (!uuids_start) {
uuids_start = ptr;
uuids_start[0] = 1;
uuids_start[1] = EIR_UUID128_ALL;
ptr += 2;
}
/* Stop if not enough space to put next UUID */
if ((ptr - data) + 16 > len) {
uuids_start[1] = EIR_UUID128_SOME;
break;
}
memcpy(ptr, uuid->uuid, 16);
ptr += 16;
uuids_start[0] += 16;
}
return ptr;
}
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
......@@ -841,98 +731,7 @@ static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
/* Return the advertising instance whose data should be programmed now. */
static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
u8 ad_len = 0;
size_t name_len;
name_len = strlen(hdev->dev_name);
if (name_len > 0) {
size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
if (name_len > max_len) {
name_len = max_len;
ptr[1] = EIR_NAME_SHORT;
} else
ptr[1] = EIR_NAME_COMPLETE;
ptr[0] = name_len + 1;
memcpy(ptr + 2, hdev->dev_name, name_len);
ad_len += (name_len + 2);
ptr += (name_len + 2);
}
return ad_len;
}
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
u8 *ptr)
{
struct adv_info *adv_instance;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return 0;
/* TODO: Set the appropriate entries based on advertising instance flags
* here once flags other than 0 are supported.
*/
memcpy(ptr, adv_instance->scan_rsp_data,
adv_instance->scan_rsp_len);
return adv_instance->scan_rsp_len;
}
static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_scan_rsp_data cp;
u8 len;
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
memset(&cp, 0, sizeof(cp));
if (instance)
len = create_instance_scan_rsp_data(hdev, instance, cp.data);
else
len = create_default_scan_rsp_data(hdev, cp.data);
if (hdev->scan_rsp_data_len == len &&
!memcmp(cp.data, hdev->scan_rsp_data, len))
return;
memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
hdev->scan_rsp_data_len = len;
cp.length = len;
hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
static void update_scan_rsp_data(struct hci_request *req)
{
update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
}
static u8 get_adv_discov_flags(struct hci_dev *hdev)
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
struct mgmt_pending_cmd *cmd;
......@@ -956,7 +755,7 @@ static u8 get_adv_discov_flags(struct hci_dev *hdev)
return 0;
}
static bool get_connectable(struct hci_dev *hdev)
bool mgmt_get_connectable(struct hci_dev *hdev)
{
struct mgmt_pending_cmd *cmd;
......@@ -973,344 +772,6 @@ static bool get_connectable(struct hci_dev *hdev)
return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
u32 flags;
struct adv_info *adv_instance;
if (instance == 0x00) {
/* Instance 0 always manages the "Tx Power" and "Flags"
* fields
*/
flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
* corresponds to the "connectable" instance flag.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
flags |= MGMT_ADV_FLAG_CONNECTABLE;
return flags;
}
adv_instance = hci_find_adv_instance(hdev, instance);
/* Return 0 when we got an invalid instance identifier. */
if (!adv_instance)
return 0;
return adv_instance->flags;
}
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
u8 instance = get_current_adv_instance(hdev);
struct adv_info *adv_instance;
/* Ignore instance 0 */
if (instance == 0x00)
return 0;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return 0;
/* TODO: Take into account the "appearance" and "local-name" flags here.
* These are currently being ignored as they are not supported.
*/
return adv_instance->scan_rsp_len;
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
struct adv_info *adv_instance = NULL;
u8 ad_len = 0, flags = 0;
u32 instance_flags;
/* Return 0 when the current instance identifier is invalid. */
if (instance) {
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return 0;
}
instance_flags = get_adv_instance_flags(hdev, instance);
/* The Add Advertising command allows userspace to set both the general
* and limited discoverable flags.
*/
if (instance_flags & MGMT_ADV_FLAG_DISCOV)
flags |= LE_AD_GENERAL;
if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
flags |= LE_AD_LIMITED;
if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
/* If a discovery flag wasn't provided, simply use the global
* settings.
*/
if (!flags)
flags |= get_adv_discov_flags(hdev);
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
flags |= LE_AD_NO_BREDR;
/* If flags would still be empty, then there is no need to
* include the "Flags" AD field".
*/
if (flags) {
ptr[0] = 0x02;
ptr[1] = EIR_FLAGS;
ptr[2] = flags;
ad_len += 3;
ptr += 3;
}
}
if (adv_instance) {
memcpy(ptr, adv_instance->adv_data,
adv_instance->adv_data_len);
ad_len += adv_instance->adv_data_len;
ptr += adv_instance->adv_data_len;
}
/* Provide Tx Power only if we can provide a valid value for it */
if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
(instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
ptr[0] = 0x02;
ptr[1] = EIR_TX_POWER;
ptr[2] = (u8)hdev->adv_tx_power;
ad_len += 3;
ptr += 3;
}
return ad_len;
}
static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_adv_data cp;
u8 len;
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
memset(&cp, 0, sizeof(cp));
len = create_instance_adv_data(hdev, instance, cp.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
memcmp(cp.data, hdev->adv_data, len) == 0)
return;
memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
hdev->adv_data_len = len;
cp.length = len;
hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
static void update_adv_data(struct hci_request *req)
{
update_inst_adv_data(req, get_current_adv_instance(req->hdev));
}
/* Build and run a standalone request that refreshes the advertising data
 * of the current instance. Returns the hci_req_run() result.
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
u8 *ptr = data;
size_t name_len;
name_len = strlen(hdev->dev_name);
if (name_len > 0) {
/* EIR Data type */
if (name_len > 48) {
name_len = 48;
ptr[1] = EIR_NAME_SHORT;
} else
ptr[1] = EIR_NAME_COMPLETE;
/* EIR Data length */
ptr[0] = name_len + 1;
memcpy(ptr + 2, hdev->dev_name, name_len);
ptr += (name_len + 2);
}
if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
ptr[0] = 2;
ptr[1] = EIR_TX_POWER;
ptr[2] = (u8) hdev->inq_tx_power;
ptr += 3;
}
if (hdev->devid_source > 0) {
ptr[0] = 9;
ptr[1] = EIR_DEVICE_ID;
put_unaligned_le16(hdev->devid_source, ptr + 2);
put_unaligned_le16(hdev->devid_vendor, ptr + 4);
put_unaligned_le16(hdev->devid_product, ptr + 6);
put_unaligned_le16(hdev->devid_version, ptr + 8);
ptr += 10;
}
ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
static void update_eir(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_write_eir cp;
if (!hdev_is_powered(hdev))
return;
if (!lmp_ext_inq_capable(hdev))
return;
if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
return;
if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
return;
memset(&cp, 0, sizeof(cp));
create_eir(hdev, cp.data);
if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
return;
memcpy(hdev->eir, cp.data, sizeof(cp.data));
hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
static u8 get_service_classes(struct hci_dev *hdev)
{
struct bt_uuid *uuid;
u8 val = 0;
list_for_each_entry(uuid, &hdev->uuids, list)
val |= uuid->svc_hint;
return val;
}
/* Queue a Write Class of Device command if the computed class differs
 * from the cached one. No-op when powered off, BR/EDR is disabled or
 * the service cache is active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* The limited discoverable bit lives in the major class octet. */
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
/* Queue an LE Set Advertising Enable command turning advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
static void enable_advertising(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type, enable = 0x01;
bool connectable;
u8 instance;
u32 flags;
if (hci_conn_num(hdev, LE_LINK) > 0)
return;
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(req);
/* Clear the HCI_LE_ADV bit temporarily so that the
* hci_update_random_address knows that it's safe to go ahead
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
instance = get_current_adv_instance(hdev);
flags = get_adv_instance_flags(hdev, instance);
/* If the "connectable" instance flag was not set, then choose between
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
*/
connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
get_connectable(hdev);
/* Set require_privacy to true only when non-connectable
* advertising is used. In that case it is fine to use a
* non-resolvable private address.
*/
if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
return;
memset(&cp, 0, sizeof(cp));
cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
if (connectable)
cp.type = LE_ADV_IND;
else if (get_cur_adv_instance_scan_rsp_len(hdev))
cp.type = LE_ADV_SCAN_IND;
else
cp.type = LE_ADV_NONCONN_IND;
cp.own_address_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
static void service_cache_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
......@@ -1324,8 +785,8 @@ static void service_cache_off(struct work_struct *work)
hci_dev_lock(hdev);
update_eir(&req);
update_class(&req);
__hci_req_update_eir(&req);
__hci_req_update_class(&req);
hci_dev_unlock(hdev);
......@@ -1346,10 +807,11 @@ static void rpa_expired(struct work_struct *work)
return;
/* The generation of a new RPA and programming it into the
* controller happens in the enable_advertising() function.
* controller happens in the hci_req_enable_advertising()
* function.
*/
hci_req_init(&req, hdev);
enable_advertising(&req);
__hci_req_enable_advertising(&req);
hci_req_run(&req, NULL);
}
......@@ -1417,8 +879,7 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
}
}
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance)
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
struct mgmt_ev_advertising_added ev;
......@@ -1427,151 +888,22 @@ static void advertising_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
/* Emit an Advertising Removed management event for @instance.
 * @sk may be NULL; it identifies the socket to skip when broadcasting.
 *
 * NOTE(review): the original span stacked the stale removed-side
 * signature (static advertising_removed) above the real one —
 * diff-extraction residue dropped here.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
static int schedule_adv_instance(struct hci_request *req, u8 instance,
bool force) {
struct hci_dev *hdev = req->hdev;
struct adv_info *adv_instance = NULL;
u16 timeout;
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
return -EPERM;
if (hdev->adv_instance_timeout)
return -EBUSY;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return -ENOENT;
/* A zero timeout means unlimited advertising. As long as there is
* only one instance, duration should be ignored. We still set a timeout
* in case further instances are being added later on.
*
* If the remaining lifetime of the instance is more than the duration
* then the timeout corresponds to the duration, otherwise it will be
* reduced to the remaining instance lifetime.
*/
if (adv_instance->timeout == 0 ||
adv_instance->duration <= adv_instance->remaining_time)
timeout = adv_instance->duration;
else
timeout = adv_instance->remaining_time;
/* The remaining time is being reduced unless the instance is being
* advertised without time limit.
*/
if (adv_instance->timeout)
adv_instance->remaining_time =
adv_instance->remaining_time - timeout;
hdev->adv_instance_timeout = timeout;
queue_delayed_work(hdev->workqueue,
&hdev->adv_instance_expire,
msecs_to_jiffies(timeout * 1000));
/* If we're just re-scheduling the same instance again then do not
* execute any HCI commands. This happens when a single instance is
* being advertised.
*/
if (!force && hdev->cur_adv_instance == instance &&
hci_dev_test_flag(hdev, HCI_LE_ADV))
return 0;
hdev->cur_adv_instance = instance;
update_adv_data(req);
update_scan_rsp_data(req);
enable_advertising(req);
return 0;
}
/* Stop a pending advertising-instance expiry, if one is armed. */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* For a single instance:
* - force == true: The instance will be removed even when its remaining
* lifetime is not zero.
* - force == false: the instance will be deactivated but kept stored unless
* the remaining lifetime is zero.
*
* For instance == 0x00:
* - force == true: All instances will be removed regardless of their timeout
* setting.
* - force == false: Only instances that have a timeout will be removed.
*/
/* Remove advertising instance(s) per the force/instance semantics
 * documented in the comment block above, clear the instance bookkeeping
 * when the list becomes empty, and schedule the next instance when
 * appropriate.
 *
 * NOTE(review): the original tail was interleaved with lines from
 * advertising_removed (mgmt_ev_advertising_removed ev, mgmt_event) —
 * diff-extraction residue removed here.
 */
static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
			       u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		schedule_adv_instance(req, next_instance->instance, false);
}
static void cancel_adv_timeout(struct hci_dev *hdev)
{
if (hdev->adv_instance_timeout) {
hdev->adv_instance_timeout = 0;
cancel_delayed_work(&hdev->adv_instance_expire);
}
}
static int clean_up_hci_state(struct hci_dev *hdev)
......@@ -1589,10 +921,10 @@ static int clean_up_hci_state(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
clear_adv_instance(hdev, NULL, 0x00, false);
hci_req_clear_adv_instance(hdev, NULL, 0x00, false);
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(&req);
__hci_req_disable_advertising(&req);
discov_stopped = hci_req_stop_discovery(&req);
......@@ -1629,17 +961,6 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
cancel_delayed_work(&hdev->power_off);
if (cp->val) {
mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
data, len);
err = mgmt_powered(hdev, 1);
goto failed;
}
}
if (!!cp->val == hdev_is_powered(hdev)) {
err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
goto failed;
......@@ -1763,13 +1084,9 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
return MGMT_STATUS_SUCCESS;
}
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
struct hci_request req;
bool changed;
BT_DBG("status 0x%02x", status);
......@@ -1786,34 +1103,15 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
goto remove_cmd;
}
cp = cmd->param;
if (cp->val) {
changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
if (hdev->discov_timeout > 0) {
if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
hdev->discov_timeout > 0) {
int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
queue_delayed_work(hdev->workqueue, &hdev->discov_off,
to);
}
} else {
changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
}
send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
if (changed)
new_settings(hdev, cmd->sk);
/* When the discoverable mode gets changed, make sure
* that class of device has the limited discoverable
* bit correctly set. Also update page scan based on whitelist
* entries.
*/
hci_req_init(&req, hdev);
__hci_update_page_scan(&req);
update_class(&req);
hci_req_run(&req, NULL);
remove_cmd:
mgmt_pending_remove(cmd);
......@@ -1826,9 +1124,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_set_discoverable *cp = data;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
u16 timeout;
u8 scan;
int err;
BT_DBG("request for %s", hdev->name);
......@@ -1907,8 +1203,8 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->val && hdev->discov_timeout > 0) {
int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
queue_delayed_work(hdev->workqueue, &hdev->discov_off,
to);
queue_delayed_work(hdev->req_workqueue,
&hdev->discov_off, to);
}
err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
......@@ -1928,105 +1224,28 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = timeout;
if (cp->val)
hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
else
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
/* Limited discoverable mode */
if (cp->val == 0x02)
hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
else
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
hci_req_init(&req, hdev);
/* The procedure for LE-only controllers is much simpler - just
* update the advertising data.
*/
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
goto update_ad;
scan = SCAN_PAGE;
if (cp->val) {
struct hci_cp_write_current_iac_lap hci_cp;
if (cp->val == 0x02) {
/* Limited discoverable mode */
hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
hci_cp.iac_lap[0] = 0x00; /* LIAC */
hci_cp.iac_lap[1] = 0x8b;
hci_cp.iac_lap[2] = 0x9e;
hci_cp.iac_lap[3] = 0x33; /* GIAC */
hci_cp.iac_lap[4] = 0x8b;
hci_cp.iac_lap[5] = 0x9e;
} else {
/* General discoverable mode */
hci_cp.num_iac = 1;
hci_cp.iac_lap[0] = 0x33; /* GIAC */
hci_cp.iac_lap[1] = 0x8b;
hci_cp.iac_lap[2] = 0x9e;
}
hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
(hci_cp.num_iac * 3) + 1, &hci_cp);
scan |= SCAN_INQUIRY;
} else {
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
}
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
update_ad:
update_adv_data(&req);
err = hci_req_run(&req, set_discoverable_complete);
if (err < 0)
mgmt_pending_remove(cmd);
queue_work(hdev->req_workqueue, &hdev->discoverable_update);
err = 0;
failed:
hci_dev_unlock(hdev);
return err;
}
static void write_fast_connectable(struct hci_request *req, bool enable)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_write_page_scan_activity acp;
u8 type;
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return;
if (hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
if (enable) {
type = PAGE_SCAN_TYPE_INTERLACED;
/* 160 msec page scan interval */
acp.interval = cpu_to_le16(0x0100);
} else {
type = PAGE_SCAN_TYPE_STANDARD; /* default */
/* default 1.28 sec page scan */
acp.interval = cpu_to_le16(0x0800);
}
acp.window = cpu_to_le16(0x0012);
if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
__cpu_to_le16(hdev->page_scan_window) != acp.window)
hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
sizeof(acp), &acp);
if (hdev->page_scan_type != type)
hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
u16 opcode)
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
struct mgmt_pending_cmd *cmd;
struct mgmt_mode *cp;
bool conn_changed, discov_changed;
BT_DBG("status 0x%02x", status);
......@@ -2042,27 +1261,8 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status,
goto remove_cmd;
}
cp = cmd->param;
if (cp->val) {
conn_changed = !hci_dev_test_and_set_flag(hdev,
HCI_CONNECTABLE);
discov_changed = false;
} else {
conn_changed = hci_dev_test_and_clear_flag(hdev,
HCI_CONNECTABLE);
discov_changed = hci_dev_test_and_clear_flag(hdev,
HCI_DISCOVERABLE);
}
send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
if (conn_changed || discov_changed) {
new_settings(hdev, cmd->sk);
hci_update_page_scan(hdev);
if (discov_changed)
mgmt_update_adv_data(hdev);
hci_update_background_scan(hdev);
}
remove_cmd:
mgmt_pending_remove(cmd);
......@@ -2092,7 +1292,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
return err;
if (changed) {
hci_update_page_scan(hdev);
hci_req_update_scan(hdev);
hci_update_background_scan(hdev);
return new_settings(hdev, sk);
}
......@@ -2105,8 +1305,6 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
u8 scan;
int err;
BT_DBG("request for %s", hdev->name);
......@@ -2140,57 +1338,19 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
hci_req_init(&req, hdev);
/* If BR/EDR is not enabled and we disable advertising as a
* by-product of disabling connectable, we need to update the
* advertising flags.
*/
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
if (!cp->val) {
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
}
update_adv_data(&req);
} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
if (cp->val) {
scan = SCAN_PAGE;
hci_dev_set_flag(hdev, HCI_CONNECTABLE);
} else {
/* If we don't have any whitelist entries just
* disable all scanning. If there are entries
* and we had both page and inquiry scanning
* enabled then fall back to only page scanning.
* Otherwise no changes are needed.
*/
if (list_empty(&hdev->whitelist))
scan = SCAN_DISABLED;
else if (test_bit(HCI_ISCAN, &hdev->flags))
scan = SCAN_PAGE;
else
goto no_scan_update;
if (test_bit(HCI_ISCAN, &hdev->flags) &&
hdev->discov_timeout > 0)
if (hdev->discov_timeout > 0)
cancel_delayed_work(&hdev->discov_off);
}
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
}
no_scan_update:
/* Update the advertising parameters if necessary */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
enable_advertising(&req);
err = hci_req_run(&req, set_connectable_complete);
if (err < 0) {
mgmt_pending_remove(cmd);
if (err == -ENODATA)
err = set_connectable_update_settings(hdev, sk,
cp->val);
goto failed;
}
queue_work(hdev->req_workqueue, &hdev->connectable_update);
err = 0;
failed:
hci_dev_unlock(hdev);
......@@ -2466,8 +1626,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
struct hci_request req;
hci_req_init(&req, hdev);
update_adv_data(&req);
update_scan_rsp_data(&req);
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
hci_req_run(&req, NULL);
hci_update_background_scan(hdev);
}
......@@ -2518,7 +1678,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
enabled = lmp_host_le_capable(hdev);
if (!val)
clear_adv_instance(hdev, NULL, 0x00, true);
hci_req_clear_adv_instance(hdev, NULL, 0x00, true);
if (!hdev_is_powered(hdev) || val == enabled) {
bool changed = false;
......@@ -2565,7 +1725,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_cp.simul = 0x00;
} else {
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
disable_advertising(&req);
__hci_req_disable_advertising(&req);
}
hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
......@@ -2680,8 +1840,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_req_init(&req, hdev);
update_class(&req);
update_eir(&req);
__hci_req_update_class(&req);
__hci_req_update_eir(&req);
err = hci_req_run(&req, add_uuid_complete);
if (err < 0) {
......@@ -2780,8 +1940,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
update_class:
hci_req_init(&req, hdev);
update_class(&req);
update_eir(&req);
__hci_req_update_class(&req);
__hci_req_update_eir(&req);
err = hci_req_run(&req, remove_uuid_complete);
if (err < 0) {
......@@ -2856,10 +2016,10 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_unlock(hdev);
cancel_delayed_work_sync(&hdev->service_cache);
hci_dev_lock(hdev);
update_eir(&req);
__hci_req_update_eir(&req);
}
update_class(&req);
__hci_req_update_class(&req);
err = hci_req_run(&req, set_class_complete);
if (err < 0) {
......@@ -3760,16 +2920,6 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
static void update_name(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_write_local_name cp;
memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct mgmt_cp_set_local_name *cp;
......@@ -3848,15 +2998,15 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
hci_req_init(&req, hdev);
if (lmp_bredr_capable(hdev)) {
update_name(&req);
update_eir(&req);
__hci_req_update_name(&req);
__hci_req_update_eir(&req);
}
/* The name is stored in the scan response data and so
* no need to udpate the advertising data here.
*/
if (lmp_le_capable(hdev))
update_scan_rsp_data(&req);
__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
err = hci_req_run(&req, set_name_complete);
if (err < 0)
......@@ -4534,7 +3684,7 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
NULL, 0);
hci_req_init(&req, hdev);
update_eir(&req);
__hci_req_update_eir(&req);
hci_req_run(&req, NULL);
hci_dev_unlock(hdev);
......@@ -4584,7 +3734,6 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
* set up earlier, then re-enable multi-instance advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) ||
list_empty(&hdev->adv_instances))
goto unlock;
......@@ -4600,7 +3749,7 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
hci_req_init(&req, hdev);
err = schedule_adv_instance(&req, instance, true);
err = __hci_req_schedule_adv_instance(&req, instance, true);
if (!err)
err = hci_req_run(&req, enable_advertising_instance);
......@@ -4650,6 +3799,7 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
bool changed;
if (cp->val) {
hdev->cur_adv_instance = 0x00;
changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
if (cp->val == 0x02)
hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
......@@ -4697,11 +3847,12 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
* We cannot use update_[adv|scan_rsp]_data() here as the
* HCI_ADVERTISING flag is not yet set.
*/
update_inst_adv_data(&req, 0x00);
update_inst_scan_rsp_data(&req, 0x00);
enable_advertising(&req);
hdev->cur_adv_instance = 0x00;
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
__hci_req_enable_advertising(&req);
} else {
disable_advertising(&req);
__hci_req_disable_advertising(&req);
}
err = hci_req_run(&req, set_advertising_complete);
......@@ -4898,7 +4049,7 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
write_fast_connectable(&req, cp->val);
__hci_req_write_fast_connectable(&req, cp->val);
err = hci_req_run(&req, fast_connectable_complete);
if (err < 0) {
......@@ -5033,20 +4184,20 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto unlock;
}
/* We need to flip the bit already here so that update_adv_data
* generates the correct flags.
/* We need to flip the bit already here so that
* hci_req_update_adv_data generates the correct flags.
*/
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
hci_req_init(&req, hdev);
write_fast_connectable(&req, false);
__hci_update_page_scan(&req);
__hci_req_write_fast_connectable(&req, false);
__hci_req_update_scan(&req);
/* Since only the advertising data flags will change, there
* is no need to update the scan response data.
*/
update_adv_data(&req);
__hci_req_update_adv_data(&req, hdev->cur_adv_instance);
err = hci_req_run(&req, set_bredr_complete);
if (err < 0)
......@@ -5927,7 +5078,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
if (err)
goto unlock;
hci_update_page_scan(hdev);
hci_req_update_scan(hdev);
goto added;
}
......@@ -6024,7 +5175,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
hci_update_page_scan(hdev);
hci_req_update_scan(hdev);
device_removed(sk, hdev, &cp->addr.bdaddr,
cp->addr.type);
......@@ -6089,7 +5240,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
kfree(b);
}
hci_update_page_scan(hdev);
hci_req_update_scan(hdev);
list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
......@@ -6583,7 +5734,7 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
rand, sizeof(rand));
}
flags = get_adv_discov_flags(hdev);
flags = mgmt_get_adv_discov_flags(hdev);
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
flags |= LE_AD_NO_BREDR;
......@@ -6638,10 +5789,10 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_rp_read_adv_features *rp;
size_t rp_len;
int err, i;
bool instance;
int err;
struct adv_info *adv_instance;
u32 supported_flags;
u8 *instance;
BT_DBG("%s", hdev->name);
......@@ -6651,12 +5802,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
rp_len = sizeof(*rp);
instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
if (instance)
rp_len += hdev->adv_instance_cnt;
rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
hci_dev_unlock(hdev);
......@@ -6669,19 +5815,12 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
rp->max_instances = HCI_MAX_ADV_INSTANCES;
rp->num_instances = hdev->adv_instance_cnt;
if (instance) {
i = 0;
instance = rp->instance;
list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
if (i >= hdev->adv_instance_cnt)
break;
rp->instance[i] = adv_instance->instance;
i++;
}
rp->num_instances = hdev->adv_instance_cnt;
} else {
rp->num_instances = 0;
*instance = adv_instance->instance;
instance++;
}
hci_dev_unlock(hdev);
......@@ -6754,9 +5893,6 @@ static void add_advertising_complete(struct hci_dev *hdev, u8 status,
cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
if (status)
hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
if (!adv_instance->pending)
continue;
......@@ -6772,7 +5908,7 @@ static void add_advertising_complete(struct hci_dev *hdev, u8 status,
cancel_adv_timeout(hdev);
hci_remove_adv_instance(hdev, instance);
advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
}
if (!cmd)
......@@ -6794,31 +5930,6 @@ static void add_advertising_complete(struct hci_dev *hdev, u8 status,
hci_dev_unlock(hdev);
}
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
{
u8 instance;
struct hci_request req;
hdev->adv_instance_timeout = 0;
instance = get_current_adv_instance(hdev);
if (instance == 0x00)
return;
hci_dev_lock(hdev);
hci_req_init(&req, hdev);
clear_adv_instance(hdev, &req, instance, false);
if (list_empty(&hdev->adv_instances))
disable_advertising(&req);
if (!skb_queue_empty(&req.cmd_q))
hci_req_run(&req, NULL);
hci_dev_unlock(hdev);
}
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
......@@ -6897,9 +6008,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
* actually added.
*/
if (hdev->adv_instance_cnt > prev_instance_cnt)
advertising_added(sk, hdev, cp->instance);
hci_dev_set_flag(hdev, HCI_ADVERTISING_INSTANCE);
mgmt_advertising_added(sk, hdev, cp->instance);
if (hdev->cur_adv_instance == cp->instance) {
/* If the currently advertised instance is being changed then
......@@ -6944,7 +6053,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
err = schedule_adv_instance(&req, schedule_instance, true);
err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
if (!err)
err = hci_req_run(&req, add_advertising_complete);
......@@ -7016,7 +6125,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
if (list_empty(&hdev->adv_instances)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
goto unlock;
......@@ -7024,10 +6133,10 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
clear_adv_instance(hdev, &req, cp->instance, true);
hci_req_clear_adv_instance(hdev, &req, cp->instance, true);
if (list_empty(&hdev->adv_instances))
disable_advertising(&req);
__hci_req_disable_advertising(&req);
/* If no HCI commands have been collected so far or the HCI_ADVERTISING
* flag is set or the device isn't powered then we have no HCI
......@@ -7298,138 +6407,33 @@ static void restart_le_actions(struct hci_dev *hdev)
}
}
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
void mgmt_power_on(struct hci_dev *hdev, int err)
{
struct cmd_lookup match = { NULL, hdev };
BT_DBG("status 0x%02x", status);
BT_DBG("err %d", err);
if (!status) {
hci_dev_lock(hdev);
if (!err) {
restart_le_actions(hdev);
hci_update_background_scan(hdev);
}
hci_dev_lock(hdev);
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
new_settings(hdev, match.sk);
hci_dev_unlock(hdev);
if (match.sk)
sock_put(match.sk);
}
static int powered_update_hci(struct hci_dev *hdev)
{
struct hci_request req;
struct adv_info *adv_instance;
u8 link_sec;
hci_req_init(&req, hdev);
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
!lmp_host_ssp_capable(hdev)) {
u8 mode = 0x01;
hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
u8 support = 0x01;
hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
sizeof(support), &support);
}
}
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
lmp_bredr_capable(hdev)) {
struct hci_cp_write_le_host_supported cp;
cp.le = 0x01;
cp.simul = 0x00;
/* Check first if we already have the right
* host state (host features set)
*/
if (cp.le != lmp_host_le_capable(hdev) ||
cp.simul != lmp_host_le_br_capable(hdev))
hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
sizeof(cp), &cp);
}
if (lmp_le_capable(hdev)) {
/* Make sure the controller has a good default for
* advertising data. This also applies to the case
* where BR/EDR was toggled during the AUTO_OFF phase.
*/
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
(hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
update_adv_data(&req);
update_scan_rsp_data(&req);
}
if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
hdev->cur_adv_instance == 0x00 &&
!list_empty(&hdev->adv_instances)) {
adv_instance = list_first_entry(&hdev->adv_instances,
struct adv_info, list);
hdev->cur_adv_instance = adv_instance->instance;
}
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
enable_advertising(&req);
else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
hdev->cur_adv_instance)
schedule_adv_instance(&req, hdev->cur_adv_instance,
true);
}
link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
sizeof(link_sec), &link_sec);
if (lmp_bredr_capable(hdev)) {
if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
write_fast_connectable(&req, true);
else
write_fast_connectable(&req, false);
__hci_update_page_scan(&req);
update_class(&req);
update_name(&req);
update_eir(&req);
}
return hci_req_run(&req, powered_complete);
hci_dev_unlock(hdev);
}
int mgmt_powered(struct hci_dev *hdev, u8 powered)
void __mgmt_power_off(struct hci_dev *hdev)
{
struct cmd_lookup match = { NULL, hdev };
u8 status, zero_cod[] = { 0, 0, 0 };
int err;
if (!hci_dev_test_flag(hdev, HCI_MGMT))
return 0;
if (powered) {
/* Register the available SMP channels (BR/EDR and LE) only
* when successfully powering on the controller. This late
* registration is required so that LE SMP can clearly
* decide if the public address or static address is used.
*/
smp_register(hdev);
if (powered_update_hci(hdev) == 0)
return 0;
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
&match);
goto new_settings;
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
......@@ -7451,13 +6455,10 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
zero_cod, sizeof(zero_cod), NULL);
new_settings:
err = new_settings(hdev, match.sk);
new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
return err;
}
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
......@@ -7479,43 +6480,6 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
mgmt_pending_remove(cmd);
}
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
struct hci_request req;
hci_dev_lock(hdev);
/* When discoverable timeout triggers, then just make sure
* the limited discoverable flag is cleared. Even in the case
* of a timeout triggered from general discoverable, it is
* safe to unconditionally clear the flag.
*/
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hci_req_init(&req, hdev);
if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
u8 scan = SCAN_PAGE;
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
sizeof(scan), &scan);
}
update_class(&req);
/* Advertising instances don't use the global discoverable setting, so
* only update AD if advertising was enabled using Set Advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
update_adv_data(&req);
hci_req_run(&req, NULL);
hdev->discov_timeout = 0;
new_settings(hdev, NULL);
hci_dev_unlock(hdev);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent)
{
......@@ -8058,7 +7022,7 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
sizeof(enable), &enable);
update_eir(&req);
__hci_req_update_eir(&req);
} else {
clear_eir(&req);
}
......@@ -8352,35 +7316,6 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
BT_DBG("%s status %u", hdev->name, status);
}
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
struct hci_request req;
u8 instance;
if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
return;
instance = get_current_adv_instance(hdev);
hci_req_init(&req, hdev);
if (instance) {
schedule_adv_instance(&req, instance, true);
} else {
update_adv_data(&req);
update_scan_rsp_data(&req);
enable_advertising(&req);
}
hci_req_run(&req, adv_enable_complete);
}
static struct hci_mgmt_chan chan = {
.channel = HCI_CHANNEL_CONTROL,
.handler_count = ARRAY_SIZE(mgmt_handlers),
......
......@@ -161,9 +161,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
wdev->needed_headroom;
ldev->needed_tailroom = wdev->needed_tailroom;
lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
ret = register_netdevice(ldev);
ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154);
if (ret < 0) {
dev_put(wdev);
return ret;
......@@ -180,7 +178,7 @@ static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
ASSERT_RTNL();
wdev->ieee802154_ptr->lowpan_dev = NULL;
unregister_netdevice(ldev);
lowpan_unregister_netdevice(ldev);
dev_put(wdev);
}
......
......@@ -18,9 +18,6 @@ drv_xmit_async(struct ieee802154_local *local, struct sk_buff *skb)
static inline int
drv_xmit_sync(struct ieee802154_local *local, struct sk_buff *skb)
{
/* don't allow other operations while sync xmit */
ASSERT_RTNL();
might_sleep();
return local->ops->xmit_sync(&local->hw, skb);
......
......@@ -38,12 +38,6 @@ void ieee802154_xmit_worker(struct work_struct *work)
struct net_device *dev = skb->dev;
int res;
rtnl_lock();
/* check if ifdown occurred while schedule */
if (!netif_running(dev))
goto err_tx;
res = drv_xmit_sync(local, skb);
if (res)
goto err_tx;
......@@ -53,14 +47,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
rtnl_unlock();
return;
err_tx:
/* Restart the netif queue on each sub_if_data object. */
ieee802154_wake_queue(&local->hw);
rtnl_unlock();
kfree_skb(skb);
netdev_dbg(dev, "transmission failed\n");
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment