Commit 5f1764dd authored by David S. Miller

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
Here's another set of Bluetooth & ieee802154 patches intended for 4.1:

 - Added support for QCA ROME chipset family in the btusb driver
 - at86rf230 driver fixes & cleanups
 - ieee802154 cleanups
 - Refactoring of Bluetooth mgmt API to allow new users
 - New setting for static Bluetooth address exposed to user space
 - Refactoring of hci_dev flags to remove limit of 32
 - Remove unnecessary fast-connectable setting usage restrictions
 - Fix behavior to be consistent when trying to pair already paired device
 - Service discovery corner-case fixes

Please let me know if there are any issues pulling. Thanks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2801be4a b6d595e3
......@@ -52,6 +52,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_SWAVE 0x1000
#define BTUSB_INTEL_NEW 0x2000
#define BTUSB_AMP 0x4000
#define BTUSB_QCA_ROME 0x8000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
......@@ -213,6 +214,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
......@@ -338,6 +343,8 @@ struct btusb_data {
int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
int (*setup_on_usb)(struct hci_dev *hdev);
};
static inline void btusb_free_frags(struct btusb_data *data)
......@@ -879,6 +886,15 @@ static int btusb_open(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
/* Patch the USB firmware prior to starting any URBs on the HCI path.
* It is safer to use the USB bulk channel for downloading the USB patch.
*/
if (data->setup_on_usb) {
err = data->setup_on_usb(hdev);
if (err < 0)
return err;
}
err = usb_autopm_get_interface(data->intf);
if (err < 0)
return err;
......@@ -1254,6 +1270,28 @@ static void btusb_waker(struct work_struct *work)
usb_autopm_put_interface(data->intf);
}
static struct sk_buff *btusb_read_local_version(struct hci_dev *hdev)
{
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
hdev->name, PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_version)) {
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
hdev->name);
kfree_skb(skb);
return ERR_PTR(-EIO);
}
return skb;
}
static int btusb_setup_bcm92035(struct hci_dev *hdev)
{
struct sk_buff *skb;
......@@ -1278,12 +1316,9 @@ static int btusb_setup_csr(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb));
skb = btusb_read_local_version(hdev);
if (IS_ERR(skb))
return -PTR_ERR(skb);
}
rp = (struct hci_rp_read_local_version *)skb->data;
......@@ -2414,21 +2449,9 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
kfree_skb(skb);
/* Read Local Version Info */
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
hdev->name, ret);
return ret;
}
if (skb->len != sizeof(*ver)) {
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
hdev->name);
kfree_skb(skb);
return -EIO;
}
skb = btusb_read_local_version(hdev);
if (IS_ERR(skb))
return PTR_ERR(skb);
ver = (struct hci_rp_read_local_version *)skb->data;
rev = le16_to_cpu(ver->hci_rev);
......@@ -2516,20 +2539,9 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
kfree_skb(skb);
/* Read Local Version Info */
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
skb = btusb_read_local_version(hdev);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
hdev->name, ret);
goto done;
}
if (skb->len != sizeof(*ver)) {
BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
hdev->name);
kfree_skb(skb);
ret = -EIO;
goto done;
}
......@@ -2628,6 +2640,258 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
return 0;
}
#define QCA_DFU_PACKET_LEN 4096
#define QCA_GET_TARGET_VERSION 0x09
#define QCA_CHECK_STATUS 0x05
#define QCA_DFU_DOWNLOAD 0x01
#define QCA_SYSCFG_UPDATED 0x40
#define QCA_PATCH_UPDATED 0x80
#define QCA_DFU_TIMEOUT 3000
struct qca_version {
__le32 rom_version;
__le32 patch_version;
__le32 ram_version;
__le32 ref_clock;
__u8 reserved[4];
} __packed;
struct qca_rampatch_version {
__le16 rom_version;
__le16 patch_version;
} __packed;
struct qca_device_info {
u32 rom_version;
u8 rampatch_hdr; /* length of header in rampatch */
u8 nvm_hdr; /* length of header in NVM */
u8 ver_offset; /* offset of version structure in rampatch */
};
static const struct qca_device_info qca_devices_table[] = {
{ 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
{ 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
{ 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
{ 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
{ 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
};
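The table above is keyed by the ROM version reported by the controller; btusb_setup_qca() scans it linearly further down. A minimal standalone sketch of that lookup, assuming the structures defined above (the helper name is illustrative only, not part of the patch):
/* Illustrative helper: map a ROM version reported by the controller to
 * its qca_device_info entry, or NULL if the ROM is not supported.
 */
static const struct qca_device_info *qca_lookup_device_info(u32 ver_rom)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
		if (ver_rom == qca_devices_table[i].rom_version)
			return &qca_devices_table[i];
	}

	return NULL;
}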
static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request,
void *data, u16 size)
{
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
int pipe, err;
u8 *buf;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Some USB hosts have interoperability issues with our devices, so we
* should not wait until the HCI layer is ready.
*/
pipe = usb_rcvctrlpipe(udev, 0);
err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN,
0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err);
goto done;
}
memcpy(data, buf, size);
done:
kfree(buf);
return err;
}
static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
const struct firmware *firmware,
size_t hdr_size)
{
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
size_t count, size, sent = 0;
int pipe, len, err;
u8 *buf;
buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
count = firmware->size;
size = min_t(size_t, count, hdr_size);
memcpy(buf, firmware->data, size);
/* Patches go down to the controller over USB because the binary format
* suits the USB channel: the USB control path carries the patch headers
* and the USB bulk path carries the patch body.
*/
pipe = usb_sndctrlpipe(udev, 0);
err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR,
0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
BT_ERR("%s: Failed to send headers (%d)", hdev->name, err);
goto done;
}
sent += size;
count -= size;
while (count) {
size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
memcpy(buf, firmware->data + sent, size);
pipe = usb_sndbulkpipe(udev, 0x02);
err = usb_bulk_msg(udev, pipe, buf, size, &len,
QCA_DFU_TIMEOUT);
if (err < 0) {
BT_ERR("%s: Failed to send body at %zd of %zd (%d)",
hdev->name, sent, firmware->size, err);
break;
}
if (size != len) {
BT_ERR("%s: Failed to get bulk buffer", hdev->name);
err = -EILSEQ;
break;
}
sent += size;
count -= size;
}
done:
kfree(buf);
return err;
}
static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
struct qca_version *ver,
const struct qca_device_info *info)
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
u32 ver_rom, ver_patch;
u16 rver_rom, rver_patch;
char fwname[64];
int err;
ver_rom = le32_to_cpu(ver->rom_version);
ver_patch = le32_to_cpu(ver->patch_version);
snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
BT_ERR("%s: failed to request rampatch file: %s (%d)",
hdev->name, fwname, err);
return err;
}
BT_INFO("%s: using rampatch file: %s", hdev->name, fwname);
rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
rver_rom = le16_to_cpu(rver->rom_version);
rver_patch = le16_to_cpu(rver->patch_version);
BT_INFO("%s: QCA: patch rome 0x%x build 0x%x, firmware rome 0x%x "
"build 0x%x", hdev->name, rver_rom, rver_patch, ver_rom,
ver_patch);
if (rver_rom != ver_rom || rver_patch <= ver_patch) {
BT_ERR("%s: rampatch file version did not match with firmware",
hdev->name);
err = -EINVAL;
goto done;
}
err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr);
done:
release_firmware(fw);
return err;
}
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
struct qca_version *ver,
const struct qca_device_info *info)
{
const struct firmware *fw;
char fwname[64];
int err;
snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
le32_to_cpu(ver->rom_version));
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
BT_ERR("%s: failed to request NVM file: %s (%d)",
hdev->name, fwname, err);
return err;
}
BT_INFO("%s: using NVM file: %s", hdev->name, fwname);
err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr);
release_firmware(fw);
return err;
}
static int btusb_setup_qca(struct hci_dev *hdev)
{
const struct qca_device_info *info = NULL;
struct qca_version ver;
u32 ver_rom;
u8 status;
int i, err;
err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
sizeof(ver));
if (err < 0)
return err;
ver_rom = le32_to_cpu(ver.rom_version);
for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
if (ver_rom == qca_devices_table[i].rom_version)
info = &qca_devices_table[i];
}
if (!info) {
BT_ERR("%s: don't support firmware rome 0x%x", hdev->name,
ver_rom);
return -ENODEV;
}
err = btusb_qca_send_vendor_req(hdev, QCA_CHECK_STATUS, &status,
sizeof(status));
if (err < 0)
return err;
if (!(status & QCA_PATCH_UPDATED)) {
err = btusb_setup_qca_load_rampatch(hdev, &ver, info);
if (err < 0)
return err;
}
if (!(status & QCA_SYSCFG_UPDATED)) {
err = btusb_setup_qca_load_nvm(hdev, &ver, info);
if (err < 0)
return err;
}
return 0;
}
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
......@@ -2781,6 +3045,11 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
}
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
data->isoc = NULL;
......
......@@ -19,6 +19,8 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
......@@ -52,11 +54,21 @@ struct at86rf2xx_chip_data {
int (*get_desense_steps)(struct at86rf230_local *, s32);
};
#define AT86RF2XX_MAX_BUF (127 + 3)
#define AT86RF2XX_MAX_BUF (127 + 3)
/* Number of tx retries to reach the TX_ON state; if this is
* exceeded, a force change is started.
*
* We assume the 802.15.4 max_frame_retries value (7) here.
*/
#define AT86RF2XX_MAX_TX_RETRIES 7
/* We use the recommended 5 minutes timeout to recalibrate */
#define AT86RF2XX_CAL_LOOP_TIMEOUT (5 * 60 * HZ)
struct at86rf230_state_change {
struct at86rf230_local *lp;
int irq;
struct hrtimer timer;
struct spi_message msg;
struct spi_transfer trx;
u8 buf[AT86RF2XX_MAX_BUF];
......@@ -81,10 +93,12 @@ struct at86rf230_local {
struct at86rf230_state_change irq;
bool tx_aret;
unsigned long cal_timeout;
s8 max_frame_retries;
bool is_tx;
/* spinlock for is_tx protection */
spinlock_t lock;
u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
};
......@@ -407,6 +421,8 @@ at86rf230_reg_volatile(struct device *dev, unsigned int reg)
case RG_PHY_ED_LEVEL:
case RG_IRQ_STATUS:
case RG_VREG_CTRL:
case RG_PLL_CF:
case RG_PLL_DCU:
return true;
default:
return false;
......@@ -470,18 +486,25 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
u8 *tx_buf = ctx->buf;
tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
ctx->trx.len = 2;
ctx->msg.complete = complete;
ctx->irq_enable = irq_enable;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
if (irq_enable)
enable_irq(lp->spi->irq);
enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
}
static inline u8 at86rf230_state_to_force(u8 state)
{
if (state == STATE_TX_ON)
return STATE_FORCE_TX_ON;
else
return STATE_FORCE_TRX_OFF;
}
static void
at86rf230_async_state_assert(void *context)
{
......@@ -512,10 +535,21 @@ at86rf230_async_state_assert(void *context)
* in STATE_BUSY_RX_AACK, we run a force state change
* to STATE_TX_ON. This is timeout handling, in case the
* transceiver gets stuck in STATE_BUSY_RX_AACK.
*
* Additionally we do several retries to try to get into
* the TX_ON state without forcing. If the retry count
* reaches AT86RF2XX_MAX_TX_RETRIES we do a force change.
*/
if (ctx->to_state == STATE_TX_ON) {
at86rf230_async_state_change(lp, ctx,
STATE_FORCE_TX_ON,
if (ctx->to_state == STATE_TX_ON ||
ctx->to_state == STATE_TRX_OFF) {
u8 state = ctx->to_state;
if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES)
state = at86rf230_state_to_force(state);
lp->tx_retry++;
at86rf230_async_state_change(lp, ctx, state,
ctx->complete,
ctx->irq_enable);
return;
......@@ -531,6 +565,19 @@ at86rf230_async_state_assert(void *context)
ctx->complete(context);
}
static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer)
{
struct at86rf230_state_change *ctx =
container_of(timer, struct at86rf230_state_change, timer);
struct at86rf230_local *lp = ctx->lp;
at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
at86rf230_async_state_assert,
ctx->irq_enable);
return HRTIMER_NORESTART;
}
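The hrtimer above replaces the earlier usleep_range() calls: the state-change delay is armed as a relative timer on the per-context hrtimer, and its callback issues the TRX_STATUS read that previously followed the sleep. A condensed sketch of that pattern, assuming the context fields added above (the function name is illustrative only):
/* Illustrative only: arm the per-context state-change delay. When the
 * timer expires, at86rf230_async_state_timer() reads RG_TRX_STATUS and
 * asserts the state change.
 */
static void example_arm_state_delay(struct at86rf230_state_change *ctx,
				    unsigned int delay_us)
{
	ktime_t tim = ktime_set(0, delay_us * NSEC_PER_USEC);

	hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
}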
/* Do state change timing delay. */
static void
at86rf230_async_state_delay(void *context)
......@@ -539,6 +586,7 @@ at86rf230_async_state_delay(void *context)
struct at86rf230_local *lp = ctx->lp;
struct at86rf2xx_chip_data *c = lp->data;
bool force = false;
ktime_t tim;
/* The force state changes will show as normal states in the
* state status subregister. We change the to_state to the
......@@ -562,11 +610,15 @@ at86rf230_async_state_delay(void *context)
case STATE_TRX_OFF:
switch (ctx->to_state) {
case STATE_RX_AACK_ON:
usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10);
tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
goto change;
case STATE_TX_ON:
usleep_range(c->t_off_to_tx_on,
c->t_off_to_tx_on + 10);
tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
/* A state change from TRX_OFF to TX_ON triggers a
* calibration, so we need to reset the timeout for the
* next one.
*/
lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
goto change;
default:
break;
......@@ -574,14 +626,15 @@ at86rf230_async_state_delay(void *context)
break;
case STATE_BUSY_RX_AACK:
switch (ctx->to_state) {
case STATE_TRX_OFF:
case STATE_TX_ON:
/* Wait for worst case receiving time if we
* didn't make a force change from BUSY_RX_AACK
* to TX_ON.
* to TX_ON or TRX_OFF.
*/
if (!force) {
usleep_range(c->t_frame + c->t_p_ack,
c->t_frame + c->t_p_ack + 1000);
tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
NSEC_PER_USEC);
goto change;
}
break;
......@@ -593,7 +646,7 @@ at86rf230_async_state_delay(void *context)
case STATE_P_ON:
switch (ctx->to_state) {
case STATE_TRX_OFF:
usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10);
tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
goto change;
default:
break;
......@@ -604,12 +657,10 @@ at86rf230_async_state_delay(void *context)
}
/* Default delay is 1us in most cases */
udelay(1);
tim = ktime_set(0, NSEC_PER_USEC);
change:
at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
at86rf230_async_state_assert,
ctx->irq_enable);
hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
}
static void
......@@ -645,12 +696,11 @@ at86rf230_async_state_change_start(void *context)
*/
buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
buf[1] = ctx->to_state;
ctx->trx.len = 2;
ctx->msg.complete = at86rf230_async_state_delay;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
if (ctx->irq_enable)
enable_irq(lp->spi->irq);
enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
......@@ -708,11 +758,10 @@ at86rf230_tx_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
struct sk_buff *skb = lp->tx_skb;
enable_irq(lp->spi->irq);
enable_irq(ctx->irq);
ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
}
static void
......@@ -721,7 +770,7 @@ at86rf230_tx_on(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_tx_complete, true);
}
......@@ -765,14 +814,25 @@ at86rf230_tx_trac_status(void *context)
}
static void
at86rf230_rx(struct at86rf230_local *lp,
const u8 *data, const u8 len, const u8 lqi)
at86rf230_rx_read_frame_complete(void *context)
{
struct sk_buff *skb;
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
u8 rx_local_buf[AT86RF2XX_MAX_BUF];
const u8 *buf = ctx->buf;
struct sk_buff *skb;
u8 len, lqi;
memcpy(rx_local_buf, data, len);
enable_irq(lp->spi->irq);
len = buf[1];
if (!ieee802154_is_valid_psdu_len(len)) {
dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
len = IEEE802154_MTU;
}
lqi = buf[2 + len];
memcpy(rx_local_buf, buf + 2, len);
ctx->trx.len = 2;
enable_irq(ctx->irq);
skb = dev_alloc_skb(IEEE802154_MTU);
if (!skb) {
......@@ -785,51 +845,34 @@ at86rf230_rx(struct at86rf230_local *lp,
}
static void
at86rf230_rx_read_frame_complete(void *context)
at86rf230_rx_read_frame(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = lp->irq.buf;
u8 len = buf[1];
if (!ieee802154_is_valid_psdu_len(len)) {
dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
len = IEEE802154_MTU;
}
at86rf230_rx(lp, buf + 2, len, buf[2 + len]);
}
static void
at86rf230_rx_read_frame(struct at86rf230_local *lp)
{
u8 *buf = ctx->buf;
int rc;
u8 *buf = lp->irq.buf;
buf[0] = CMD_FB;
lp->irq.trx.len = AT86RF2XX_MAX_BUF;
lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
rc = spi_async(lp->spi, &lp->irq.msg);
ctx->trx.len = AT86RF2XX_MAX_BUF;
ctx->msg.complete = at86rf230_rx_read_frame_complete;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
enable_irq(lp->spi->irq);
at86rf230_async_error(lp, &lp->irq, rc);
ctx->trx.len = 2;
enable_irq(ctx->irq);
at86rf230_async_error(lp, ctx, rc);
}
}
static void
at86rf230_rx_trac_check(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
/* A check on the trac status could be done here. This could be useful
* for statistics on why a receive failed. It is not used at the moment,
* but it may be timing relevant. The datasheet doesn't say anything
* about this; the programming guide says to do it this way.
*/
at86rf230_rx_read_frame(lp);
at86rf230_rx_read_frame(context);
}
static void
......@@ -862,13 +905,13 @@ at86rf230_irq_status(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
const u8 *buf = lp->irq.buf;
const u8 *buf = ctx->buf;
const u8 irq = buf[1];
if (irq & IRQ_TRX_END) {
at86rf230_irq_trx_end(lp);
} else {
enable_irq(lp->spi->irq);
enable_irq(ctx->irq);
dev_err(&lp->spi->dev, "not supported irq %02x received\n",
irq);
}
......@@ -884,7 +927,6 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
disable_irq_nosync(irq);
buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
ctx->trx.len = 2;
ctx->msg.complete = at86rf230_irq_status;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
......@@ -919,7 +961,7 @@ at86rf230_write_frame(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
struct sk_buff *skb = lp->tx_skb;
u8 *buf = lp->tx.buf;
u8 *buf = ctx->buf;
int rc;
spin_lock(&lp->lock);
......@@ -929,11 +971,13 @@ at86rf230_write_frame(void *context)
buf[0] = CMD_FB | CMD_WRITE;
buf[1] = skb->len + 2;
memcpy(buf + 2, skb->data, skb->len);
lp->tx.trx.len = skb->len + 2;
lp->tx.msg.complete = at86rf230_write_frame_complete;
rc = spi_async(lp->spi, &lp->tx.msg);
if (rc)
ctx->trx.len = skb->len + 2;
ctx->msg.complete = at86rf230_write_frame_complete;
rc = spi_async(lp->spi, &ctx->msg);
if (rc) {
ctx->trx.len = 2;
at86rf230_async_error(lp, ctx, rc);
}
}
static void
......@@ -946,24 +990,45 @@ at86rf230_xmit_tx_on(void *context)
at86rf230_write_frame, false);
}
static void
at86rf230_xmit_start(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
/* In ARET mode we need to go into STATE_TX_ARET_ON after we
* are in STATE_TX_ON. The path differs here, so we change
* the complete handler.
*/
if (lp->tx_aret)
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
at86rf230_xmit_tx_on, false);
else
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
at86rf230_write_frame, false);
}
static int
at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
struct at86rf230_local *lp = hw->priv;
struct at86rf230_state_change *ctx = &lp->tx;
void (*tx_complete)(void *context) = at86rf230_write_frame;
lp->tx_skb = skb;
lp->tx_retry = 0;
/* In ARET mode we need to go into STATE_TX_ARET_ON after we
* are in STATE_TX_ON. The path differs here, so we change
* the complete handler.
/* After 5 minutes in PLL on the same frequency we run the
* calibration loops again, as recommended by the at86rf2xx datasheets.
*
* The calibration is initiated by a state change from TRX_OFF
* to TX_ON; lp->cal_timeout is then reinitialised by the state_delay
* function so the next run starts within the next 5 minutes.
*/
if (lp->tx_aret)
tx_complete = at86rf230_xmit_tx_on;
at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false);
if (time_is_before_jiffies(lp->cal_timeout))
at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
at86rf230_xmit_start, false);
else
at86rf230_xmit_start(ctx);
return 0;
}
......@@ -979,6 +1044,9 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
static int
at86rf230_start(struct ieee802154_hw *hw)
{
struct at86rf230_local *lp = hw->priv;
lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
}
......@@ -1059,6 +1127,8 @@ at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
/* Wait for PLL */
usleep_range(lp->data->t_channel_switch,
lp->data->t_channel_switch + 10);
lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
return rc;
}
......@@ -1528,25 +1598,37 @@ static void
at86rf230_setup_spi_messages(struct at86rf230_local *lp)
{
lp->state.lp = lp;
lp->state.irq = lp->spi->irq;
spi_message_init(&lp->state.msg);
lp->state.msg.context = &lp->state;
lp->state.trx.len = 2;
lp->state.trx.tx_buf = lp->state.buf;
lp->state.trx.rx_buf = lp->state.buf;
spi_message_add_tail(&lp->state.trx, &lp->state.msg);
hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
lp->state.timer.function = at86rf230_async_state_timer;
lp->irq.lp = lp;
lp->irq.irq = lp->spi->irq;
spi_message_init(&lp->irq.msg);
lp->irq.msg.context = &lp->irq;
lp->irq.trx.len = 2;
lp->irq.trx.tx_buf = lp->irq.buf;
lp->irq.trx.rx_buf = lp->irq.buf;
spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
lp->irq.timer.function = at86rf230_async_state_timer;
lp->tx.lp = lp;
lp->tx.irq = lp->spi->irq;
spi_message_init(&lp->tx.msg);
lp->tx.msg.context = &lp->tx;
lp->tx.trx.len = 2;
lp->tx.trx.tx_buf = lp->tx.buf;
lp->tx.trx.rx_buf = lp->tx.buf;
spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
lp->tx.timer.function = at86rf230_async_state_timer;
}
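The three blocks above differ only in which at86rf230_state_change member of struct at86rf230_local they initialise; a hypothetical helper (not part of this patch) could express the shared pattern like this:
/* Hypothetical refactor: initialise one at86rf230_state_change context
 * the way the three blocks above do.
 */
static void
at86rf230_setup_state_change(struct at86rf230_local *lp,
			     struct at86rf230_state_change *ctx)
{
	ctx->lp = lp;
	ctx->irq = lp->spi->irq;
	spi_message_init(&ctx->msg);
	ctx->msg.context = ctx;
	ctx->trx.len = 2;
	ctx->trx.tx_buf = ctx->buf;
	ctx->trx.rx_buf = ctx->buf;
	spi_message_add_tail(&ctx->trx, &ctx->msg);
	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ctx->timer.function = at86rf230_async_state_timer;
}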
static int at86rf230_probe(struct spi_device *spi)
......@@ -1555,7 +1637,7 @@ static int at86rf230_probe(struct spi_device *spi)
struct at86rf230_local *lp;
unsigned int status;
int rc, irq_type, rstn, slp_tr;
u8 xtal_trim;
u8 xtal_trim = 0;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
......
......@@ -30,6 +30,7 @@
#define IEEE802154_MTU 127
#define IEEE802154_ACK_PSDU_LEN 5
#define IEEE802154_MIN_PSDU_LEN 9
#define IEEE802154_FCS_LEN 2
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
......@@ -39,6 +40,7 @@
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
#define IEEE802154_MAX_CHANNEL 26
#define IEEE802154_MAX_PAGE 31
......
......@@ -354,6 +354,9 @@ void l2cap_exit(void);
int sco_init(void);
void sco_exit(void);
int mgmt_init(void);
void mgmt_exit(void);
void bt_sock_reclassify_lock(struct sock *sk, int proto);
#endif /* __BLUETOOTH_H */
......@@ -179,15 +179,6 @@ enum {
HCI_RESET,
};
/* BR/EDR and/or LE controller flags: the flags defined here should represent
* states configured via debugfs for debugging and testing purposes only.
*/
enum {
HCI_DUT_MODE,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
};
/*
* BR/EDR and/or LE controller flags: the flags defined here should represent
* states from the controller.
......@@ -217,6 +208,7 @@ enum {
HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
HCI_ADVERTISING_CONNECTABLE,
HCI_CONNECTABLE,
HCI_DISCOVERABLE,
HCI_LIMITED_DISCOVERABLE,
......@@ -225,13 +217,13 @@ enum {
HCI_FAST_CONNECTABLE,
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
};
/* A mask for the flags that are supposed to remain when a reset happens
* or the HCI device is closed.
*/
#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
HCI_DUT_MODE,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
__HCI_NUM_FLAGS,
};
/* HCI timeouts */
#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
......
......@@ -76,6 +76,7 @@ struct discovery_state {
u8 last_adv_data[HCI_MAX_AD_LENGTH];
u8 last_adv_data_len;
bool report_invalid_rssi;
bool result_filtering;
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
......@@ -352,8 +353,7 @@ struct hci_dev {
struct rfkill *rfkill;
unsigned long dbg_flags;
unsigned long dev_flags;
DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart;
......@@ -501,6 +501,21 @@ extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern struct mutex hci_cb_list_lock;
#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_volatile_flags(hdev) \
do { \
hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
hci_dev_clear_flag(hdev, HCI_LE_ADV); \
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
} while (0)
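Since dev_flags is now a DECLARE_BITMAP() instead of an unsigned long, every open-coded test_bit()/set_bit() on it is converted to these wrappers, which keep working once the flag count grows past 32. A minimal usage sketch (illustrative function, not part of the patch):
/* Illustrative only: typical flag handling through the new wrappers. */
static void example_flag_usage(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);
}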
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
......@@ -525,6 +540,7 @@ static inline void discovery_init(struct hci_dev *hdev)
static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
hdev->discovery.result_filtering = false;
hdev->discovery.report_invalid_rssi = true;
hdev->discovery.rssi = HCI_RSSI_INVALID;
hdev->discovery.uuid_count = 0;
......@@ -596,14 +612,14 @@ enum {
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}
......@@ -965,6 +981,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
void hci_smp_irks_clear(struct hci_dev *hdev);
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 bdaddr_type);
......@@ -1021,10 +1039,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
!hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
......@@ -1271,6 +1289,27 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define HCI_MGMT_VAR_LEN (1 << 0)
#define HCI_MGMT_NO_HDEV (1 << 1)
#define HCI_MGMT_UNCONFIGURED (1 << 2)
struct hci_mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
size_t data_len;
unsigned long flags;
};
struct hci_mgmt_chan {
struct list_head list;
unsigned short channel;
size_t handler_count;
const struct hci_mgmt_handler *handlers;
};
int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
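This is the refactored mgmt API mentioned in the pull request summary: a user supplies a handler table and registers a channel. A hedged sketch of such a registration (the channel number, handler and init function below are placeholders, not values defined by this patch):
/* Placeholder example of registering a management channel. */
static int example_cmd(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 data_len)
{
	return 0;
}

static const struct hci_mgmt_handler example_handlers[] = {
	{ .func = example_cmd, .data_len = 0, .flags = HCI_MGMT_NO_HDEV },
};

static struct hci_mgmt_chan example_chan = {
	.channel	= 42,	/* placeholder channel number */
	.handler_count	= ARRAY_SIZE(example_handlers),
	.handlers	= example_handlers,
};

static int __init example_init(void)
{
	return hci_mgmt_chan_register(&example_chan);
}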
/* Management interface */
#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
......@@ -1290,7 +1329,9 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
struct msghdr *msg, size_t msglen);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
......
......@@ -43,6 +43,7 @@
#define MGMT_STATUS_CANCELLED 0x10
#define MGMT_STATUS_INVALID_INDEX 0x11
#define MGMT_STATUS_RFKILLED 0x12
#define MGMT_STATUS_ALREADY_PAIRED 0x13
struct mgmt_hdr {
__le16 opcode;
......@@ -98,6 +99,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_DEBUG_KEYS 0x00001000
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
......
......@@ -749,6 +749,13 @@ static int __init bt_init(void)
goto sock_err;
}
err = mgmt_init();
if (err < 0) {
sco_exit();
l2cap_exit();
goto sock_err;
}
return 0;
sock_err:
......@@ -763,6 +770,8 @@ static int __init bt_init(void)
static void __exit bt_exit(void)
{
mgmt_exit();
sco_exit();
l2cap_exit();
......
......@@ -571,7 +571,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) ||
test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
d->dev_type != HCI_BREDR)
continue;
......@@ -700,7 +700,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
clear_bit(HCI_LE_ADV, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
......@@ -734,7 +734,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
int err;
/* Let's make sure that le is enabled.*/
if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (lmp_le_capable(hdev))
return ERR_PTR(-ECONNREFUSED);
......@@ -799,7 +799,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* anyway have to disable it in order to start directed
* advertising.
*/
if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
u8 enable = 0x00;
hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
&enable);
......@@ -810,7 +810,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
/* If we're active scanning most controllers are unable
* to initiate advertising. Simply reject the attempt.
*/
if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE) {
skb_queue_purge(&req.cmd_q);
hci_conn_del(conn);
......@@ -840,9 +840,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* handler for scan disabling knows to set the correct discovery
* state.
*/
if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(&req);
set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
hci_req_add_le_create_conn(&req, conn);
......@@ -864,7 +864,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
{
struct hci_conn *acl;
if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
if (lmp_bredr_capable(hdev))
return ERR_PTR(-ECONNREFUSED);
......@@ -942,7 +942,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
* Connections is used and the link is encrypted with AES-CCM
* using a P-256 authenticated combination key.
*/
if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
if (!hci_conn_sc_enabled(conn) ||
!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
......
......@@ -80,7 +80,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
......@@ -106,7 +106,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
if (strtobool(buf, &enable))
return -EINVAL;
if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
return -EALREADY;
hci_req_lock(hdev);
......@@ -127,7 +127,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
if (err < 0)
return err;
change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
hci_dev_change_flag(hdev, HCI_DUT_MODE);
return count;
}
......@@ -501,7 +501,7 @@ static void le_setup(struct hci_request *req)
/* LE-only controllers have LE implicitly enabled */
if (!lmp_bredr_capable(hdev))
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
......@@ -591,7 +591,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
if (lmp_bredr_capable(hdev))
bredr_setup(req);
else
clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
if (lmp_le_capable(hdev))
le_setup(req);
......@@ -617,7 +617,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
*/
hdev->max_page = 0x01;
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
u8 mode = 0x01;
hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
......@@ -656,7 +656,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
sizeof(cp), &cp);
}
if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
u8 enable = 1;
hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
&enable);
......@@ -693,7 +693,7 @@ static void hci_set_le_support(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
cp.le = 0x01;
cp.simul = 0x00;
}
......@@ -881,7 +881,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Enable Secure Connections if supported and configured */
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
bredr_sc_enabled(hdev)) {
u8 support = 0x01;
......@@ -901,7 +901,7 @@ static int __hci_init(struct hci_dev *hdev)
/* The Device Under Test (DUT) mode is special and available for
* all controller types. So just create it early on.
*/
if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_SETUP)) {
debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
&dut_mode_fops);
}
......@@ -937,8 +937,8 @@ static int __hci_init(struct hci_dev *hdev)
* So only when in setup phase or config phase, create the debugfs
* entries and register the SMP channels.
*/
if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
!test_bit(HCI_CONFIG, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG))
return 0;
hci_debugfs_create_common(hdev);
......@@ -1300,12 +1300,12 @@ int hci_inquiry(void __user *arg)
if (!hdev)
return -ENODEV;
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
err = -EOPNOTSUPP;
goto done;
}
......@@ -1315,7 +1315,7 @@ int hci_inquiry(void __user *arg)
goto done;
}
if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
}
......@@ -1387,17 +1387,17 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hci_req_lock(hdev);
if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
ret = -ENODEV;
goto done;
}
if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
!test_bit(HCI_CONFIG, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG)) {
/* Check for rfkill but allow the HCI setup stage to
* proceed (which in itself doesn't cause any RF activity).
*/
if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
ret = -ERFKILL;
goto done;
}
......@@ -1414,7 +1414,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
* This check is only valid for BR/EDR controllers
* since AMP controllers do not have an address.
*/
if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hdev->dev_type == HCI_BREDR &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY)) {
......@@ -1436,7 +1436,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_SETUP)) {
if (hdev->setup)
ret = hdev->setup(hdev);
......@@ -1448,7 +1448,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
*/
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
/* For an unconfigured controller it is required to
* read at least the version information provided by
......@@ -1458,11 +1458,11 @@ static int hci_dev_do_open(struct hci_dev *hdev)
* also the original Bluetooth public device address
* will be read using the Read BD Address command.
*/
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
ret = __hci_unconf_init(hdev);
}
if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
/* If public address change is configured, ensure that
* the address gets programmed. If the driver does not
* support changing the public address, fail the power
......@@ -1476,8 +1476,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
}
if (!ret) {
if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
ret = __hci_init(hdev);
}
......@@ -1485,13 +1485,13 @@ static int hci_dev_do_open(struct hci_dev *hdev)
if (!ret) {
hci_dev_hold(hdev);
set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
!test_bit(HCI_CONFIG, &hdev->dev_flags) &&
!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hdev->dev_type == HCI_BREDR) {
hci_dev_lock(hdev);
mgmt_powered(hdev, 1);
......@@ -1543,8 +1543,8 @@ int hci_dev_open(__u16 dev)
* HCI_USER_CHANNEL will be set first before attempting to
* open the device.
*/
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EOPNOTSUPP;
goto done;
}
......@@ -1554,7 +1554,7 @@ int hci_dev_open(__u16 dev)
* particularly important if the setup procedure has not yet
* completed.
*/
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
cancel_delayed_work(&hdev->power_off);
/* After this call it is guaranteed that the setup procedure
......@@ -1569,9 +1569,9 @@ int hci_dev_open(__u16 dev)
* is in use this bit will be cleared again and userspace has
* to explicitly enable it.
*/
if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
!test_bit(HCI_MGMT, &hdev->dev_flags))
set_bit(HCI_BONDABLE, &hdev->dev_flags);
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
!hci_dev_test_flag(hdev, HCI_MGMT))
hci_dev_set_flag(hdev, HCI_BONDABLE);
err = hci_dev_do_open(hdev);
......@@ -1601,7 +1601,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
if (!test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
/* Execute vendor specific shutdown routine */
if (hdev->shutdown)
hdev->shutdown(hdev);
......@@ -1625,17 +1625,17 @@ static int hci_dev_do_close(struct hci_dev *hdev)
if (hdev->discov_timeout > 0) {
cancel_delayed_work(&hdev->discov_off);
hdev->discov_timeout = 0;
clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
}
if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
cancel_delayed_work(&hdev->service_cache);
cancel_delayed_work_sync(&hdev->le_scan_disable);
cancel_delayed_work_sync(&hdev->le_scan_restart);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
cancel_delayed_work_sync(&hdev->rpa_expired);
/* Avoid potential lockdep warnings from the *_flush() calls by
......@@ -1647,7 +1647,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
if (hdev->dev_type == HCI_BREDR)
mgmt_powered(hdev, 0);
}
......@@ -1667,8 +1667,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
......@@ -1699,7 +1699,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Clear flags */
hdev->flags &= BIT(HCI_RAW);
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
hci_dev_clear_volatile_flags(hdev);
/* Controller radio is available but is currently powered down */
hdev->amp_status = AMP_STATUS_POWERED_DOWN;
......@@ -1723,12 +1723,12 @@ int hci_dev_close(__u16 dev)
if (!hdev)
return -ENODEV;
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
cancel_delayed_work(&hdev->power_off);
err = hci_dev_do_close(hdev);
......@@ -1786,12 +1786,12 @@ int hci_dev_reset(__u16 dev)
goto done;
}
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
err = -EOPNOTSUPP;
goto done;
}
......@@ -1812,12 +1812,12 @@ int hci_dev_reset_stat(__u16 dev)
if (!hdev)
return -ENODEV;
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
ret = -EBUSY;
goto done;
}
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
ret = -EOPNOTSUPP;
goto done;
}
......@@ -1836,29 +1836,29 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
BT_DBG("%s scan 0x%02x", hdev->name, scan);
if ((scan & SCAN_PAGE))
conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
&hdev->dev_flags);
conn_changed = !hci_dev_test_and_set_flag(hdev,
HCI_CONNECTABLE);
else
conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
&hdev->dev_flags);
conn_changed = hci_dev_test_and_clear_flag(hdev,
HCI_CONNECTABLE);
if ((scan & SCAN_INQUIRY)) {
discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
&hdev->dev_flags);
discov_changed = !hci_dev_test_and_set_flag(hdev,
HCI_DISCOVERABLE);
} else {
clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
&hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
discov_changed = hci_dev_test_and_clear_flag(hdev,
HCI_DISCOVERABLE);
}
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
if (conn_changed || discov_changed) {
/* In case this was disabled through mgmt */
set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
mgmt_update_adv_data(hdev);
mgmt_new_settings(hdev);
......@@ -1878,12 +1878,12 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!hdev)
return -ENODEV;
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
err = -EBUSY;
goto done;
}
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
err = -EOPNOTSUPP;
goto done;
}
......@@ -1893,7 +1893,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
goto done;
}
if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
}
......@@ -1997,7 +1997,7 @@ int hci_get_dev_list(void __user *arg)
* is running, but in that case still indicate that the
* device is actually down.
*/
if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags &= ~BIT(HCI_UP);
(dr + n)->dev_id = hdev->id;
......@@ -2035,7 +2035,7 @@ int hci_get_dev_info(void __user *arg)
* is running, but in that case still indicate that the
* device is actually down.
*/
if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags = hdev->flags & ~BIT(HCI_UP);
else
flags = hdev->flags;
......@@ -2078,16 +2078,16 @@ static int hci_rfkill_set_block(void *data, bool blocked)
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
if (blocked) {
set_bit(HCI_RFKILLED, &hdev->dev_flags);
if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
!test_bit(HCI_CONFIG, &hdev->dev_flags))
hci_dev_set_flag(hdev, HCI_RFKILLED);
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG))
hci_dev_do_close(hdev);
} else {
clear_bit(HCI_RFKILLED, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_RFKILLED);
}
return 0;
......@@ -2116,23 +2116,23 @@ static void hci_power_on(struct work_struct *work)
* ignored and they need to be checked now. If they are still
* valid, it is important to turn the device back off.
*/
if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
(hdev->dev_type == HCI_BREDR &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_do_close(hdev);
} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
HCI_AUTO_OFF_TIMEOUT);
}
if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
/* For unconfigured devices, set the HCI_RAW flag
* so that userspace can easily identify them.
*/
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
set_bit(HCI_RAW, &hdev->flags);
/* For fully configured devices, this will send
......@@ -2143,11 +2143,11 @@ static void hci_power_on(struct work_struct *work)
* and no event will be sent.
*/
mgmt_index_added(hdev);
} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
/* Now that the controller is configured, it
* is important to clear the HCI_RAW flag.
*/
if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
clear_bit(HCI_RAW, &hdev->flags);
/* Powering on the controller with HCI_CONFIG set only
......@@ -2516,6 +2516,42 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
}
}
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
struct smp_ltk *k;
struct smp_irk *irk;
u8 addr_type;
if (type == BDADDR_BREDR) {
if (hci_find_link_key(hdev, bdaddr))
return true;
return false;
}
/* Convert to HCI addr type which struct smp_ltk uses */
if (type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
irk = hci_get_irk(hdev, bdaddr, addr_type);
if (irk) {
bdaddr = &irk->bdaddr;
addr_type = irk->addr_type;
}
rcu_read_lock();
list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
......@@ -2950,7 +2986,7 @@ static void le_scan_restart_work(struct work_struct *work)
BT_DBG("%s", hdev->name);
/* If controller is not scanning we are done. */
if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
hci_req_init(&req, hdev);
......@@ -2983,9 +3019,9 @@ static void le_scan_restart_work(struct work_struct *work)
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type)
{
if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
(!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
(!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
bacmp(&hdev->static_addr, BDADDR_ANY))) {
bacpy(bdaddr, &hdev->static_addr);
*bdaddr_type = ADDR_LE_DEV_RANDOM;
......@@ -3153,16 +3189,16 @@ int hci_register_dev(struct hci_dev *hdev)
}
if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
set_bit(HCI_RFKILLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_RFKILLED);
set_bit(HCI_SETUP, &hdev->dev_flags);
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_SETUP);
hci_dev_set_flag(hdev, HCI_AUTO_OFF);
if (hdev->dev_type == HCI_BREDR) {
/* Assume BR/EDR support until proven otherwise (such as
* through reading supported features during init).
*/
set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
}
write_lock(&hci_dev_list_lock);
......@@ -3173,7 +3209,7 @@ int hci_register_dev(struct hci_dev *hdev)
* and should not be included in normal operation.
*/
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
......@@ -3199,7 +3235,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
set_bit(HCI_UNREGISTER, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_UNREGISTER);
id = hdev->id;
......@@ -3215,8 +3251,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on);
if (!test_bit(HCI_INIT, &hdev->flags) &&
!test_bit(HCI_SETUP, &hdev->dev_flags) &&
!test_bit(HCI_CONFIG, &hdev->dev_flags)) {
!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG)) {
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
......@@ -3890,7 +3926,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!cnt && time_after(jiffies, hdev->acl_last_tx +
......@@ -4073,7 +4109,7 @@ static void hci_sched_le(struct hci_dev *hdev)
if (!hci_conn_num(hdev, LE_LINK))
return;
if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
......@@ -4121,7 +4157,7 @@ static void hci_tx_work(struct work_struct *work)
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
/* Schedule queues and send stuff to HCI driver */
hci_sched_acl(hdev);
hci_sched_sco(hdev);
......@@ -4318,7 +4354,7 @@ static void hci_rx_work(struct work_struct *work)
hci_send_to_sock(hdev, skb);
}
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
kfree_skb(skb);
continue;
}
......
......@@ -247,7 +247,7 @@ static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
......@@ -265,7 +265,7 @@ static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
struct hci_dev *hdev = file->private_data;
char buf[3];
buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
......@@ -679,7 +679,7 @@ static ssize_t force_static_address_read(struct file *file,
struct hci_dev *hdev = file->private_data;
char buf[3];
buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
......@@ -704,10 +704,10 @@ static ssize_t force_static_address_write(struct file *file,
if (strtobool(buf, &enable))
return -EINVAL;
if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR))
return -EALREADY;
change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR);
return count;
}
......
......@@ -70,7 +70,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
......@@ -82,7 +82,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
hci_conn_check_pending(hdev);
}
......@@ -198,7 +198,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
return;
/* Reset all non-persistent flags */
hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
hci_dev_clear_volatile_flags(hdev);
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
......@@ -265,7 +265,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_set_local_name_complete(hdev, sent, status);
else if (!status)
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
......@@ -282,8 +282,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
test_bit(HCI_CONFIG, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG))
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
......@@ -309,7 +309,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_AUTH, &hdev->flags);
}
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_auth_enable_complete(hdev, status);
hci_dev_unlock(hdev);
......@@ -404,7 +404,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
if (status == 0)
memcpy(hdev->dev_class, sent, 3);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_set_class_of_dev_complete(hdev, sent, status);
hci_dev_unlock(hdev);
......@@ -497,13 +497,13 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SSP;
}
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_ssp_enable_complete(hdev, sent->mode, status);
else if (!status) {
if (sent->mode)
set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
else
clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
}
hci_dev_unlock(hdev);
......@@ -529,11 +529,11 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SC;
}
if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
if (sent->support)
set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_SC_ENABLED);
else
clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
}
hci_dev_unlock(hdev);
......@@ -548,8 +548,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
test_bit(HCI_CONFIG, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG)) {
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
hdev->lmp_ver = rp->lmp_ver;
......@@ -568,8 +568,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
if (rp->status)
return;
if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
test_bit(HCI_CONFIG, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
......@@ -691,7 +691,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
if (test_bit(HCI_INIT, &hdev->flags))
bacpy(&hdev->bdaddr, &rp->bdaddr);
if (test_bit(HCI_SETUP, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_SETUP))
bacpy(&hdev->setup_addr, &rp->bdaddr);
}
......@@ -900,7 +900,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
if (rp->status)
......@@ -926,7 +926,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
......@@ -985,7 +985,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
rp->status);
......@@ -1001,7 +1001,7 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
ACL_LINK, 0, rp->status);
......@@ -1016,7 +1016,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
0, rp->status);
......@@ -1032,7 +1032,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
ACL_LINK, 0, rp->status);
......@@ -1109,7 +1109,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (*sent) {
struct hci_conn *conn;
set_bit(HCI_LE_ADV, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_LE_ADV);
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (conn)
......@@ -1117,7 +1117,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
&conn->le_conn_timeout,
conn->conn_timeout);
} else {
clear_bit(HCI_LE_ADV, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_LE_ADV);
}
hci_dev_unlock(hdev);
......@@ -1192,7 +1192,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
switch (cp->enable) {
case LE_SCAN_ENABLE:
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_LE_SCAN);
if (hdev->le_scan_type == LE_SCAN_ACTIVE)
clear_pending_adv_report(hdev);
break;
......@@ -1217,7 +1217,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
*/
cancel_delayed_work(&hdev->le_scan_disable);
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_LE_SCAN);
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
* interrupted scanning due to a connect request. Mark
......@@ -1226,10 +1226,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
* been disabled because of active scanning, so
* re-enable it again if necessary.
*/
if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
&hdev->dev_flags))
if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
hdev->discovery.state == DISCOVERY_FINDING)
mgmt_reenable_advertising(hdev);
......@@ -1388,11 +1387,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
if (sent->le) {
hdev->features[1][0] |= LMP_HOST_LE;
set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_LE_ENABLED);
} else {
hdev->features[1][0] &= ~LMP_HOST_LE;
clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
hci_dev_clear_flag(hdev, HCI_ADVERTISING);
}
if (sent->simul)
......@@ -1769,7 +1768,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
if (!conn)
......@@ -2118,7 +2117,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
hci_dev_lock(hdev);
......@@ -2154,7 +2153,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (!num_rsp)
return;
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
......@@ -2304,8 +2303,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
* connection. These features are only touched through mgmt so
* only do the checks if HCI_MGMT is set.
*/
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
......@@ -2542,7 +2541,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto check_auth;
if (ev->status == 0)
......@@ -2608,7 +2607,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
* whenever the encryption procedure fails.
*/
if (ev->status && conn->type == LE_LINK)
set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
......@@ -2626,7 +2625,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
* connections that are not encrypted with AES-CCM
* using a P-256 authenticated combination key.
*/
if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
(!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
......@@ -3331,11 +3330,11 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_drop(conn);
}
if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
!test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
u8 secure;
if (conn->pending_sec_level == BT_SECURITY_HIGH)
......@@ -3391,7 +3390,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
hci_dev_lock(hdev);
......@@ -3465,7 +3464,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
conn_set_key(conn, ev->key_type, conn->pin_length);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
......@@ -3487,7 +3486,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
* store_hint being 0).
*/
if (key->type == HCI_LK_DEBUG_COMBINATION &&
!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
list_del_rcu(&key->list);
kfree_rcu(key, rcu);
goto unlock;
......@@ -3570,7 +3569,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
if (!num_rsp)
return;
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
......@@ -3776,7 +3775,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
if (!num_rsp)
return;
if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
return;
hci_dev_lock(hdev);
......@@ -3794,7 +3793,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
data.rssi = info->rssi;
data.ssp_mode = 0x01;
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
name_known = eir_has_data_type(info->data,
sizeof(info->data),
EIR_NAME_COMPLETE);
......@@ -3898,7 +3897,7 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* information. However it can only be trusted when
* not in Secure Connection Only mode.
*/
if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
return data->present;
/* When Secure Connections Only mode is enabled, then
......@@ -3942,13 +3941,13 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_hold(conn);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
/* Allow pairing if we're pairable, the initiators of the
* pairing or if the remote is not requesting bonding.
*/
if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
......@@ -3974,7 +3973,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* If we're not bondable, force one of the non-bondable
* authentication requirement values.
*/
if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
conn->auth_type &= HCI_AT_NO_BONDING_MITM;
cp.authentication = conn->auth_type;
......@@ -4029,7 +4028,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
......@@ -4100,7 +4099,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
BT_DBG("%s", hdev->name);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}
......@@ -4119,7 +4118,7 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
conn->passkey_notify = __le32_to_cpu(ev->passkey);
conn->passkey_entered = 0;
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
conn->dst_type, conn->passkey_notify,
conn->passkey_entered);
......@@ -4157,7 +4156,7 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
if (test_bit(HCI_MGMT, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
conn->dst_type, conn->passkey_notify,
conn->passkey_entered);
......@@ -4226,7 +4225,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_MGMT))
goto unlock;
data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
......@@ -4243,7 +4242,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
struct hci_cp_remote_oob_ext_data_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
memset(cp.hash192, 0, sizeof(cp.hash192));
memset(cp.rand192, 0, sizeof(cp.rand192));
} else {
......@@ -4409,7 +4408,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* All controllers implicitly stop advertising in the event of a
* connection, so ensure that the state bit is cleared.
*/
clear_bit(HCI_LE_ADV, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_LE_ADV);
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (!conn) {
......@@ -4432,7 +4431,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (conn->out) {
conn->resp_addr_type = ev->bdaddr_type;
bacpy(&conn->resp_addr, &ev->bdaddr);
if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
conn->init_addr_type = ADDR_LE_DEV_RANDOM;
bacpy(&conn->init_addr, &hdev->rpa);
} else {
......@@ -4658,7 +4657,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
/* If the controller is not using resolvable random
* addresses, then this report can be ignored.
*/
if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
return;
/* If the local IRK of the controller does not match
......
......@@ -270,7 +270,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
* and 0x01 (whitelist enabled) use the new filter policies
* 0x02 (no whitelist) and 0x03 (whitelist enabled).
*/
if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
......@@ -304,10 +304,10 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
* In this kind of scenario skip the update and let the random
* address be updated at the next cycle.
*/
if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
BT_DBG("Deferring random address update");
set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
}
......@@ -324,12 +324,12 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
* current RPA has expired or there is something else than
* the current RPA in use, then generate a new one.
*/
if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
int to;
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
!bacmp(&hdev->random_addr, &hdev->rpa))
return 0;
......@@ -383,9 +383,9 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
* and a static address has been configured, then use that
* address instead of the public BR/EDR address.
*/
if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
(!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
(!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
bacmp(&hdev->static_addr, BDADDR_ANY))) {
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (bacmp(&hdev->static_addr, &hdev->random_addr))
......@@ -425,7 +425,7 @@ void __hci_update_page_scan(struct hci_request *req)
struct hci_dev *hdev = req->hdev;
u8 scan;
if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return;
if (!hdev_is_powered(hdev))
......@@ -434,7 +434,7 @@ void __hci_update_page_scan(struct hci_request *req)
if (mgmt_powering_down(hdev))
return;
if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
disconnected_whitelist_entries(hdev))
scan = SCAN_PAGE;
else
......@@ -443,7 +443,7 @@ void __hci_update_page_scan(struct hci_request *req)
if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
return;
if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
scan |= SCAN_INQUIRY;
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
......@@ -471,14 +471,14 @@ void __hci_update_background_scan(struct hci_request *req)
if (!test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
test_bit(HCI_SETUP, &hdev->dev_flags) ||
test_bit(HCI_CONFIG, &hdev->dev_flags) ||
test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
test_bit(HCI_UNREGISTER, &hdev->dev_flags))
hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG) ||
hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return;
/* No point in doing scanning if LE support hasn't been enabled */
if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
/* If discovery is active don't interfere with it */
......@@ -502,7 +502,7 @@ void __hci_update_background_scan(struct hci_request *req)
*/
/* If controller is not scanning we are done. */
if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
hci_req_add_le_scan_disable(req);
......@@ -524,7 +524,7 @@ void __hci_update_background_scan(struct hci_request *req)
/* If controller is currently scanning, we stop it to ensure we
* don't miss any advertising (due to duplicates filter).
*/
if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req);
hci_req_add_le_passive_scan(req);
......
......@@ -31,6 +31,9 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);
static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
......@@ -401,6 +404,56 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
struct hci_mgmt_chan *c;
list_for_each_entry(c, &mgmt_chan_list, list) {
if (c->channel == channel)
return c;
}
return NULL;
}
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
struct hci_mgmt_chan *c;
mutex_lock(&mgmt_chan_list_lock);
c = __hci_mgmt_chan_find(channel);
mutex_unlock(&mgmt_chan_list_lock);
return c;
}
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
if (c->channel < HCI_CHANNEL_CONTROL)
return -EINVAL;
mutex_lock(&mgmt_chan_list_lock);
if (__hci_mgmt_chan_find(c->channel)) {
mutex_unlock(&mgmt_chan_list_lock);
return -EALREADY;
}
list_add_tail(&c->list, &mgmt_chan_list);
mutex_unlock(&mgmt_chan_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
mutex_lock(&mgmt_chan_list_lock);
list_del(&c->list);
mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
......@@ -421,7 +474,7 @@ static int hci_sock_release(struct socket *sock)
if (hdev) {
if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
mgmt_index_added(hdev);
clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
hci_dev_close(hdev->id);
}
......@@ -481,10 +534,10 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (!hdev)
return -EBADFD;
if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
return -EOPNOTSUPP;
if (hdev->dev_type != HCI_BREDR)
......@@ -660,14 +713,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
if (test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
test_bit(HCI_SETUP, &hdev->dev_flags) ||
test_bit(HCI_CONFIG, &hdev->dev_flags)) {
hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG)) {
err = -EBUSY;
hci_dev_put(hdev);
goto done;
}
if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
err = -EUSERS;
hci_dev_put(hdev);
goto done;
......@@ -677,7 +730,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
err = hci_dev_open(hdev->id);
if (err) {
clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
mgmt_index_added(hdev);
hci_dev_put(hdev);
goto done;
......@@ -688,38 +741,39 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
hci_pi(sk)->hdev = hdev;
break;
case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
if (haddr.hci_dev != HCI_DEV_NONE) {
err = -EINVAL;
goto done;
}
if (!capable(CAP_NET_ADMIN)) {
if (!capable(CAP_NET_RAW)) {
err = -EPERM;
goto done;
}
send_monitor_replay(sk);
atomic_inc(&monitor_promisc);
break;
case HCI_CHANNEL_MONITOR:
default:
if (!hci_mgmt_chan_find(haddr.hci_channel)) {
err = -EINVAL;
goto done;
}
if (haddr.hci_dev != HCI_DEV_NONE) {
err = -EINVAL;
goto done;
}
if (!capable(CAP_NET_RAW)) {
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
goto done;
}
send_monitor_replay(sk);
atomic_inc(&monitor_promisc);
break;
default:
err = -EINVAL;
goto done;
}
......@@ -833,10 +887,13 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
hci_sock_cmsg(sk, msg, skb);
break;
case HCI_CHANNEL_USER:
case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
sock_recv_timestamp(msg, sk, skb);
break;
default:
if (hci_mgmt_chan_find(hci_pi(sk)->channel))
sock_recv_timestamp(msg, sk, skb);
break;
}
skb_free_datagram(sk, skb);
......@@ -848,6 +905,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
struct hci_mgmt_chan *chan;
struct hci_dev *hdev;
struct sk_buff *skb;
int err;
......@@ -869,14 +927,18 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
case HCI_CHANNEL_RAW:
case HCI_CHANNEL_USER:
break;
case HCI_CHANNEL_CONTROL:
err = mgmt_control(sk, msg, len);
goto done;
case HCI_CHANNEL_MONITOR:
err = -EOPNOTSUPP;
goto done;
default:
err = -EINVAL;
mutex_lock(&mgmt_chan_list_lock);
chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
if (chan)
err = mgmt_control(chan, sk, msg, len);
else
err = -EINVAL;
mutex_unlock(&mgmt_chan_list_lock);
goto done;
}
......
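The hci_sock.c hunks above introduce a small registry for management channels: hci_mgmt_chan_register() rejects channel numbers below HCI_CHANNEL_CONTROL, refuses duplicates with -EALREADY, and bind()/sendmsg() now look the channel up via hci_mgmt_chan_find() instead of hard-coding HCI_CHANNEL_CONTROL. A hedged usage sketch follows; everything beyond the .channel field and the register/unregister calls (the module boilerplate, the initializer name) is an assumption for illustration, not taken from this diff:

/* Sketch only: registering a management channel with the new API.
 * HCI_CHANNEL_CONTROL is used here merely as the lowest valid value;
 * a real user would pick its own channel number and fill in whatever
 * handler fields struct hci_mgmt_chan actually defines.
 */
static struct hci_mgmt_chan my_chan = {
	.channel = HCI_CHANNEL_CONTROL,
};

static int __init my_chan_init(void)
{
	/* Returns -EINVAL for channels below HCI_CHANNEL_CONTROL and
	 * -EALREADY if the channel number is already registered.
	 */
	return hci_mgmt_chan_register(&my_chan);
}

static void __exit my_chan_exit(void)
{
	hci_mgmt_chan_unregister(&my_chan);
}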
......@@ -3900,7 +3900,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
return -EPROTO;
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_MGMT) &&
!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
mgmt_device_connected(hdev, hcon, 0, NULL, 0);
hci_dev_unlock(hdev);
......@@ -6987,12 +6987,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
if (hcon->type == ACL_LINK &&
test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
conn->local_fixed_chan |= L2CAP_FC_A2MP;
if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
(bredr_sc_enabled(hcon->hdev) ||
test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
......@@ -7112,7 +7112,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
else
dst_type = ADDR_LE_DEV_RANDOM;
if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
role = HCI_ROLE_SLAVE;
else
role = HCI_ROLE_MASTER;
......
......@@ -1231,7 +1231,7 @@ int __init sco_init(void)
return err;
}
void __exit sco_exit(void)
void sco_exit(void)
{
bt_procfs_cleanup(&init_net, "sco");
......
......@@ -52,7 +52,7 @@
#define SMP_TIMEOUT msecs_to_jiffies(30000)
#define AUTH_REQ_MASK(dev) (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? \
#define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \
0x1f : 0x07)
#define KEY_DIST_MASK 0x07
......@@ -589,7 +589,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
struct hci_dev *hdev = hcon->hdev;
u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT;
if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
authreq |= SMP_AUTH_BONDING;
......@@ -597,18 +597,18 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
authreq &= ~SMP_AUTH_BONDING;
}
if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
remote_dist |= SMP_DIST_ID_KEY;
if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_PRIVACY))
local_dist |= SMP_DIST_ID_KEY;
if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
(authreq & SMP_AUTH_SC)) {
struct oob_data *oob_data;
u8 bdaddr_type;
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
local_dist |= SMP_DIST_LINK_KEY;
remote_dist |= SMP_DIST_LINK_KEY;
}
......@@ -692,7 +692,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
* support hasn't been explicitly enabled.
*/
if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG &&
!test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) {
!hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) {
list_del_rcu(&smp->ltk->list);
kfree_rcu(smp->ltk, rcu);
smp->ltk = NULL;
......@@ -1052,7 +1052,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
/* Don't keep debug keys around if the relevant
* flag is not set.
*/
if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) &&
key->type == HCI_LK_DEBUG_COMBINATION) {
list_del_rcu(&key->list);
kfree_rcu(key, rcu);
......@@ -1604,15 +1604,15 @@ static void build_bredr_pairing_cmd(struct smp_chan *smp,
struct hci_dev *hdev = conn->hcon->hdev;
u8 local_dist = 0, remote_dist = 0;
if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
}
if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
remote_dist |= SMP_DIST_ID_KEY;
if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
if (hci_dev_test_flag(hdev, HCI_PRIVACY))
local_dist |= SMP_DIST_ID_KEY;
if (!rsp) {
......@@ -1664,11 +1664,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
/* We didn't start the pairing, so match remote */
auth = req->auth_req & AUTH_REQ_MASK(hdev);
if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
(auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
return SMP_AUTH_REQUIREMENTS;
smp->preq[0] = SMP_CMD_PAIRING_REQ;
......@@ -1679,7 +1679,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (conn->hcon->type == ACL_LINK) {
/* We must have a BR/EDR SC link */
if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
!test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return SMP_CROSS_TRANSP_NOT_ALLOWED;
set_bit(SMP_FLAG_SC, &smp->flags);
......@@ -1743,10 +1743,10 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
smp->remote_key_dist &= ~SMP_SC_NO_DIST;
/* Wait for Public Key from Initiating Device */
return 0;
} else {
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
}
SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
/* Request setup of TK */
ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
if (ret)
......@@ -1761,7 +1761,7 @@ static u8 sc_send_public_key(struct smp_chan *smp)
BT_DBG("");
if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) {
if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
BT_DBG("Using debug keys");
memcpy(smp->local_pk, debug_pk, 64);
memcpy(smp->local_sk, debug_sk, 32);
......@@ -1816,7 +1816,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
auth = rsp->auth_req & AUTH_REQ_MASK(hdev);
if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
return SMP_AUTH_REQUIREMENTS;
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
......@@ -1926,8 +1926,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
return smp_confirm(smp);
else
set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
return 0;
}
......@@ -2086,7 +2086,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
auth = rp->auth_req & AUTH_REQ_MASK(hdev);
if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC))
if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
return SMP_AUTH_REQUIREMENTS;
if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
......@@ -2107,7 +2107,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (!smp)
return SMP_UNSPECIFIED;
if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) &&
if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
(auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
......@@ -2141,7 +2141,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
chan = conn->smp;
if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
return 1;
if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
......@@ -2170,7 +2170,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
authreq = seclevel_to_authreq(sec_level);
if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags))
if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED))
authreq |= SMP_AUTH_SC;
/* Require MITM if IO Capability allows or the security level
......@@ -2606,7 +2606,7 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
if (skb->len < 1)
return -EILSEQ;
if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) {
reason = SMP_PAIRING_NOTSUPP;
goto done;
}
......@@ -2744,16 +2744,16 @@ static void bredr_pairing(struct l2cap_chan *chan)
return;
/* Secure Connections support must be enabled */
if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED))
return;
/* BR/EDR must use Secure Connections for SMP */
if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
!test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return;
/* If our LE support is not enabled don't do anything */
if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
/* Don't bother if remote LE support is not enabled */
......@@ -3003,7 +3003,7 @@ static ssize_t force_bredr_smp_read(struct file *file,
struct hci_dev *hdev = file->private_data;
char buf[3];
buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N';
buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y': 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
......@@ -3025,7 +3025,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
if (strtobool(buf, &enable))
return -EINVAL;
if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
return -EALREADY;
if (enable) {
......@@ -3044,7 +3044,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
smp_del_chan(chan);
}
change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);
hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP);
return count;
}
......
......@@ -113,7 +113,7 @@ static void lowpan_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
dev->type = ARPHRD_IEEE802154;
dev->type = ARPHRD_6LOWPAN;
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
......
......@@ -25,6 +25,9 @@
#include "sysfs.h"
#include "core.h"
/* name for sysfs, %d is appended */
#define PHY_NAME "phy"
/* RCU-protected (and RTNL for writers) */
LIST_HEAD(cfg802154_rdev_list);
int cfg802154_rdev_list_generation;
......@@ -122,7 +125,7 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&rdev->wpan_dev_list);
device_initialize(&rdev->wpan_phy.dev);
dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx);
dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
rdev->wpan_phy.dev.class = &wpan_phy_class;
rdev->wpan_phy.dev.platform_data = rdev;
......
......@@ -76,7 +76,6 @@ static int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
nlmsg_free(msg);
return -ENOBUFS;
}
EXPORT_SYMBOL(ieee802154_nl_start_confirm);
static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
u32 seq, int flags, struct net_device *dev)
......
......@@ -48,49 +48,6 @@ static ssize_t name_show(struct device *dev,
}
static DEVICE_ATTR_RO(name);
#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
static ssize_t name ## _show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
int ret; \
\
mutex_lock(&phy->pib_lock); \
ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
mutex_unlock(&phy->pib_lock); \
return ret; \
} \
static DEVICE_ATTR_RO(name)
#define MASTER_SHOW(field, format_string) \
MASTER_SHOW_COMPLEX(field, format_string, phy->field)
MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
MASTER_SHOW(transmit_power, "%d +- 1 dB");
MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode);
static ssize_t channels_supported_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
int ret;
int i, len = 0;
mutex_lock(&phy->pib_lock);
for (i = 0; i < 32; i++) {
ret = snprintf(buf + len, PAGE_SIZE - len,
"%#09x\n", phy->channels_supported[i]);
if (ret < 0)
break;
len += ret;
}
mutex_unlock(&phy->pib_lock);
return len;
}
static DEVICE_ATTR_RO(channels_supported);
static void wpan_phy_release(struct device *dev)
{
struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
......@@ -101,12 +58,6 @@ static void wpan_phy_release(struct device *dev)
static struct attribute *pmib_attrs[] = {
&dev_attr_index.attr,
&dev_attr_name.attr,
/* below will be removed soon */
&dev_attr_current_channel.attr,
&dev_attr_current_page.attr,
&dev_attr_channels_supported.attr,
&dev_attr_transmit_power.attr,
&dev_attr_cca_mode.attr,
NULL,
};
ATTRIBUTE_GROUPS(pmib);
......
......@@ -65,8 +65,19 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
{
if (ifs_handling) {
struct ieee802154_local *local = hw_to_local(hw);
u8 max_sifs_size;
if (skb->len > 18)
/* If transceiver sets CRC on his own we need to use lifs
* threshold len above 16 otherwise 18, because it's not
* part of skb->len.
*/
if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
IEEE802154_FCS_LEN;
else
max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
if (skb->len > max_sifs_size)
hrtimer_start(&local->ifs_timer,
ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
HRTIMER_MODE_REL);
......
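For the mac802154 hunk above, the arithmetic behind the comment is worth spelling out: assuming the in-tree constants IEEE802154_MAX_SIFS_FRAME_SIZE == 18 and IEEE802154_FCS_LEN == 2, a transceiver flagged IEEE802154_HW_TX_OMIT_CKSUM appends the 2-octet FCS itself, so those octets never show up in skb->len and the SIFS/LIFS cut-off has to drop from 18 to 18 - 2 = 16; drivers that leave checksumming to the stack keep the full 18-octet threshold.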