Commit df67e97a authored by David S. Miller

Merge tag 'wireless-drivers-for-davem-2016-10-30' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for 4.9

iwlwifi

* some fixes for suspend/resume with unified FW images
* a fix for a false-positive lockdep report
* a fix for a multi-queue bug that caused an unnecessary 1-second latency
* a fix for an ACPI parsing bug that caused a misleading error message

brcmfmac

* fix an uninitialised-variable warning in brcmf_cfg80211_start_ap()
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 46d0847c d3532ea6
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* store current 11d setting */
if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
&ifp->vif->is_11d)) {
supports_11d = false;
is_11d = supports_11d = false;
} else {
country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len,
......
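For reference, a minimal userspace illustration of the -Wmaybe-uninitialized pattern the hunk above addresses (this is not brcmfmac code; get_regulatory() is a made-up stand-in for the firmware query): before the change only supports_11d was cleared on the failure path, so a later read of is_11d could see an indeterminate value, and assigning both flags in that branch is what silences the warning.

/*
 * Illustration only, not driver code: get_regulatory() stands in for the
 * BRCMF_C_GET_REGULATORY query that can fail.  Before the fix, is_11d was
 * assigned only on the success path; the failure branch now clears both
 * flags, which removes the maybe-uninitialized read.
 */
#include <stdbool.h>
#include <stdio.h>

static int get_regulatory(int *val)
{
	*val = 0;
	return -1;		/* pretend the firmware query failed */
}

int main(void)
{
	bool supports_11d = true;
	bool is_11d;		/* previously assigned only on success */
	int reg;

	if (get_regulatory(&reg))
		is_11d = supports_11d = false;	/* the fix: set both flags */
	else
		is_11d = (reg != 0);

	printf("supports_11d=%d is_11d=%d\n", supports_11d, is_11d);
	return 0;
}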
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
return ret;
} else {
/* In theory, we wouldn't have to stop a running sched
* scan in order to start another one (for
* net-detect). But in practice this doesn't seem to
* work properly, so stop any running sched_scan now.
*/
ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
if (ret)
return ret;
}
/* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
out:
if (ret < 0) {
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (mvm->restart_fw > 0) {
mvm->restart_fw--;
ieee80211_restart_hw(mvm->hw);
}
iwl_mvm_free_nd(mvm);
}
out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_update_changed_regdom(mvm);
if (mvm->net_detect) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that
* fails, continue and try to get the wake-up reasons,
* but trigger a HW restart by keeping a failure code
* in ret.
*/
if (unified_image)
ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
false);
iwl_mvm_query_netdetect_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
struct iwl_mvm *mvm = inode->i_private;
int remaining_time = 10;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
mvm->d3_test_active = false;
@@ -2282,6 +2305,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_abort_notification_waits(&mvm->notif_wait);
if (!unified_image) {
int remaining_time = 10;
ieee80211_restart_hw(mvm->hw);
/* wait for restart and disconnect all interfaces */
@@ -2292,7 +2318,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
}
if (remaining_time == 0)
IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
}
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
......
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
.data = { &cmd, },
.len = { sizeof(cmd) },
};
size_t delta, len;
ssize_t ret;
size_t delta;
ssize_t ret, len;
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
DEBUG_GROUP, 0);
......
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
}
if (notif->sync)
ret = wait_event_timeout(notif_waitq,
ret = wait_event_timeout(mvm->rx_sync_waitq,
atomic_read(&mvm->queue_sync_counter) == 0,
HZ);
WARN_ON_ONCE(!ret);
......
@@ -937,6 +937,7 @@ struct iwl_mvm {
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
wait_queue_head_t rx_sync_waitq;
/* BT-Coex */
struct iwl_bt_coex_profile_notif last_bt_notif;
......
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
spin_lock_init(&mvm->refs_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
init_waitqueue_head(&mvm->rx_sync_waitq);
atomic_set(&mvm->queue_sync_counter, 0);
......
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
"Received expired RX queue sync message\n");
return;
}
atomic_dec(&mvm->queue_sync_counter);
if (!atomic_dec_return(&mvm->queue_sync_counter))
wake_up(&mvm->rx_sync_waitq);
}
switch (internal_notif->type) {
......
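The four hunks above (iwl_mvm_sync_rx_queues_internal(), struct iwl_mvm, iwl_op_mode_mvm_start() and iwl_mvm_rx_queue_notif()) replace an on-stack wait queue that nothing ever woke, which meant the sync always slept for the full HZ timeout (the unnecessary 1-second latency mentioned in the pull request), with a shared mvm->rx_sync_waitq that is woken as soon as the per-queue sync counter drops to zero. Below is a userspace analogue of that wake-on-zero pattern; it uses pthreads rather than the kernel waitqueue API, and all names in it are hypothetical.

/*
 * Userspace analogue of the fix above (pthreads, not the kernel waitqueue
 * API; every name here is hypothetical).  The waiter sleeps on a shared
 * condition variable and is woken as soon as the outstanding-notification
 * counter reaches zero, instead of always sleeping until the timeout.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rx_sync_waitq = PTHREAD_COND_INITIALIZER;
static int queue_sync_counter;

/* Per-queue notification handler: decrement and wake the waiter at zero. */
static void *rx_queue_notif(void *arg)
{
	(void)arg;
	usleep(10000);			/* pretend the queue did some work */
	pthread_mutex_lock(&lock);
	if (--queue_sync_counter == 0)
		pthread_cond_broadcast(&rx_sync_waitq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	enum { NUM_QUEUES = 4 };
	pthread_t threads[NUM_QUEUES];
	struct timespec deadline;
	int i;

	queue_sync_counter = NUM_QUEUES;
	for (i = 0; i < NUM_QUEUES; i++)
		pthread_create(&threads[i], NULL, rx_queue_notif, NULL);

	/* Wait up to one second, but return as soon as the counter hits 0. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;
	pthread_mutex_lock(&lock);
	while (queue_sync_counter != 0)
		if (pthread_cond_timedwait(&rx_sync_waitq, &lock, &deadline))
			break;		/* timed out */
	pthread_mutex_unlock(&lock);

	printf("notifications still outstanding: %d\n", queue_sync_counter);
	for (i = 0; i < NUM_QUEUES; i++)
		pthread_join(threads[i], NULL);
	return 0;
}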
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
{
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
/* This looks a bit arbitrary, but the idea is that if we run
* out of possible simultaneous scans and the userspace is
* trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EBUSY;
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
case IWL_MVM_SCAN_NETDETECT:
/* No need to stop anything for net-detect since the
* firmware is restarted anyway. This way, any sched
* scans that were running will be restarted when we
* resume.
/* For non-unified images, there's no need to stop
* anything for net-detect since the firmware is
* restarted anyway. This way, any sched scans that
* were running will be restarted when we resume.
*/
if (!unified_image)
return 0;
/* If this is a unified image and we ran out of scans,
* we need to stop something. Prefer stopping regular
* scans, because the results are useless at this
* point, and we should be able to keep running
* another scheduled scan while suspended.
*/
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
true);
if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
true);
/* fall through, something is wrong if no scan was
* running but we ran out of scans.
*/
default:
WARN_ON(1);
break;
......
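The new IWL_MVM_SCAN_NETDETECT branch above only needs to free a scan slot on unified images, and when it does it prefers stopping a regular scan (whose results are useless at suspend time) over a scheduled scan (which can keep running while suspended). A small userspace model of that preference follows; the flag names are made up and this is not the driver's API.

/*
 * Simplified model of the slot-freeing preference above (made-up names,
 * not the driver API): on a non-unified image the firmware restarts
 * anyway, so nothing is stopped; otherwise stop a regular scan first,
 * then a scheduled scan, and treat anything else as a bug.
 */
#include <assert.h>
#include <stdio.h>

#define SCAN_REGULAR	(1 << 0)
#define SCAN_SCHED	(1 << 1)

/* Returns the scan type to stop, or 0 if nothing needs stopping. */
static int pick_scan_to_stop(unsigned int scan_status, int unified_image)
{
	if (!unified_image)
		return 0;

	if (scan_status & SCAN_REGULAR)
		return SCAN_REGULAR;
	if (scan_status & SCAN_SCHED)
		return SCAN_SCHED;

	assert(0 && "out of scan slots but nothing is running");
	return -1;
}

int main(void)
{
	printf("%d\n", pick_scan_to_stop(SCAN_REGULAR | SCAN_SCHED, 1));
	printf("%d\n", pick_scan_to_stop(SCAN_SCHED, 1));
	printf("%d\n", pick_scan_to_stop(SCAN_SCHED, 0));
	return 0;
}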
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#ifdef CONFIG_ACPI
#define SPL_METHOD "SPLC"
#define SPL_DOMAINTYPE_MODULE BIT(0)
#define SPL_DOMAINTYPE_WIFI BIT(1)
#define SPL_DOMAINTYPE_WIGIG BIT(2)
#define SPL_DOMAINTYPE_RFEM BIT(3)
#define ACPI_SPLC_METHOD "SPLC"
#define ACPI_SPLC_DOMAIN_WIFI (0x07)
static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
{
union acpi_object *limits, *domain_type, *power_limit;
union acpi_object *data_pkg, *dflt_pwr_limit;
int i;
if (splx->type != ACPI_TYPE_PACKAGE ||
splx->package.count != 2 ||
splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
splx->package.elements[0].integer.value != 0) {
IWL_ERR(trans, "Unsupported splx structure\n");
/* We need at least two elements, one for the revision and one
* for the data itself. Also check that the revision is
* supported (currently only revision 0).
*/
if (splc->type != ACPI_TYPE_PACKAGE ||
splc->package.count < 2 ||
splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
splc->package.elements[0].integer.value != 0) {
IWL_DEBUG_INFO(trans,
"Unsupported structure returned by the SPLC method. Ignoring.\n");
return 0;
}
limits = &splx->package.elements[1];
if (limits->type != ACPI_TYPE_PACKAGE ||
limits->package.count < 2 ||
limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
IWL_ERR(trans, "Invalid limits element\n");
return 0;
/* loop through all the packages to find the one for WiFi */
for (i = 1; i < splc->package.count; i++) {
union acpi_object *domain;
data_pkg = &splc->package.elements[i];
/* Skip anything that is not a package with the right
* amount of elements (i.e. at least 2 integers).
*/
if (data_pkg->type != ACPI_TYPE_PACKAGE ||
data_pkg->package.count < 2 ||
data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
continue;
domain = &data_pkg->package.elements[0];
if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
break;
data_pkg = NULL;
}
domain_type = &limits->package.elements[0];
power_limit = &limits->package.elements[1];
if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
if (!data_pkg) {
IWL_DEBUG_INFO(trans,
"No element for the WiFi domain returned by the SPLC method.\n");
return 0;
}
return power_limit->integer.value;
dflt_pwr_limit = &data_pkg->package.elements[1];
return dflt_pwr_limit->integer.value;
}
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
{
acpi_handle pxsx_handle;
acpi_handle handle;
struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
}
/* Get the method's handle */
status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
&handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_INFO(trans, "SPL method not found\n");
IWL_DEBUG_INFO(trans, "SPLC method not found\n");
return;
}
/* Call SPLC with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &splx);
status = acpi_evaluate_object(handle, NULL, NULL, &splc);
if (ACPI_FAILURE(status)) {
IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
return;
}
trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
trans->dflt_pwr_limit);
kfree(splx.pointer);
kfree(splc.pointer);
}
#else /* CONFIG_ACPI */
......
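The rewritten parser above no longer assumes that SPLC returns a single two-element limits package: it checks the revision element, scans the remaining packages for the one whose first integer identifies the WiFi domain, and returns 0 with a debug message rather than an error when nothing usable is found. A simplified userspace sketch of that walk follows, using plain structs instead of ACPICA's union acpi_object; the types and names are hypothetical.

/*
 * Simplified sketch of the parsing strategy above, over plain structs
 * rather than ACPICA's union acpi_object; every type and name here is
 * hypothetical.  The caller passes only the data packages that follow
 * the revision element.
 */
#include <stdint.h>
#include <stdio.h>

#define SPLC_DOMAIN_WIFI 0x07

struct splc_pkg {
	uint64_t domain;
	uint64_t pwr_limit;
};

static uint64_t splc_get_pwr_limit(const struct splc_pkg *pkgs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (pkgs[i].domain == SPLC_DOMAIN_WIFI)
			return pkgs[i].pwr_limit;

	return 0;	/* no WiFi entry: leave the default power limit at 0 */
}

int main(void)
{
	const struct splc_pkg pkgs[] = {
		{ .domain = 0x01,             .pwr_limit = 1000 },
		{ .domain = SPLC_DOMAIN_WIFI, .pwr_limit = 1500 },
	};

	printf("%llu\n", (unsigned long long)splc_get_pwr_limit(pkgs, 2));
	return 0;
}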
@@ -592,6 +592,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, u32 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
return ret;
spin_lock_init(&txq->lock);
if (txq_id == trans_pcie->cmd_queue) {
static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
}
__skb_queue_head_init(&txq->overflow_q);
/*
......
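The last hunk gives the PCIe command queue's spinlock its own lockdep class, so nesting it with a data-queue lock is no longer reported as recursive locking of a single class (the false-positive lockdep report mentioned in the pull request). A kernel-style sketch of the same pattern is below; the struct and init helper are hypothetical, only the APIs already used in the hunk appear, and it builds only against kernel headers, not in userspace.

/*
 * Kernel-style sketch of the lock-class pattern above.  struct demo_txq
 * and demo_txq_init() are hypothetical; spin_lock_init(),
 * lockdep_set_class() and struct lock_class_key are the same interfaces
 * the hunk itself uses.
 */
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_txq {
	spinlock_t lock;
};

static void demo_txq_init(struct demo_txq *txq, bool is_cmd_queue)
{
	spin_lock_init(&txq->lock);

	/* Give the command queue its own class so lockdep can tell its
	 * lock apart from the data-queue locks that share the class
	 * created by the spin_lock_init() call above.
	 */
	if (is_cmd_queue) {
		static struct lock_class_key demo_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &demo_cmd_queue_lock_class);
	}
}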