Commit 62d83681 authored by David S. Miller
parents 230f9bb7 e7fec0bb
......@@ -54,7 +54,7 @@
* i2400m_set_init_config()
* i2400m_cmd_get_state()
* i2400m_dev_shutdown() Called by i2400m_dev_stop()
* i2400m->bus_reset()
* i2400m_reset()
*
* i2400m_{cmd,get,set}_*()
* i2400m_msg_to_dev()
......@@ -82,6 +82,13 @@
#define D_SUBMODULE control
#include "debug-levels.h"
int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
MODULE_PARM_DESC(passive_mode,
"If true, the driver will not do any device setup "
"and leave it up to user space, who must be properly "
"setup.");
/*
* Return if a TLV is of a given type and size
......@@ -263,7 +270,7 @@ int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
if (status == 0)
return 0;
if (status > ARRAY_SIZE(ms_to_errno)) {
if (status >= ARRAY_SIZE(ms_to_errno)) {
str = "unknown status code";
result = -EBADR;
} else {
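The change from '>' to '>=' in the hunk above matters because valid indices into a table of ARRAY_SIZE() entries run from 0 to ARRAY_SIZE()-1; index == ARRAY_SIZE() is already out of range. A minimal, self-contained illustration (userspace C; the table contents and helper name are hypothetical, not the driver's):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical status -> errno table; valid indices are 0 .. ARRAY_SIZE()-1 */
static const int ms_to_errno_demo[] = { 0, -5, -22, -110 };

static int status_to_errno_demo(unsigned int status)
{
	/* status == ARRAY_SIZE(...) is already one past the last entry,
	 * so it must be rejected as well -- hence '>=', not '>' */
	if (status >= ARRAY_SIZE(ms_to_errno_demo))
		return -53;	/* stand-in for -EBADR, "unknown status code" */
	return ms_to_errno_demo[status];
}

int main(void)
{
	printf("%d %d\n", status_to_errno_demo(1), status_to_errno_demo(4));
	return 0;
}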
......@@ -336,7 +343,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
/* Huh? just in case, shut it down */
dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
i2400m_state);
i2400m->bus_reset(i2400m, I2400M_RT_WARM);
i2400m_reset(i2400m, I2400M_RT_WARM);
break;
};
d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
......@@ -1335,6 +1342,8 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
unsigned argc = 0;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
if (i2400m_passive_mode)
goto out_passive;
/* Disable idle mode? (enabled by default) */
if (i2400m_idle_mode_disabled) {
if (i2400m_le_v1_3(i2400m)) {
......@@ -1377,6 +1386,7 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
result = i2400m_set_init_config(i2400m, args, argc);
if (result < 0)
goto error;
out_passive:
/*
* Update state: Here it just calls a get state; parsing the
* result (System State TLV and RF Status TLV [done in the rx
......
......@@ -214,7 +214,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
case I2400M_RT_WARM:
case I2400M_RT_COLD:
case I2400M_RT_BUS:
result = i2400m->bus_reset(i2400m, rt);
result = i2400m_reset(i2400m, rt);
if (result >= 0)
result = 0;
default:
......
This diff is collapsed.
This diff is collapsed.
......@@ -67,6 +67,7 @@
/* Host-Device interface for SDIO */
enum {
I2400M_SDIO_BOOT_RETRIES = 3,
I2400MS_BLK_SIZE = 256,
I2400MS_PL_SIZE_MAX = 0x3E00,
......@@ -77,9 +78,11 @@ enum {
I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
/* The number of ticks to wait for the device to signal that
* it is ready */
I2400MS_INIT_SLEEP_INTERVAL = 10,
I2400MS_INIT_SLEEP_INTERVAL = 100,
/* How long to wait for the device to settle after reset */
I2400MS_SETTLE_TIME = 40,
/* The number of msec to wait for IOR after sending IOE */
IWMC3200_IOR_TIMEOUT = 10,
};
......@@ -97,6 +100,14 @@ enum {
* @tx_workqueue: workqueue used for data TX; we don't use the
* system's workqueue as that might cause deadlocks with code in
* the bus-generic driver.
*
* @debugfs_dentry: dentry for the SDIO specific debugfs files
*
* Note this value is set to NULL upon destruction; this is
* because some routines use it to determine if we are inside the
* probe() path or some other path. When debugfs is disabled,
* creation sets the dentry to '(void*) -ENODEV', which is valid
* for the test.
*/
struct i2400ms {
struct i2400m i2400m; /* FIRST! See doc */
......@@ -111,6 +122,9 @@ struct i2400ms {
wait_queue_head_t bm_wfa_wq;
int bm_wait_result;
size_t bm_ack_size;
/* Device is any of the iwmc3200 SKUs */
unsigned iwmc3200:1;
};
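The @debugfs_dentry note above implies a test of the form sketched below. A minimal sketch only, assuming the i2400m-sdio.h definitions; the helper name is hypothetical. It shows why both the NULL written at destruction time and the ERR_PTR(-ENODEV) left behind when debugfs is compiled out keep the "are we still in the probe() path?" check valid:

#include <linux/debugfs.h>
#include <linux/err.h>
#include "i2400m-sdio.h"	/* struct i2400ms */

/* hypothetical helper: true from the moment probe() created the dentry
 * until destruction explicitly clears it */
static inline bool i2400ms_in_probe_path(struct i2400ms *i2400ms)
{
	/* With CONFIG_DEBUG_FS=n, debugfs_create_dir() returns
	 * ERR_PTR(-ENODEV): non-NULL, so this test still holds;
	 * only the NULL written on destruction makes it fail. */
	return i2400ms->debugfs_dentry != NULL;
}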
......
......@@ -88,6 +88,13 @@ struct edc {
u16 errorcount;
};
struct i2400m_endpoint_cfg {
unsigned char bulk_out;
unsigned char notification;
unsigned char reset_cold;
unsigned char bulk_in;
};
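struct i2400m_endpoint_cfg exists so that different USB SKUs can map the four endpoints differently; the actual per-device assignment happens in the collapsed usb.c probe() changes. A minimal sketch (hypothetical helper name) of filling the legacy layout, which simply mirrors the order the old I2400MU_EP_* enum encoded:

/* sketch only: the default/legacy endpoint layout (0, 1, 2, 3); an
 * i6050-specific layout would be filled the same way from probe() */
static void i2400mu_init_default_endpoints(struct i2400mu *i2400mu)
{
	i2400mu->endpoint_cfg.bulk_out     = 0;
	i2400mu->endpoint_cfg.notification = 1;
	i2400mu->endpoint_cfg.reset_cold   = 2;
	i2400mu->endpoint_cfg.bulk_in      = 3;
}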
static inline void edc_init(struct edc *edc)
{
edc->timestart = jiffies;
......@@ -137,15 +144,13 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
/* Host-Device interface for USB */
enum {
I2400M_USB_BOOT_RETRIES = 3,
I2400MU_MAX_NOTIFICATION_LEN = 256,
I2400MU_BLK_SIZE = 16,
I2400MU_PL_SIZE_MAX = 0x3EFF,
/* Endpoints */
I2400MU_EP_BULK_OUT = 0,
I2400MU_EP_NOTIFICATION,
I2400MU_EP_RESET_COLD,
I2400MU_EP_BULK_IN,
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
};
......@@ -215,6 +220,7 @@ struct i2400mu {
struct usb_device *usb_dev;
struct usb_interface *usb_iface;
struct edc urb_edc; /* Error density counter */
struct i2400m_endpoint_cfg endpoint_cfg;
struct urb *notif_urb;
struct task_struct *tx_kthread;
......
This diff is collapsed.
......@@ -74,6 +74,7 @@
*/
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include "i2400m.h"
......@@ -88,7 +89,10 @@ enum {
* The MTU is 1400 or less
*/
I2400M_MAX_MTU = 1400,
I2400M_TX_TIMEOUT = HZ,
/* 20 secs? yep, this is the maximum timeout that the device
* might take to get out of IDLE / negotiate it with the base
* station. We add 1sec for good measure. */
I2400M_TX_TIMEOUT = 21 * HZ,
I2400M_TX_QLEN = 5,
};
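The 21 * HZ value feeds the netdev watchdog (net_dev->watchdog_timeo, set in i2400m_netdev_setup() further down), and the wake-from-IDLE path later in this diff waits watchdog_timeo - HZ/2, so the driver gives up and resets about half a second before the stack's TX watchdog would fire. Condensed from the surrounding hunks for illustration, not new code:

net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;	/* 21 * HZ, netdev setup */

/* ... and in i2400m_wake_tx_work(), below: */
result = wait_event_timeout(i2400m->state_wq,
			    i2400m->state != I2400M_SS_IDLE,
			    net_dev->watchdog_timeo - HZ/2);	/* ~20.5 s */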
......@@ -101,22 +105,19 @@ int i2400m_open(struct net_device *net_dev)
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
if (i2400m->ready == 0) {
dev_err(dev, "Device is still initializing\n");
result = -EBUSY;
} else
/* Make sure we wait until init is complete... */
mutex_lock(&i2400m->init_mutex);
if (i2400m->updown)
result = 0;
else
result = -EBUSY;
mutex_unlock(&i2400m->init_mutex);
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
net_dev, i2400m, result);
return result;
}
/*
* On kernel versions where cancel_work_sync() didn't return anything,
* we rely on wake_tx_skb being non-NULL.
*/
static
int i2400m_stop(struct net_device *net_dev)
{
......@@ -124,21 +125,7 @@ int i2400m_stop(struct net_device *net_dev)
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
/* See i2400m_hard_start_xmit(), references are taken there
* and here we release them if the work was still
* pending. Note we can't differentiate work not pending vs
* never scheduled, so the NULL check does that. */
if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
&& i2400m->wake_tx_skb != NULL) {
unsigned long flags;
struct sk_buff *wake_tx_skb;
spin_lock_irqsave(&i2400m->tx_lock, flags);
wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
i2400m->wake_tx_skb = NULL; /* compat help */
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
i2400m_put(i2400m);
kfree_skb(wake_tx_skb);
}
i2400m_net_wake_stop(i2400m);
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
return 0;
}
......@@ -167,6 +154,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
{
int result;
struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct device *dev = i2400m_dev(i2400m);
struct sk_buff *skb = i2400m->wake_tx_skb;
unsigned long flags;
......@@ -182,27 +170,36 @@ void i2400m_wake_tx_work(struct work_struct *ws)
dev_err(dev, "WAKE&TX: skb dissapeared!\n");
goto out_put;
}
/* If we have, somehow, lost the connection after this was
* queued, don't do anything; the device might have been
* reset or just disconnected. */
if (unlikely(!netif_carrier_ok(net_dev)))
goto out_kfree;
result = i2400m_cmd_exit_idle(i2400m);
if (result == -EILSEQ)
result = 0;
if (result < 0) {
dev_err(dev, "WAKE&TX: device didn't get out of idle: "
"%d\n", result);
goto error;
"%d - resetting\n", result);
i2400m_reset(i2400m, I2400M_RT_BUS);
goto error;
}
result = wait_event_timeout(i2400m->state_wq,
i2400m->state != I2400M_SS_IDLE, 5 * HZ);
i2400m->state != I2400M_SS_IDLE,
net_dev->watchdog_timeo - HZ/2);
if (result == 0)
result = -ETIMEDOUT;
if (result < 0) {
dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
"%d\n", result);
"%d - resetting\n", result);
i2400m_reset(i2400m, I2400M_RT_BUS);
goto error;
}
msleep(20); /* device still needs some time or it drops it */
result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
netif_wake_queue(i2400m->wimax_dev.net_dev);
error:
netif_wake_queue(net_dev);
out_kfree:
kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */
out_put:
i2400m_put(i2400m);
......@@ -229,6 +226,38 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
}
/*
* Cleanup resources acquired during i2400m_net_wake_tx()
*
* This is called by __i2400m_dev_stop and means we have to make sure
* any pending work has been flushed from the workqueue.
*/
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
/* See i2400m_hard_start_xmit(), references are taken there
* and here we release them if the work was still
* pending. Note we can't differentiate work not pending vs
* never scheduled, so the NULL check does that. */
if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
&& i2400m->wake_tx_skb != NULL) {
unsigned long flags;
struct sk_buff *wake_tx_skb;
spin_lock_irqsave(&i2400m->tx_lock, flags);
wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
i2400m->wake_tx_skb = NULL; /* compat help */
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
i2400m_put(i2400m);
kfree_skb(wake_tx_skb);
}
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
return;
}
/*
* TX an skb to an idle device
*
......@@ -342,6 +371,20 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
int result;
d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
if (skb_header_cloned(skb)) {
/*
* Make tcpdump/wireshark happy -- if they are
* running, the skb is cloned and we will overwrite
* the mac fields in i2400m_tx_prep_header. Expand
* seems to fix this...
*/
result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (result) {
result = NETDEV_TX_BUSY;
goto error_expand;
}
}
if (i2400m->state == I2400M_SS_IDLE)
result = i2400m_net_wake_tx(i2400m, net_dev, skb);
else
......@@ -352,10 +395,11 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
net_dev->stats.tx_packets++;
net_dev->stats.tx_bytes += skb->len;
}
result = NETDEV_TX_OK;
error_expand:
kfree_skb(skb);
d_fnend(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
return NETDEV_TX_OK;
d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
return result;
}
......@@ -559,6 +603,22 @@ static const struct net_device_ops i2400m_netdev_ops = {
.ndo_change_mtu = i2400m_change_mtu,
};
static void i2400m_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
if (net_dev->dev.parent)
strncpy(info->bus_info, dev_name(net_dev->dev.parent),
sizeof(info->bus_info) - 1);
}
static const struct ethtool_ops i2400m_ethtool_ops = {
.get_drvinfo = i2400m_get_drvinfo,
.get_link = ethtool_op_get_link,
};
/**
* i2400m_netdev_setup - Setup setup @net_dev's i2400m private data
......@@ -580,6 +640,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
& ~IFF_MULTICAST);
net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
net_dev->netdev_ops = &i2400m_netdev_ops;
net_dev->ethtool_ops = &i2400m_ethtool_ops;
d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
}
EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
......
......@@ -158,30 +158,104 @@ struct i2400m_report_hook_args {
struct sk_buff *skb_rx;
const struct i2400m_l3l4_hdr *l3l4_hdr;
size_t size;
struct list_head list_node;
};
/*
* Execute i2400m_report_hook in a workqueue
*
* Unpacks arguments from the deferred call, executes it and then
* drops the references.
* Goes over the list of queued reports in i2400m->rx_reports and
* processes them.
*
* Obvious NOTE: References are needed because we are a separate
* thread; otherwise the buffer changes under us because it is
* released by the original caller.
* NOTE: refcounts on i2400m are not needed because we flush the
* workqueue this runs on (i2400m->work_queue) before destroying
* i2400m.
*/
static
void i2400m_report_hook_work(struct work_struct *ws)
{
struct i2400m_work *iw =
container_of(ws, struct i2400m_work, ws);
struct i2400m_report_hook_args *args = (void *) iw->pl;
if (iw->i2400m->ready)
i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
kfree_skb(args->skb_rx);
i2400m_put(iw->i2400m);
kfree(iw);
struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
struct device *dev = i2400m_dev(i2400m);
struct i2400m_report_hook_args *args, *args_next;
LIST_HEAD(list);
unsigned long flags;
while (1) {
spin_lock_irqsave(&i2400m->rx_lock, flags);
list_splice_init(&i2400m->rx_reports, &list);
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
if (list_empty(&list))
break;
else
d_printf(1, dev, "processing queued reports\n");
list_for_each_entry_safe(args, args_next, &list, list_node) {
d_printf(2, dev, "processing queued report %p\n", args);
i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
kfree_skb(args->skb_rx);
list_del(&args->list_node);
kfree(args);
}
}
}
/*
* Flush the list of queued reports
*/
static
void i2400m_report_hook_flush(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_report_hook_args *args, *args_next;
LIST_HEAD(list);
unsigned long flags;
d_printf(1, dev, "flushing queued reports\n");
spin_lock_irqsave(&i2400m->rx_lock, flags);
list_splice_init(&i2400m->rx_reports, &list);
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
list_for_each_entry_safe(args, args_next, &list, list_node) {
d_printf(2, dev, "flushing queued report %p\n", args);
kfree_skb(args->skb_rx);
list_del(&args->list_node);
kfree(args);
}
}
/*
* Queue a report for later processing
*
* @i2400m: device descriptor
* @skb_rx: skb that contains the payload (for reference counting)
* @l3l4_hdr: pointer to the control message header
* @size: size of the message
*/
static
void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
const void *l3l4_hdr, size_t size)
{
struct device *dev = i2400m_dev(i2400m);
unsigned long flags;
struct i2400m_report_hook_args *args;
args = kzalloc(sizeof(*args), GFP_NOIO);
if (args) {
args->skb_rx = skb_get(skb_rx);
args->l3l4_hdr = l3l4_hdr;
args->size = size;
spin_lock_irqsave(&i2400m->rx_lock, flags);
list_add_tail(&args->list_node, &i2400m->rx_reports);
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
d_printf(2, dev, "queued report %p\n", args);
rmb(); /* see i2400m->ready's documentation */
if (likely(i2400m->ready)) /* only send if up */
queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
} else {
if (printk_ratelimit())
dev_err(dev, "%s:%u: Can't allocate %zu B\n",
__func__, __LINE__, sizeof(*args));
}
}
......@@ -295,21 +369,29 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
msg_type, size);
d_dump(2, dev, l3l4_hdr, size);
if (msg_type & I2400M_MT_REPORT_MASK) {
/* These hooks have to be run serialized; as well, the
* handling might force the execution of commands, and
* that might cause reentrancy issues with
* bus-specific subdrivers and workqueues. So we run
* it in a separate workqueue. */
struct i2400m_report_hook_args args = {
.skb_rx = skb_rx,
.l3l4_hdr = l3l4_hdr,
.size = size
};
if (unlikely(i2400m->ready == 0)) /* only send if up */
return;
skb_get(skb_rx);
i2400m_queue_work(i2400m, i2400m_report_hook_work,
GFP_KERNEL, &args, sizeof(args));
/*
* Process each report
*
* - has to be run serialized as well
*
* - the handling might force the execution of
* commands. That might cause reentrancy issues with
* bus-specific subdrivers and workqueues, so we
* run it in a separate workqueue.
*
* - when the driver is not yet ready to handle them,
* they are queued and at some point the queue is
* restarted [NOTE: we can't queue SKBs directly, as
* this might be a piece of a SKB, not the whole
* thing, and this is cheaper than cloning the
* SKB].
*
* Note we don't do refcounting for the device
* structure; this is because before destroying
* 'i2400m', we make sure to flush the
* i2400m->work_queue, so there are no issues.
*/
i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
if (unlikely(i2400m->trace_msg_from_user))
wimax_msg(&i2400m->wimax_dev, "echo",
l3l4_hdr, size, GFP_KERNEL);
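The comment above says reports queued while the driver is not ready are "restarted" later; that restart is not part of this hunk (it presumably lives in one of the collapsed diffs). A minimal sketch, with a hypothetical function name, of what such a kick looks like given the fields introduced here (assumes i2400m.h):

/* hypothetical helper: re-schedule processing of any reports that were
 * queued while i2400m->ready was still 0 */
static void i2400m_report_queue_restart(struct i2400m *i2400m)
{
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	pending = !list_empty(&i2400m->rx_reports);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (pending)
		queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
}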
......@@ -363,8 +445,6 @@ void i2400m_rx_trace(struct i2400m *i2400m,
msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
msg_type, size);
d_dump(2, dev, l3l4_hdr, size);
if (unlikely(i2400m->ready == 0)) /* only send if up */
return;
result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
if (result < 0)
dev_err(dev, "error sending trace to userspace: %d\n",
......@@ -748,7 +828,7 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
nsn, lbn, roq->ws);
i2400m_roq_log_dump(i2400m, roq);
i2400m->bus_reset(i2400m, I2400M_RT_WARM);
i2400m_reset(i2400m, I2400M_RT_WARM);
} else {
__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
......@@ -814,7 +894,7 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
nsn, sn, roq->ws);
i2400m_roq_log_dump(i2400m, roq);
i2400m->bus_reset(i2400m, I2400M_RT_WARM);
i2400m_reset(i2400m, I2400M_RT_WARM);
} else {
/* if the queue is empty, don't bother as we'd queue
* it and immediately unqueue it -- just deliver it */
......@@ -1194,6 +1274,28 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(i2400m_rx);
void i2400m_unknown_barker(struct i2400m *i2400m,
const void *buf, size_t size)
{
struct device *dev = i2400m_dev(i2400m);
char prefix[64];
const __le32 *barker = buf;
dev_err(dev, "RX: HW BUG? unknown barker %08x, "
"dropping %zu bytes\n", le32_to_cpu(*barker), size);
snprintf(prefix, sizeof(prefix), "%s %s: ",
dev_driver_string(dev), dev_name(dev));
if (size > 64) {
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
8, 4, buf, 64, 0);
printk(KERN_ERR "%s... (only first 64 bytes "
"dumped)\n", prefix);
} else
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
8, 4, buf, size, 0);
}
EXPORT_SYMBOL(i2400m_unknown_barker);
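i2400m_is_boot_barker(), used by the SDIO and USB RX paths below, is defined in one of the collapsed diffs (presumably fw.c). A minimal sketch, assuming it checks the first 32-bit word against the known reboot barkers (including the I2400M_SBOOT_BARKER_6050 value added near the end of this commit) and returns >= 0 on a match, negative otherwise -- the contract the callers below rely on:

/* sketch only -- the real implementation is in a collapsed diff and may
 * also match a per-device barker remembered from boot time */
static int i2400m_is_boot_barker_sketch(struct i2400m *i2400m,
					const void *buf, size_t size)
{
	u32 barker;

	if (size < sizeof(__le32))
		return -ENOENT;
	barker = le32_to_cpu(*(const __le32 *)buf);
	switch (barker) {
	case I2400M_NBOOT_BARKER:
	case I2400M_SBOOT_BARKER:
	case I2400M_SBOOT_BARKER_6050:
		return 0;		/* it is a boot/reboot barker */
	default:
		return -ENOENT;		/* unknown barker */
	}
}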
/*
* Initialize the RX queue and infrastructure
*
......@@ -1261,4 +1363,6 @@ void i2400m_rx_release(struct i2400m *i2400m)
kfree(i2400m->rx_roq[0].log);
kfree(i2400m->rx_roq);
}
/* at this point, nothing can be received... */
i2400m_report_hook_flush(i2400m);
}
......@@ -118,7 +118,8 @@ ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
goto error_too_big;
memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size); /* Prep command */
if (_cmd != i2400m->bm_cmd_buf)
memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
cmd = i2400m->bm_cmd_buf;
if (cmd_size_a > cmd_size) /* Zero pad space */
memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
......@@ -177,10 +178,6 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
i2400m, ack, ack_size);
spin_lock(&i2400m->rx_lock);
i2400ms->bm_ack_size = -EINPROGRESS;
spin_unlock(&i2400m->rx_lock);
result = wait_event_timeout(i2400ms->bm_wfa_wq,
i2400ms->bm_ack_size != -EINPROGRESS,
2 * HZ);
......@@ -199,6 +196,10 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
size = min(ack_size, i2400ms->bm_ack_size);
memcpy(ack, i2400m->bm_ack_buf, size);
}
/*
* Remember always to clear the bm_ack_size to -EINPROGRESS
* after the RX data is processed
*/
i2400ms->bm_ack_size = -EINPROGRESS;
spin_unlock(&i2400m->rx_lock);
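Taken together with the SDIO RX hunk further down, the bm_ack_size handling forms a small handshake; condensed here for clarity, not new code: the RX interrupt path publishes the ack size and wakes the waiter, and the waiter consumes it and then re-arms the -EINPROGRESS sentinel so the next ack can be detected.

/* producer: i2400ms_rx(), boot mode */
spin_lock(&i2400m->rx_lock);
i2400ms->bm_ack_size = rx_size;
spin_unlock(&i2400m->rx_lock);
wake_up(&i2400ms->bm_wfa_wq);

/* consumer: i2400ms_bus_bm_wait_for_ack() */
result = wait_event_timeout(i2400ms->bm_wfa_wq,
			    i2400ms->bm_ack_size != -EINPROGRESS, 2 * HZ);
/* ... copy the ack out under rx_lock ... */
i2400ms->bm_ack_size = -EINPROGRESS;	/* re-arm for the next ack */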
......
......@@ -53,6 +53,7 @@
* i2400ms_irq()
* i2400ms_rx()
* __i2400ms_rx_get_size()
* i2400m_is_boot_barker()
* i2400m_rx()
*
* i2400ms_rx_setup()
......@@ -138,6 +139,11 @@ void i2400ms_rx(struct i2400ms *i2400ms)
ret = rx_size;
goto error_get_size;
}
/*
* Hardware quirk: make sure to clear the INTR status register
* AFTER getting the data transfer size.
*/
sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
ret = -ENOMEM;
skb = alloc_skb(rx_size, GFP_ATOMIC);
......@@ -153,25 +159,34 @@ void i2400ms_rx(struct i2400ms *i2400ms)
}
rmb(); /* make sure we get boot_mode from dev_reset_handle */
if (i2400m->boot_mode == 1) {
if (unlikely(i2400m->boot_mode == 1)) {
spin_lock(&i2400m->rx_lock);
i2400ms->bm_ack_size = rx_size;
spin_unlock(&i2400m->rx_lock);
memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
wake_up(&i2400ms->bm_wfa_wq);
dev_err(dev, "RX: SDIO boot mode message\n");
d_printf(5, dev, "RX: SDIO boot mode message\n");
kfree_skb(skb);
} else if (unlikely(!memcmp(skb->data, i2400m_NBOOT_BARKER,
sizeof(i2400m_NBOOT_BARKER))
|| !memcmp(skb->data, i2400m_SBOOT_BARKER,
sizeof(i2400m_SBOOT_BARKER)))) {
ret = i2400m_dev_reset_handle(i2400m);
goto out;
}
ret = -EIO;
if (unlikely(rx_size < sizeof(__le32))) {
dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
goto error_bad_size;
}
if (likely(i2400m_is_d2h_barker(skb->data))) {
skb_put(skb, rx_size);
i2400m_rx(i2400m, skb);
} else if (unlikely(i2400m_is_boot_barker(i2400m,
skb->data, rx_size))) {
ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
dev_err(dev, "RX: SDIO reboot barker\n");
kfree_skb(skb);
} else {
skb_put(skb, rx_size);
i2400m_rx(i2400m, skb);
i2400m_unknown_barker(i2400m, skb->data, rx_size);
kfree_skb(skb);
}
out:
d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
return;
......@@ -179,6 +194,7 @@ void i2400ms_rx(struct i2400ms *i2400ms)
kfree_skb(skb);
error_alloc_skb:
error_get_size:
error_bad_size:
d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
return;
}
......@@ -209,7 +225,6 @@ void i2400ms_irq(struct sdio_func *func)
dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
goto error_no_irq;
}
sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
i2400ms_rx(i2400ms);
error_no_irq:
d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
......@@ -234,6 +249,13 @@ int i2400ms_rx_setup(struct i2400ms *i2400ms)
init_waitqueue_head(&i2400ms->bm_wfa_wq);
spin_lock(&i2400m->rx_lock);
i2400ms->bm_wait_result = -EINPROGRESS;
/*
* Before enabling the RX interrupt, make sure bm_ack_size is
* cleared to -EINPROGRESS; this indicates that no RX interrupt
* has happened yet (or that the previous one has been handled)
* and that we are ready to take a new one.
*/
i2400ms->bm_ack_size = -EINPROGRESS;
spin_unlock(&i2400m->rx_lock);
sdio_claim_host(func);
......
......@@ -149,5 +149,8 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
void i2400ms_tx_release(struct i2400ms *i2400ms)
{
destroy_workqueue(i2400ms->tx_workqueue);
if (i2400ms->tx_workqueue) {
destroy_workqueue(i2400ms->tx_workqueue);
i2400ms->tx_workqueue = NULL;
}
}
This diff is collapsed.
......@@ -310,7 +310,7 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
size_t tail_room;
size_t tx_in;
if (unlikely(i2400m->tx_in) == 0)
if (unlikely(i2400m->tx_in == 0))
return I2400M_TX_BUF_SIZE;
tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
tail_room = I2400M_TX_BUF_SIZE - tx_in;
......@@ -642,6 +642,9 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
* current one is out of payload slots or we have a singleton,
* close it and start a new one */
spin_lock_irqsave(&i2400m->tx_lock, flags);
result = -ESHUTDOWN;
if (i2400m->tx_buf == NULL)
goto error_tx_new;
try_new:
if (unlikely(i2400m->tx_msg == NULL))
i2400m_tx_new(i2400m);
......@@ -697,7 +700,10 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
}
error_tx_new:
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
i2400m->bus_tx_kick(i2400m); /* always kick, might free up space */
/* kick in most cases, as it might free up space; don't kick
 * when the TX subsystem is down */
if (likely(result != -ESHUTDOWN))
i2400m->bus_tx_kick(i2400m);
d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
i2400m, buf, buf_len, pl_type, result);
return result;
......@@ -740,6 +746,9 @@ struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
spin_lock_irqsave(&i2400m->tx_lock, flags);
tx_msg_moved = NULL;
if (i2400m->tx_buf == NULL)
goto out_unlock;
skip:
tx_msg_moved = NULL;
if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
......@@ -829,6 +838,8 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
spin_lock_irqsave(&i2400m->tx_lock, flags);
if (i2400m->tx_buf == NULL)
goto out_unlock;
i2400m->tx_out += i2400m->tx_msg_size;
d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
i2400m->tx_msg_size = 0;
......@@ -837,6 +848,7 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
i2400m->tx_out %= I2400M_TX_BUF_SIZE;
i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
out_unlock:
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
......@@ -876,5 +888,9 @@ int i2400m_tx_setup(struct i2400m *i2400m)
*/
void i2400m_tx_release(struct i2400m *i2400m)
{
unsigned long flags;
spin_lock_irqsave(&i2400m->tx_lock, flags);
kfree(i2400m->tx_buf);
i2400m->tx_buf = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
}
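The tx_buf == NULL checks added in i2400m_tx(), i2400m_tx_msg_get() and i2400m_tx_msg_sent() pair with this release path: the FIFO is freed and the pointer cleared while holding tx_lock, so any caller racing with release sees NULL under the same lock and backs out instead of touching freed memory. The shared idiom, condensed for illustration:

spin_lock_irqsave(&i2400m->tx_lock, flags);
if (i2400m->tx_buf == NULL) {		/* TX already released? */
	result = -ESHUTDOWN;
	goto out_unlock;
}
/* ... normal FIFO work on i2400m->tx_buf ... */
out_unlock:
spin_unlock_irqrestore(&i2400m->tx_lock, flags);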
......@@ -99,10 +99,10 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
do_autopm = 0;
}
epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, HZ);
result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, 200);
switch (result) {
case 0:
if (len != buf_size) {
......@@ -113,6 +113,28 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
}
result = len;
break;
case -EPIPE:
/*
* Stall -- maybe the device is choking with our
* requests. Clear it and give it some time. If they
* happen too often, it might be another symptom, so we
* reset.
*
* No error handling for usb_clear_halt(); if it
* works, the retry works; if it fails, this switch
* does the error handling for us.
*/
if (edc_inc(&i2400mu->urb_edc,
10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
usb_queue_reset_device(i2400mu->usb_iface);
/* fallthrough */
} else {
usb_clear_halt(i2400mu->usb_dev, pipe);
msleep(10); /* give the device some time */
goto retry;
}
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
......@@ -135,7 +157,6 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
result);
goto retry;
}
result = len;
if (do_autopm)
usb_autopm_put_interface(i2400mu->usb_iface);
return result;
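edc_inc(), used above to decide between "clear the stall and retry" and "reset the device", is defined in i2400m-usb.h but its body falls outside the lines shown. A minimal sketch of what such an error-density check does, assuming the timestart/errorcount fields shown earlier; the real implementation may differ in detail:

/* sketch: count an error and report "too many" only when max_err
 * errors accumulate within the given timeframe (in jiffies), so
 * isolated stalls just retry while bursts escalate to a reset */
static inline int edc_inc_sketch(struct edc *edc, u16 max_err, u16 timeframe)
{
	unsigned long now = jiffies;

	if (now - edc->timestart > timeframe) {
		edc->errorcount = 1;	/* window expired: start a new one */
		edc->timestart = now;
	} else if (++edc->errorcount > max_err) {
		edc->errorcount = 0;
		edc->timestart = now;
		return 1;		/* too many errors in this window */
	}
	return 0;
}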
......@@ -172,7 +193,8 @@ ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
result = -E2BIG;
if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
goto error_too_big;
memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);
if (_cmd != i2400m->bm_cmd_buf)
memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
cmd = i2400m->bm_cmd_buf;
if (cmd_size_a > cmd_size) /* Zero pad space */
memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
......@@ -226,7 +248,8 @@ int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
struct usb_endpoint_descriptor *epd;
int pipe;
epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
epd = usb_get_epd(i2400mu->usb_iface,
i2400mu->endpoint_cfg.notification);
pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
......@@ -328,8 +351,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
out:
if (do_autopm)
usb_autopm_put_interface(i2400mu->usb_iface);
d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %zd\n",
i2400m, ack, ack_size, result);
d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
i2400m, ack, ack_size, (long) result);
return result;
error_exceeded:
......
......@@ -51,6 +51,7 @@
*
* i2400mu_usb_notification_cb() Called when a URB is ready
* i2400mu_notif_grok()
* i2400m_is_boot_barker()
* i2400m_dev_reset_handle()
* i2400mu_rx_kick()
*/
......@@ -87,32 +88,21 @@ int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
i2400mu, buf, buf_len);
ret = -EIO;
if (buf_len < sizeof(i2400m_NBOOT_BARKER))
if (buf_len < sizeof(i2400m_ZERO_BARKER))
/* Not a bug, just ignore */
goto error_bad_size;
if (!memcmp(i2400m_NBOOT_BARKER, buf, sizeof(i2400m_NBOOT_BARKER))
|| !memcmp(i2400m_SBOOT_BARKER, buf, sizeof(i2400m_SBOOT_BARKER)))
ret = i2400m_dev_reset_handle(i2400m);
else if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
ret = 0;
if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
i2400mu_rx_kick(i2400mu);
ret = 0;
} else { /* Unknown or unexpected data in the notif message */
char prefix[64];
ret = -EIO;
dev_err(dev, "HW BUG? Unknown/unexpected data in notification "
"message (%zu bytes)\n", buf_len);
snprintf(prefix, sizeof(prefix), "%s %s: ",
dev_driver_string(dev), dev_name(dev));
if (buf_len > 64) {
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
8, 4, buf, 64, 0);
printk(KERN_ERR "%s... (only first 64 bytes "
"dumped)\n", prefix);
} else
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
8, 4, buf, buf_len, 0);
goto out;
}
ret = i2400m_is_boot_barker(i2400m, buf, buf_len);
if (unlikely(ret >= 0))
ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
else /* Unknown or unexpected data in the notif message */
i2400m_unknown_barker(i2400m, buf, buf_len);
error_bad_size:
out:
d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
i2400mu, buf, buf_len, ret);
return ret;
......@@ -220,7 +210,8 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
dev_err(dev, "notification: cannot allocate URB\n");
goto error_alloc_urb;
}
epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
epd = usb_get_epd(i2400mu->usb_iface,
i2400mu->endpoint_cfg.notification);
usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
buf, I2400MU_MAX_NOTIFICATION_LEN,
......
......@@ -204,7 +204,7 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
dev_err(dev, "RX: can't get autopm: %d\n", result);
do_autopm = 0;
}
epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_IN);
epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
......@@ -214,7 +214,7 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
}
result = usb_bulk_msg(
i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
rx_size, &read_size, HZ);
rx_size, &read_size, 200);
usb_mark_last_busy(i2400mu->usb_dev);
switch (result) {
case 0:
......@@ -222,6 +222,26 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
goto retry; /* ZLP, just resubmit */
skb_put(rx_skb, read_size);
break;
case -EPIPE:
/*
* Stall -- maybe the device is choking with our
* requests. Clear it and give it some time. If they
* happen too often, it might be another symptom, so we
* reset.
*
* No error handling for usb_clear_halt(); if it
* works, the retry works; if it fails, this switch
* does the error handling for us.
*/
if (edc_inc(&i2400mu->urb_edc,
10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
goto do_reset;
}
usb_clear_halt(i2400mu->usb_dev, usb_pipe);
msleep(10); /* give the device some time */
goto retry;
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
......@@ -283,6 +303,7 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
error_reset:
dev_err(dev, "RX: maximum errors in URB exceeded; "
"resetting device\n");
do_reset:
usb_queue_reset_device(i2400mu->usb_iface);
rx_skb = ERR_PTR(result);
goto out;
......@@ -316,10 +337,15 @@ int i2400mu_rxd(void *_i2400mu)
size_t pending;
int rx_size;
struct sk_buff *rx_skb;
unsigned long flags;
d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
spin_lock_irqsave(&i2400m->rx_lock, flags);
BUG_ON(i2400mu->rx_kthread != NULL);
i2400mu->rx_kthread = current;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
while (1) {
d_printf(2, dev, "TX: waiting for messages\n");
d_printf(2, dev, "RX: waiting for messages\n");
pending = 0;
wait_event_interruptible(
i2400mu->rx_wq,
......@@ -367,6 +393,9 @@ int i2400mu_rxd(void *_i2400mu)
}
result = 0;
out:
spin_lock_irqsave(&i2400m->rx_lock, flags);
i2400mu->rx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
return result;
......@@ -403,18 +432,33 @@ int i2400mu_rx_setup(struct i2400mu *i2400mu)
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = &i2400mu->usb_iface->dev;
struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
struct task_struct *kthread;
i2400mu->rx_kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
wimax_dev->name);
if (IS_ERR(i2400mu->rx_kthread)) {
result = PTR_ERR(i2400mu->rx_kthread);
kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
wimax_dev->name);
/* the kthread function sets i2400mu->rx_kthread */
if (IS_ERR(kthread)) {
result = PTR_ERR(kthread);
dev_err(dev, "RX: cannot start thread: %d\n", result);
}
return result;
}
void i2400mu_rx_release(struct i2400mu *i2400mu)
{
kthread_stop(i2400mu->rx_kthread);
unsigned long flags;
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = i2400m_dev(i2400m);
struct task_struct *kthread;
spin_lock_irqsave(&i2400m->rx_lock, flags);
kthread = i2400mu->rx_kthread;
i2400mu->rx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
if (kthread)
kthread_stop(kthread);
else
d_printf(1, dev, "RX: kthread had already exited\n");
}
......@@ -101,11 +101,11 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
dev_err(dev, "TX: can't get autopm: %d\n", result);
do_autopm = 0;
}
epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
tx_msg, tx_msg_size, &sent_size, HZ);
tx_msg, tx_msg_size, &sent_size, 200);
usb_mark_last_busy(i2400mu->usb_dev);
switch (result) {
case 0:
......@@ -115,6 +115,28 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
result = -EIO;
}
break;
case -EPIPE:
/*
* Stall -- maybe the device is choking with our
* requests. Clear it and give it some time. If they
* happen too often, it might be another symptom, so we
* reset.
*
* No error handling for usb_clear_halt(); if it
* works, the retry works; if it fails, this switch
* does the error handling for us.
*/
if (edc_inc(&i2400mu->urb_edc,
10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
dev_err(dev, "BM-CMD: too many stalls in "
"URB; resetting device\n");
usb_queue_reset_device(i2400mu->usb_iface);
/* fallthrough */
} else {
usb_clear_halt(i2400mu->usb_dev, usb_pipe);
msleep(10); /* give the device some time */
goto retry;
}
case -EINVAL: /* while removing driver */
case -ENODEV: /* dev disconnect ... */
case -ENOENT: /* just ignore it */
......@@ -161,9 +183,15 @@ int i2400mu_txd(void *_i2400mu)
struct device *dev = &i2400mu->usb_iface->dev;
struct i2400m_msg_hdr *tx_msg;
size_t tx_msg_size;
unsigned long flags;
d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
spin_lock_irqsave(&i2400m->tx_lock, flags);
BUG_ON(i2400mu->tx_kthread != NULL);
i2400mu->tx_kthread = current;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
while (1) {
d_printf(2, dev, "TX: waiting for messages\n");
tx_msg = NULL;
......@@ -183,6 +211,11 @@ int i2400mu_txd(void *_i2400mu)
if (result < 0)
break;
}
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400mu->tx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
return result;
}
......@@ -213,11 +246,13 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = &i2400mu->usb_iface->dev;
struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
struct task_struct *kthread;
i2400mu->tx_kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
wimax_dev->name);
if (IS_ERR(i2400mu->tx_kthread)) {
result = PTR_ERR(i2400mu->tx_kthread);
kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
wimax_dev->name);
/* the kthread function sets i2400mu->tx_kthread */
if (IS_ERR(kthread)) {
result = PTR_ERR(kthread);
dev_err(dev, "TX: cannot start thread: %d\n", result);
}
return result;
......@@ -225,5 +260,17 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
void i2400mu_tx_release(struct i2400mu *i2400mu)
{
kthread_stop(i2400mu->tx_kthread);
unsigned long flags;
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = i2400m_dev(i2400m);
struct task_struct *kthread;
spin_lock_irqsave(&i2400m->tx_lock, flags);
kthread = i2400mu->tx_kthread;
i2400mu->tx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
if (kthread)
kthread_stop(kthread);
else
d_printf(1, dev, "TX: kthread had already exited\n");
}
This diff is collapsed.
......@@ -28,6 +28,7 @@
#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
......
......@@ -450,4 +450,76 @@ do { \
})
static inline
void d_submodule_set(struct d_level *d_level, size_t d_level_size,
const char *submodule, u8 level, const char *tag)
{
struct d_level *itr, *top;
int index = -1;
for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
index++;
if (itr->name == NULL) {
printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
tag, itr, index);
continue;
}
if (!strcmp(itr->name, submodule)) {
itr->level = level;
return;
}
}
printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
}
/**
* d_parse_params - Parse a string with debug parameters from the
* command line
*
* @d_level: level structure (D_LEVEL)
* @d_level_size: number of items in the level structure
* (D_LEVEL_SIZE).
* @_params: string with the parameters; this is a space (not tab!)
* separated list of NAME:VALUE, where VALUE is the debug level
* and NAME is the name of the submodule.
* @tag: string for error messages (example: MODULE.ARGNAME).
*/
static inline
void d_parse_params(struct d_level *d_level, size_t d_level_size,
const char *_params, const char *tag)
{
char submodule[130], *params, *params_orig, *token, *colon;
unsigned level, tokens;
if (_params == NULL)
return;
params_orig = kstrdup(_params, GFP_KERNEL);
params = params_orig;
while (1) {
token = strsep(&params, " ");
if (token == NULL)
break;
if (*token == '\0') /* eat joint spaces */
continue;
/* kernel's sscanf %s eats until whitespace, so we
* replace : by \n so it doesn't get eaten later by
* strsep */
colon = strchr(token, ':');
if (colon != NULL)
*colon = '\n';
tokens = sscanf(token, "%s\n%u", submodule, &level);
if (colon != NULL)
*colon = ':'; /* set back, for error messages */
if (tokens == 2)
d_submodule_set(d_level, d_level_size,
submodule, level, tag);
else
printk(KERN_ERR "%s: can't parse '%s' as a "
"SUBMODULE:LEVEL (%d tokens)\n",
tag, token, tokens);
}
kfree(params_orig);
}
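A hedged usage sketch of the parser above: the submodule table and its size come from the module's existing debug-levels machinery (D_LEVEL / D_LEVEL_SIZE), and the string is the space-separated NAME:VALUE form described in the kerneldoc. "stack" is the submodule defined in stack.c below; any other names would have to exist in that module's debug-levels.h.

/* illustration only: raise the "stack" submodule to debug level 2,
 * leaving every other submodule at its compile-time level */
d_parse_params(D_LEVEL, D_LEVEL_SIZE, "stack:2", "wimax.debug");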
#endif /* #ifndef __debug__h__ */
......@@ -138,7 +138,7 @@ struct i2400m_bcf_hdr {
__le32 module_id;
__le32 module_vendor;
__le32 date; /* BCD YYYMMDD */
__le32 size;
__le32 size; /* in dwords */
__le32 key_size; /* in dwords */
__le32 modulus_size; /* in dwords */
__le32 exponent_size; /* in dwords */
......@@ -168,16 +168,6 @@ enum i2400m_brh {
};
/* Constants for bcf->module_id */
enum i2400m_bcf_mod_id {
/* Firmware file carries its own pokes -- pokes are a set of
* magical values that have to be written in certain memory
* addresses to get the device up and ready for firmware
* download when it is in non-signed boot mode. */
I2400M_BCF_MOD_ID_POKES = 0x000000001,
};
/**
* i2400m_bootrom_header - Header for a boot-mode command
*
......@@ -276,6 +266,7 @@ enum {
I2400M_WARM_RESET_BARKER = 0x50f750f7,
I2400M_NBOOT_BARKER = 0xdeadbeef,
I2400M_SBOOT_BARKER = 0x0ff1c1a1,
I2400M_SBOOT_BARKER_6050 = 0x80000001,
I2400M_ACK_BARKER = 0xfeedbabe,
I2400M_D2H_MSG_BARKER = 0xbeefbabe,
};
......
......@@ -195,6 +195,12 @@
* defining the `struct nla_policy` for each message, it has to have
* an array size of WIMAX_GNL_ATTR_MAX+1.
*
* The op_*() function pointers will not be called if the wimax_dev is
* in a state <= %WIMAX_ST_UNINITIALIZED. The exception is:
*
* - op_reset: can be called at any time after wimax_dev_add() has
* been called.
*
* THE PIPE INTERFACE:
*
* This interface is kept intentionally simple. The driver can send
......
......@@ -388,6 +388,8 @@ int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&wimax_dev->mutex);
result = wimax_dev_is_ready(wimax_dev);
if (result == -ENOMEDIUM)
result = 0;
if (result < 0)
goto error_not_ready;
result = -ENOSYS;
......
......@@ -305,8 +305,15 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
mutex_lock(&wimax_dev->mutex);
result = wimax_dev_is_ready(wimax_dev);
if (result < 0)
if (result < 0) {
/* While initializing, < 1.4.3 wimax-tools versions use
* this call to check if the device is a valid WiMAX
* device; so we always allow it to proceed,
* considering the radios are all off. */
if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY)
result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF;
goto error_not_ready;
}
switch (state) {
case WIMAX_RF_ON:
case WIMAX_RF_OFF:
......@@ -355,6 +362,7 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
wimax_dev->rfkill = rfkill;
rfkill_init_sw_state(rfkill, 1);
result = rfkill_register(wimax_dev->rfkill);
if (result < 0)
goto error_rfkill_register;
......
......@@ -60,6 +60,14 @@
#define D_SUBMODULE stack
#include "debug-levels.h"
static char wimax_debug_params[128];
module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
0644);
MODULE_PARM_DESC(debug,
"String of space-separated NAME:VALUE pairs, where NAMEs "
"are the different debug submodules and VALUE are the "
"initial debug value to set.");
/*
* Authoritative source for the RE_STATE_CHANGE attribute policy
*
......@@ -562,6 +570,9 @@ int __init wimax_subsys_init(void)
int result, cnt;
d_fnstart(4, NULL, "()\n");
d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
"wimax.debug");
snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
"WiMAX");
result = genl_register_family(&wimax_gnl_family);
......