Commit 1cdc5abf authored by David S. Miller
parents e0f43752 0fb0a4f0
@@ -83,6 +83,21 @@
 #define D_SUBMODULE control
 #include "debug-levels.h"
 
+static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+                 "If true, the device will not enable idle mode negotiation "
+                 "with the base station (when connected) to save power.");
+
+/* 0 (power saving enabled) by default */
+static int i2400m_power_save_disabled;
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+                 "If true, the driver will not tell the device to enter "
+                 "power saving mode when it reports it is ready for it. "
+                 "False by default (so the device is told to do power "
+                 "saving).");
+
 int i2400m_passive_mode;        /* 0 (passive mode disabled) by default */
 module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
 MODULE_PARM_DESC(passive_mode,
...
@@ -75,25 +75,6 @@
 #include "debug-levels.h"
 
-int i2400m_idle_mode_disabled;  /* 0 (idle mode enabled) by default */
-module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
-MODULE_PARM_DESC(idle_mode_disabled,
-                 "If true, the device will not enable idle mode negotiation "
-                 "with the base station (when connected) to save power.");
-
-int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
-module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
-MODULE_PARM_DESC(rx_reorder_disabled,
-                 "If true, RX reordering will be disabled.");
-
-int i2400m_power_save_disabled; /* 0 (power saving enabled) by default */
-module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
-MODULE_PARM_DESC(power_save_disabled,
-                 "If true, the driver will not tell the device to enter "
-                 "power saving mode when it reports it is ready for it. "
-                 "False by default (so the device is told to do power "
-                 "saving).");
-
 static char i2400m_debug_params[128];
 module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
                     0644);
@@ -395,6 +376,16 @@ int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags)
 	result = i2400m_dev_initialize(i2400m);
 	if (result < 0)
 		goto error_dev_initialize;
+
+	/* We don't want any additional unwanted error recovery triggered
+	 * from any other context so if anything went wrong before we come
+	 * here, let's keep i2400m->error_recovery untouched and leave it to
+	 * dev_reset_handle(). See dev_reset_handle(). */
+
+	atomic_dec(&i2400m->error_recovery);
+	/* Everything works so far, ok, now we are ready to
+	 * take error recovery if it's required. */
+
 	/* At this point, reports will come for the device and set it
 	 * to the right state if it is different than UNINITIALIZED */
 	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -403,10 +394,10 @@ int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags)
 error_dev_initialize:
 error_check_mac_addr:
-error_fw_check:
 	i2400m->ready = 0;
 	wmb();		/* see i2400m->ready's documentation */
 	flush_workqueue(i2400m->work_queue);
+error_fw_check:
 	if (i2400m->bus_dev_stop)
 		i2400m->bus_dev_stop(i2400m);
 error_bus_dev_start:
@@ -436,7 +427,8 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 		result = __i2400m_dev_start(i2400m, bm_flags);
 		if (result >= 0) {
 			i2400m->updown = 1;
-			wmb();	/* see i2400m->updown's documentation */
+			i2400m->alive = 1;
+			wmb();/* see i2400m->updown and i2400m->alive's doc */
 		}
 	}
 	mutex_unlock(&i2400m->init_mutex);
@@ -497,7 +489,8 @@ void i2400m_dev_stop(struct i2400m *i2400m)
 	if (i2400m->updown) {
 		__i2400m_dev_stop(i2400m);
 		i2400m->updown = 0;
-		wmb();	/* see i2400m->updown's documentation */
+		i2400m->alive = 0;
+		wmb();	/* see i2400m->updown and i2400m->alive's doc */
 	}
 	mutex_unlock(&i2400m->init_mutex);
 }
@@ -617,12 +610,12 @@ int i2400m_post_reset(struct i2400m *i2400m)
 error_dev_start:
 	if (i2400m->bus_release)
 		i2400m->bus_release(i2400m);
-error_bus_setup:
 	/* even if the device was up, it could not be recovered, so we
 	 * mark it as down. */
 	i2400m->updown = 0;
 	wmb();		/* see i2400m->updown's documentation */
 	mutex_unlock(&i2400m->init_mutex);
+error_bus_setup:
 	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
 	return result;
 }
@@ -669,6 +662,9 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
 
 	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
 
+	i2400m->boot_mode = 1;
+	wmb();		/* Make sure i2400m_msg_to_dev() sees boot_mode */
+
 	result = 0;
 	if (mutex_trylock(&i2400m->init_mutex) == 0) {
 		/* We are still in i2400m_dev_start() [let it fail] or
@@ -679,32 +675,62 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
 		complete(&i2400m->msg_completion);
 		goto out;
 	}
-	if (i2400m->updown == 0)  {
-		dev_info(dev, "%s: device is down, doing nothing\n", reason);
-		goto out_unlock;
-	}
 	dev_err(dev, "%s: reinitializing driver\n", reason);
-	__i2400m_dev_stop(i2400m);
-	result = __i2400m_dev_start(i2400m,
-				    I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
-	if (result < 0) {
+	rmb();
+	if (i2400m->updown) {
+		__i2400m_dev_stop(i2400m);
 		i2400m->updown = 0;
 		wmb();		/* see i2400m->updown's documentation */
-		dev_err(dev, "%s: cannot start the device: %d\n",
-			reason, result);
-		result = -EUCLEAN;
 	}
-out_unlock:
+
+	if (i2400m->alive) {
+		result = __i2400m_dev_start(i2400m,
+				    I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+		if (result < 0) {
+			dev_err(dev, "%s: cannot start the device: %d\n",
+				reason, result);
+			result = -EUCLEAN;
+			if (atomic_read(&i2400m->bus_reset_retries)
+					>= I2400M_BUS_RESET_RETRIES) {
+				result = -ENODEV;
+				dev_err(dev, "tried too many times to "
+					"reset the device, giving up\n");
+			}
+		}
+	}
 	if (i2400m->reset_ctx) {
 		ctx->result = result;
 		complete(&ctx->completion);
 	}
 	mutex_unlock(&i2400m->init_mutex);
 	if (result == -EUCLEAN) {
+		/*
+		 * We come here because the reset during operational mode
+		 * wasn't successfully done and need to proceed to a bus
+		 * reset. For the dev_reset_handle() to be able to handle
+		 * the reset event later properly, we restore boot_mode back
+		 * to the state before the previous reset, ie: just like we
+		 * are issuing the bus reset for the first time
+		 */
+		i2400m->boot_mode = 0;
+		wmb();
+
+		atomic_inc(&i2400m->bus_reset_retries);
 		/* ops, need to clean up [w/ init_mutex not held] */
 		result = i2400m_reset(i2400m, I2400M_RT_BUS);
 		if (result >= 0)
 			result = -ENODEV;
+	} else {
+		rmb();
+		if (i2400m->alive) {
+			/* great, we expect the device state up and
+			 * dev_start() actually brings the device state up */
+			i2400m->updown = 1;
+			wmb();
+			atomic_set(&i2400m->bus_reset_retries, 0);
+		}
 	}
 out:
 	i2400m_put(i2400m);
@@ -728,14 +754,72 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
  */
 int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
 {
-	i2400m->boot_mode = 1;
-	wmb();		/* Make sure i2400m_msg_to_dev() sees boot_mode */
 	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
 				    GFP_ATOMIC, &reason, sizeof(reason));
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 
+
+/*
+ * The actual work of error recovery.
+ *
+ * The current implementation of error recovery is to trigger a bus reset.
+ */
+static
+void __i2400m_error_recovery(struct work_struct *ws)
+{
+	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+	struct i2400m *i2400m = iw->i2400m;
+
+	i2400m_reset(i2400m, I2400M_RT_BUS);
+
+	i2400m_put(i2400m);
+	kfree(iw);
+	return;
+}
+
+/*
+ * Schedule a work struct for error recovery.
+ *
+ * The intention of error recovery is to bring back the device to some
+ * known state whenever TX sees -110 (-ETIMEDOUT) on copying the data to
+ * the device. The TX failure could mean a device bus stuck, so the current
+ * error recovery implementation is to trigger a bus reset to the device
+ * and hopefully it can bring back the device.
+ *
+ * The actual work of error recovery has to be in a thread context because
+ * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
+ * destroyed by the error recovery mechanism (currently a bus reset).
+ *
+ * Also, there may be already a queue of TX works that all hit
+ * the -ETIMEDOUT error condition because the device is stuck already.
+ * Since bus reset is used as the error recovery mechanism and we don't
+ * want consecutive bus resets simply because the multiple TX works
+ * in the queue all hit the same device erratum, the flag "error_recovery"
+ * is introduced for preventing unwanted consecutive bus resets.
+ *
+ * Error recovery shall only be invoked again if previous one was completed.
+ * The flag error_recovery is set when error recovery mechanism is scheduled,
+ * and is checked when we need to schedule another error recovery. If it is
+ * in place already, then we shouldn't schedule another one.
+ */
+void i2400m_error_recovery(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+
+	if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
+		if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
+			GFP_ATOMIC, NULL, 0) < 0) {
+			dev_err(dev, "run out of memory for "
+				"scheduling an error recovery ?\n");
+			atomic_dec(&i2400m->error_recovery);
+		}
+	} else
+		atomic_dec(&i2400m->error_recovery);
+	return;
+}
+EXPORT_SYMBOL_GPL(i2400m_error_recovery);
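
A note on the guard above: error_recovery implements a schedule-once pattern. Only the caller that raises the counter from 0 to 1 gets to schedule the bus reset; concurrent callers, and callers arriving before dev_start() has succeeded, back off. Below is a minimal userspace sketch of the same pattern using C11 atomics in place of the kernel's atomic_t; the function names and the printf stand-in for the scheduled work are illustrative, not driver code.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int error_recovery = 1;	/* 1 = not ready, as set in i2400m_init() */

static void dev_start_succeeded(void)
{
	atomic_fetch_sub(&error_recovery, 1);	/* counter drops to 0: ready */
}

static void try_schedule_recovery(void)
{
	/* kernel atomic_add_return(1, &v) == 1  <=>  C11 fetch_add returning 0 */
	if (atomic_fetch_add(&error_recovery, 1) == 0)
		printf("scheduling bus reset\n");	/* sole owner of the recovery */
	else
		atomic_fetch_sub(&error_recovery, 1);	/* pending or not ready: back off */
}

int main(void)
{
	try_schedule_recovery();	/* suppressed: not ready before dev_start() */
	dev_start_succeeded();
	try_schedule_recovery();	/* schedules; counter stays 1 while in flight */
	try_schedule_recovery();	/* suppressed: one recovery already pending */
	return 0;
}
```

The same reasoning explains the atomic_dec() added to __i2400m_dev_start() above: a successful start is what arms the guard.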
 /*
  * Alloc the command and ack buffers for boot mode
  *
@@ -802,6 +886,13 @@ void i2400m_init(struct i2400m *i2400m)
 	mutex_init(&i2400m->init_mutex);
 	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+	atomic_set(&i2400m->bus_reset_retries, 0);
+	i2400m->alive = 0;
+
+	/* initialize error_recovery to 1 for denoting we
+	 * are not yet ready to take any error recovery */
+	atomic_set(&i2400m->error_recovery, 1);
 }
 EXPORT_SYMBOL_GPL(i2400m_init);
...
@@ -99,7 +99,10 @@ enum {
  *
  * @tx_workqueue: workqeueue used for data TX; we don't use the
  *     system's workqueue as that might cause deadlocks with code in
- *     the bus-generic driver.
+ *     the bus-generic driver. The read/write operation to the queue
+ *     is protected with spinlock (tx_lock in struct i2400m) to avoid
+ *     the queue being destroyed in the middle of the queue read/write
+ *     operation.
  *
  * @debugfs_dentry: dentry for the SDIO specific debugfs files
  *
...
@@ -160,6 +160,16 @@
 #include <linux/wimax/i2400m.h>
 #include <asm/byteorder.h>
 
+enum {
+	/* netdev interface */
+	/*
+	 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+	 *
+	 * The MTU is 1400 or less
+	 */
+	I2400M_MAX_MTU = 1400,
+};
+
 /* Misc constants */
 enum {
 	/* Size of the Boot Mode Command buffer */
@@ -167,6 +177,11 @@ enum {
 	I2400M_BM_ACK_BUF_SIZE = 256,
 };
 
+enum {
+	/* Maximum number of bus resets that can be retried */
+	I2400M_BUS_RESET_RETRIES = 3,
+};
+
 /**
  * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
  *
@@ -227,6 +242,11 @@ struct i2400m_barker_db;
  *     so we have a tx_blk_size variable that the bus layer sets to
  *     tell the engine how much of that we need.
  *
+ * @bus_tx_room_min: [fill] Minimum room required while allocating
+ *     TX queue's buffer space for message header. SDIO requires
+ *     224 bytes and USB 16 bytes. Refer to the bus-specific driver
+ *     code for details.
+ *
  * @bus_pl_size_max: [fill] Maximum payload size.
  *
  * @bus_setup: [optional fill] Function called by the bus-generic code
@@ -397,7 +417,7 @@ struct i2400m_barker_db;
  *
  * @tx_size_max: biggest TX message sent.
  *
- * @rx_lock: spinlock to protect RX members
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
  *
  * @rx_pl_num: total number of payloads received
  *
@@ -421,6 +441,10 @@ struct i2400m_barker_db;
  *     delivered. Then the driver can release them to the host. See
  *     drivers/net/i2400m/rx.c for details.
  *
+ * @rx_roq_refcount: refcount rx_roq. This refcounts any access to
+ *     rx_roq thus preventing rx_roq being destroyed when rx_roq
+ *     is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
  * @rx_reports: reports received from the device that couldn't be
  *     processed because the driver wasn't still ready; when ready,
  *     they are pulled from here and chewed.
@@ -507,6 +531,38 @@ struct i2400m_barker_db;
  *     same.
  *
  * @pm_notifier: used to register for PM events
+ *
+ * @bus_reset_retries: counter for the number of bus resets attempted for
+ *     this boot. It's not for tracking the number of bus resets during
+ *     the whole driver life cycle (from insmod to rmmod) but for the
+ *     number of dev_start() executed until dev_start() returns a success
+ *     (ie: a good boot means a dev_stop() followed by a successful
+ *     dev_start()). dev_reset_handle() increments this counter whenever
+ *     it is triggering a bus reset. It checks this counter to decide if a
+ *     subsequent bus reset should be retried. dev_reset_handle() retries
+ *     the bus reset until dev_start() succeeds or the counter reaches
+ *     I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in
+ *     dev_reset_handle() when dev_start() returns a success,
+ *     ie: a successful boot is completed.
+ *
+ * @alive: flag to denote if the device *should* be alive. This flag is
+ *     everything like @updown (see doc for @updown) except reflecting
+ *     the device state *we expect* rather than the actual state as denoted
+ *     by @updown. It is set 1 whenever @updown is set 1 in dev_start().
+ *     Then the device is expected to be alive all the time
+ *     (i2400m->alive remains 1) until the driver is removed. Therefore
+ *     all the device reboot events detected can be still handled properly
+ *     by either dev_reset_handle() or .pre_reset/.post_reset as long as
+ *     the driver is present. It is set 0 along with @updown in dev_stop().
+ *
+ * @error_recovery: flag to denote if we are ready to take an error recovery.
+ *     0 for ready to take an error recovery; 1 for not ready. It is
+ *     initialized to 1 during probe() since we don't want to take any error
+ *     recovery during probe(). It is decremented by 1 whenever dev_start()
+ *     succeeds, to indicate we are ready to take error recovery from then
+ *     on. It is checked every time we want to schedule an error recovery.
+ *     If an error recovery is already in place (error_recovery was set 1),
+ *     we should not schedule another one until the last one is done.
  */
 struct i2400m {
 	struct wimax_dev wimax_dev;	/* FIRST! See doc */
@@ -522,6 +578,7 @@ struct i2400m {
 	wait_queue_head_t state_wq;	/* Woken up when on state updates */
 
 	size_t bus_tx_block_size;
+	size_t bus_tx_room_min;
 	size_t bus_pl_size_max;
 	unsigned bus_bm_retries;
@@ -550,10 +607,12 @@ struct i2400m {
 		tx_num, tx_size_acc, tx_size_min, tx_size_max;
 
 	/* RX stuff */
-	spinlock_t rx_lock;		/* protect RX state */
+	/* protect RX state and rx_roq_refcount */
+	spinlock_t rx_lock;
 	unsigned rx_pl_num, rx_pl_max, rx_pl_min,
 		rx_num, rx_size_acc, rx_size_min, rx_size_max;
-	struct i2400m_roq *rx_roq;	/* not under rx_lock! */
+	struct i2400m_roq *rx_roq;	/* access is refcounted */
+	struct kref rx_roq_refcount;	/* refcount access to rx_roq */
 	u8 src_mac_addr[ETH_HLEN];
 	struct list_head rx_reports;	/* under rx_lock! */
 	struct work_struct rx_report_ws;
@@ -581,6 +640,16 @@ struct i2400m {
 	struct i2400m_barker_db *barker;
 
 	struct notifier_block pm_notifier;
+
+	/* counting bus reset retries in this boot */
+	atomic_t bus_reset_retries;
+
+	/* if the device is expected to be alive */
+	unsigned alive;
+
+	/* 0 if we are ready for error recovery; 1 if not ready */
+	atomic_t error_recovery;
+
 };
@@ -803,6 +872,7 @@ void i2400m_put(struct i2400m *i2400m)
 extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
 extern int i2400m_pre_reset(struct i2400m *);
 extern int i2400m_post_reset(struct i2400m *);
+extern void i2400m_error_recovery(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
@@ -815,7 +885,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
 extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
 extern void i2400m_tx_msg_sent(struct i2400m *);
 
-extern int i2400m_power_save_disabled;
-
 /*
  * Utility functions
@@ -922,10 +991,5 @@ extern int i2400m_barker_db_init(const char *);
 extern void i2400m_barker_db_exit(void);
 
-
-/* Module parameters */
-extern int i2400m_idle_mode_disabled;
-extern int i2400m_rx_reorder_disabled;
-
 #endif /* #ifndef __I2400M_H__ */
...
@@ -84,17 +84,15 @@
 enum {
 	/* netdev interface */
-	/*
-	 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
-	 *
-	 * The MTU is 1400 or less
-	 */
-	I2400M_MAX_MTU = 1400,
 	/* 20 secs? yep, this is the maximum timeout that the device
 	 * might take to get out of IDLE / negotiate it with the base
 	 * station. We add 1sec for good measure. */
 	I2400M_TX_TIMEOUT = 21 * HZ,
-	I2400M_TX_QLEN = 5,
+	/*
+	 * Experimentation has determined 20 to be a good value for
+	 * minimizing the jitter in the throughput.
+	 */
+	I2400M_TX_QLEN = 20,
 };
...
@@ -155,6 +155,11 @@
 #define D_SUBMODULE rx
 #include "debug-levels.h"
 
+static int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+                 "If true, RX reordering will be disabled.");
+
 struct i2400m_report_hook_args {
 	struct sk_buff *skb_rx;
 	const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,17 +305,16 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
 		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
 		goto error_waiter_cancelled;
 	}
-	if (ack_skb == NULL) {
+	if (IS_ERR(ack_skb))
 		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
-		i2400m->ack_skb = ERR_PTR(-ENOMEM);
-	} else
-		i2400m->ack_skb = ack_skb;
+	i2400m->ack_skb = ack_skb;
 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	complete(&i2400m->msg_completion);
 	return;
 
 error_waiter_cancelled:
-	kfree_skb(ack_skb);
+	if (!IS_ERR(ack_skb))
+		kfree_skb(ack_skb);
 error_no_waiter:
 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 }
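
This hunk, like the wimax stack change at the end of this merge (__wimax_state_change), leans on the kernel's ERR_PTR()/IS_ERR() convention: a single pointer either refers to a real object or encodes a negative errno in the topmost page of the address space. A self-contained sketch of that encoding, compiled here as plain userspace C; MAX_ERRNO and the helpers mirror the spirit of include/linux/err.h:

```c
#include <assert.h>

#define MAX_ERRNO 4095	/* errnos live in the last page of the address space */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int obj;
	void *ok = &obj;		/* any real pointer */
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	assert(!IS_ERR(ok));
	assert(IS_ERR(bad) && PTR_ERR(bad) == -12);
	return 0;
}
```

That is why the new error path can call kfree_skb() only when !IS_ERR(): an error-encoded "pointer" must never be freed.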
@@ -741,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 	unsigned new_nws, nsn_itr;
 
 	new_nws = __i2400m_roq_nsn(roq, sn);
-	if (unlikely(new_nws >= 1024) && d_test(1)) {
-		dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
-			new_nws, sn, roq->ws);
-		WARN_ON(1);
-		i2400m_roq_log_dump(i2400m, roq);
-	}
+	/*
+	 * For type 2 (update_window_start) rx messages, there is no
+	 * need to check if the normalized sequence number is greater
+	 * than 1023. Simply insert and deliver all packets to the host
+	 * up to the window start.
+	 */
 	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
 		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
 		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
@@ -885,31 +889,51 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 	       i2400m, roq, skb, sn);
 	len = skb_queue_len(&roq->queue);
 	nsn = __i2400m_roq_nsn(roq, sn);
+	/*
+	 * For type 3 (queue_update_window_start) rx messages, there is no
+	 * need to check if the normalized sequence number is greater
+	 * than 1023. Simply insert and deliver all packets to the host
+	 * up to the window start.
+	 */
 	old_ws = roq->ws;
-	if (unlikely(nsn >= 1024)) {
-		dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
-			nsn, sn, roq->ws);
-		i2400m_roq_log_dump(i2400m, roq);
-		i2400m_reset(i2400m, I2400M_RT_WARM);
-	} else {
-		/* if the queue is empty, don't bother as we'd queue
-		 * it and inmediately unqueue it -- just deliver it */
-		if (len == 0) {
-			struct i2400m_roq_data *roq_data;
-			roq_data = (struct i2400m_roq_data *) &skb->cb;
-			i2400m_net_erx(i2400m, skb, roq_data->cs);
-		}
-		else
-			__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-		__i2400m_roq_update_ws(i2400m, roq, sn + 1);
-		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
-				   old_ws, len, sn, nsn, roq->ws);
-	}
+	/* If the queue is empty, don't bother as we'd queue
+	 * it and immediately unqueue it -- just deliver it.
+	 */
+	if (len == 0) {
+		struct i2400m_roq_data *roq_data;
+		roq_data = (struct i2400m_roq_data *) &skb->cb;
+		i2400m_net_erx(i2400m, skb, roq_data->cs);
+	} else
+		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
+	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+			   old_ws, len, sn, nsn, roq->ws);
+
 	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
 		i2400m, roq, skb, sn);
 }
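
The two comments above only make sense given the reorder queue's modular arithmetic: assuming, as __i2400m_roq_nsn() in rx.c does, an 11-bit sequence space (2048 values) with a 1024-entry window, a normalized sequence number of 1024 or more means "behind the window start" rather than a software bug; dropping the old warnings lets such packets simply be delivered up to the window start. A userspace restatement of that arithmetic; the helper name roq_nsn is ours:

```c
#include <assert.h>

/* Normalize sn against window start ws in a 2048-wide sequence space. */
static unsigned roq_nsn(unsigned ws, unsigned sn)
{
	int r = ((int)sn - (int)ws) % 2048;

	return r < 0 ? (unsigned)(r + 2048) : (unsigned)r;
}

int main(void)
{
	assert(roq_nsn(10, 12) == 2);		/* just ahead of the window start */
	assert(roq_nsn(2040, 4) == 12);		/* wrap-around, still in the window */
	assert(roq_nsn(10, 9) == 2047);		/* behind the window start */
	return 0;
}
```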
+/*
+ * This routine destroys the memory allocated for rx_roq, when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence memory allocated must be destroyed when
+ * rx_roq_refcount becomes zero. This routine gets executed when
+ * rx_roq_refcount becomes zero.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+	unsigned itr;
+	struct i2400m *i2400m
+		= container_of(ref, struct i2400m, rx_roq_refcount);
+
+	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
+
+	kfree(i2400m->rx_roq[0].log);
+	kfree(i2400m->rx_roq);
+	i2400m->rx_roq = NULL;
+}
 /*
  * Receive and send up an extended data packet
  *
@@ -963,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 	unsigned ro_needed, ro_type, ro_cin, ro_sn;
 	struct i2400m_roq *roq;
 	struct i2400m_roq_data *roq_data;
+	unsigned long flags;
 
 	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
@@ -1001,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
 		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
 
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
 		roq = &i2400m->rx_roq[ro_cin];
+		if (roq == NULL) {
+			kfree_skb(skb);	/* rx_roq is already destroyed */
+			spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+			goto error;
+		}
+		kref_get(&i2400m->rx_roq_refcount);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
 		roq_data = (struct i2400m_roq_data *) &skb->cb;
 		roq_data->sn = ro_sn;
 		roq_data->cs = cs;
@@ -1028,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		default:
 			dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
 		}
+
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	else
 		i2400m_net_erx(i2400m, skb, cs);
@@ -1337,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
 			__i2400m_roq_init(&i2400m->rx_roq[itr]);
 			i2400m->rx_roq[itr].log = &rd[itr];
 		}
+		kref_init(&i2400m->rx_roq_refcount);
 	}
 	return 0;
@@ -1350,12 +1389,12 @@ int i2400m_rx_setup(struct i2400m *i2400m)
 /* Tear down the RX queue and infrastructure */
 void i2400m_rx_release(struct i2400m *i2400m)
 {
+	unsigned long flags;
+
 	if (i2400m->rx_reorder) {
-		unsigned itr;
-		for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
-			__skb_queue_purge(&i2400m->rx_roq[itr].queue);
-		kfree(i2400m->rx_roq[0].log);
-		kfree(i2400m->rx_roq);
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	/* at this point, nothing can be received... */
 	i2400m_report_hook_flush(i2400m);
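
Taken together, the rx.c hunks above follow one discipline: i2400m_rx_roq_destroy() is the kref release callback, the RX path takes a reference under rx_lock before touching rx_roq, and whichever path drops the last reference (the RX path or i2400m_rx_release()) frees the queues and NULLs the pointer. A minimal pthread model of that discipline; roq_get()/roq_put() are illustrative names, not driver functions:

```c
#include <pthread.h>
#include <stdlib.h>

struct roq { int refcount; };	/* refcount plays rx_roq_refcount */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* plays rx_lock */
static struct roq *roq;						/* plays rx_roq */

static struct roq *roq_get(void)
{
	struct roq *r;

	pthread_mutex_lock(&lock);
	r = roq;
	if (r)
		r->refcount++;	/* kref_get() while the pointer is still live */
	pthread_mutex_unlock(&lock);
	return r;		/* NULL: already torn down, caller must bail */
}

static void roq_put(struct roq *r)
{
	pthread_mutex_lock(&lock);
	if (--r->refcount == 0) {	/* kref_put() ran the release callback */
		free(r);
		roq = NULL;		/* later roq_get() callers see NULL */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	roq = calloc(1, sizeof(*roq));
	roq->refcount = 1;		/* kref_init() in i2400m_rx_setup() */

	struct roq *r = roq_get();	/* RX path takes a reference */
	roq_put(roq);			/* i2400m_rx_release() drops the initial one */
	roq_put(r);			/* last put frees and clears the pointer */
	return roq_get() != NULL;	/* 0: further access is safely refused */
}
```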
...
@@ -98,6 +98,10 @@ void i2400ms_tx_submit(struct work_struct *ws)
 				tx_msg_size, result);
 		}
 
+		if (result == -ETIMEDOUT) {
+			i2400m_error_recovery(i2400m);
+			break;
+		}
 		d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
 	}
 
@@ -114,13 +118,17 @@ void i2400ms_bus_tx_kick(struct i2400m *i2400m)
 {
 	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
 	struct device *dev = &i2400ms->func->dev;
+	unsigned long flags;
 
 	d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
 
 	/* schedule tx work, this is because tx may block, therefore
 	 * it has to run in a thread context.
 	 */
-	queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	if (i2400ms->tx_workqueue != NULL)
+		queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 }
@@ -130,27 +138,40 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
 	int result;
 	struct device *dev = &i2400ms->func->dev;
 	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct workqueue_struct *tx_workqueue;
+	unsigned long flags;
 
 	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
 
 	INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
 	snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
 		 "%s-tx", i2400m->wimax_dev.name);
-	i2400ms->tx_workqueue =
+	tx_workqueue =
 		create_singlethread_workqueue(i2400ms->tx_wq_name);
-	if (NULL == i2400ms->tx_workqueue) {
+	if (tx_workqueue == NULL) {
 		dev_err(dev, "TX: failed to create workqueue\n");
 		result = -ENOMEM;
 	} else
 		result = 0;
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400ms->tx_workqueue = tx_workqueue;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
 	return result;
 }
 
 void i2400ms_tx_release(struct i2400ms *i2400ms)
 {
-	if (i2400ms->tx_workqueue) {
-		destroy_workqueue(i2400ms->tx_workqueue);
-		i2400ms->tx_workqueue = NULL;
-	}
+	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct workqueue_struct *tx_workqueue;
+	unsigned long flags;
+
+	tx_workqueue = i2400ms->tx_workqueue;
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400ms->tx_workqueue = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+	if (tx_workqueue)
+		destroy_workqueue(tx_workqueue);
 }
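
This setup/release pair closes the race documented in the SDIO header's tx_workqueue comment earlier in this merge: the workqueue pointer is only published or cleared under tx_lock, and i2400ms_bus_tx_kick() re-checks it under the same lock, so a kick that loses the race against release degenerates into a no-op instead of queueing work on a destroyed queue. The same handshake, modeled with a pthread mutex; all names here are illustrative:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workqueue { int unused; };	/* stand-in for workqueue_struct */

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static struct workqueue *tx_workqueue;

static void tx_kick(void)
{
	pthread_mutex_lock(&tx_lock);
	if (tx_workqueue != NULL)
		printf("work queued\n");	/* queue_work() */
	pthread_mutex_unlock(&tx_lock);		/* NULL: released; drop the kick */
}

static void tx_release(void)
{
	struct workqueue *wq;

	pthread_mutex_lock(&tx_lock);
	wq = tx_workqueue;
	tx_workqueue = NULL;	/* no kick can see the queue past this point */
	pthread_mutex_unlock(&tx_lock);

	free(wq);		/* destroy_workqueue(), outside the lock */
}

int main(void)
{
	tx_workqueue = calloc(1, sizeof(*tx_workqueue));
	tx_kick();	/* queues */
	tx_release();
	tx_kick();	/* safely ignored after release */
	return 0;
}
```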
@@ -483,6 +483,13 @@ int i2400ms_probe(struct sdio_func *func,
 	sdio_set_drvdata(func, i2400ms);
 
 	i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+	/*
+	 * Room required in the TX queue for an SDIO message to accommodate
+	 * the smallest payload while allocating header space is 224 bytes,
+	 * which is the smallest message size (the block size, 256 bytes)
+	 * minus the smallest message header size (32 bytes).
+	 */
+	i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
 	i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
 	i2400m->bus_setup = i2400ms_bus_setup;
 	i2400m->bus_dev_start = i2400ms_bus_dev_start;
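
Spelling out the arithmetic in that comment, with constant values assumed from the i2400m headers rather than restated in this hunk (block size 256, payload alignment unit 16, so a two-unit message header of 32 bytes):

```c
#include <assert.h>

enum {
	I2400MS_BLK_SIZE = 256,	/* smallest SDIO message: one block */
	I2400M_PL_ALIGN  = 16,	/* payload alignment unit */
};

int main(void)
{
	/* header is two alignment units (32 bytes); the rest is payload room */
	assert(I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2 == 224);
	return 0;
}
```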
...
@@ -82,6 +82,8 @@ MODULE_PARM_DESC(debug,
 
 /* Our firmware file name */
 static const char *i2400mu_bus_fw_names_5x50[] = {
+#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
+	I2400MU_FW_FILE_NAME_v1_5,
 #define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
 	I2400MU_FW_FILE_NAME_v1_4,
 	NULL,
@@ -467,6 +469,13 @@ int i2400mu_probe(struct usb_interface *iface,
 	usb_set_intfdata(iface, i2400mu);
 
 	i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+	/*
+	 * Room required in the TX queue for a USB message to accommodate
+	 * the smallest payload while allocating header space is 16 bytes.
+	 * Adding this room for the new TX message increases the
+	 * possibilities of including any payload with size <= 16 bytes.
+	 */
+	i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
 	i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
 	i2400m->bus_setup = NULL;
 	i2400m->bus_dev_start = i2400mu_bus_dev_start;
@@ -778,4 +787,5 @@ MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
 MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
 		   "(5x50 & 6050)");
 MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
+MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);
...
@@ -315,7 +315,7 @@ void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
 		BUG();
 	}
 	__wimax_state_set(wimax_dev, new_state);
-	if (stch_skb)
+	if (!IS_ERR(stch_skb))
 		wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
 out:
 	d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",