Commit 7aa1402e authored by Kees Cook, committed by David S. Miller

net: ethernet/sfc: Convert timers to use timer_setup()

In preparation for unconditionally passing the struct timer_list pointer to
all timer callbacks, switch to using the new timer_setup() and from_timer()
to pass the timer pointer explicitly.
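
The shape of the conversion, as a minimal sketch (struct foo, foo_timeout and
poll_timer below are hypothetical names for illustration, not code from this
patch):

	/* Hypothetical driver structure, for illustration only */
	struct foo {
		struct timer_list poll_timer;
		/* ... other driver state ... */
	};

Before, the callback received an opaque unsigned long and cast it back:

	static void foo_timeout(unsigned long data)
	{
		struct foo *foo = (struct foo *)data;

		/* ... handle the timeout using foo ... */
	}

	setup_timer(&foo->poll_timer, foo_timeout, (unsigned long)foo);

After, the callback receives the struct timer_list pointer and recovers the
containing structure with from_timer():

	static void foo_timeout(struct timer_list *t)
	{
		struct foo *foo = from_timer(foo, t, poll_timer);

		/* ... handle the timeout using foo ... */
	}

	timer_setup(&foo->poll_timer, foo_timeout, 0);

Where the structure holding the timer did not previously carry a pointer back
to the object the callback needs (as with falcon_nic_data below), a
back-pointer member is added so the callback can still reach it.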

Cc: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
Cc: Edward Cree <ecree@solarflare.com>
Cc: Bert Kenward <bkenward@solarflare.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jiri Pirko <jiri@mellanox.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: netdev@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Bert Kenward <bkenward@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fc8bcaa0
@@ -471,8 +471,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
-	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
-		    (unsigned long)rx_queue);
+	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
 
 	return channel;
 }
@@ -511,8 +510,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
 
 	rx_queue = &channel->rx_queue;
 	rx_queue->buffer = NULL;
 	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
-	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
-		    (unsigned long)rx_queue);
+	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
 
 	return channel;
 }
@@ -46,7 +46,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(unsigned long context);
+void efx_rx_slow_fill(struct timer_list *t);
 void __efx_rx_packet(struct efx_channel *channel);
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		   unsigned int n_frags, unsigned int len, u16 flags);
@@ -449,8 +449,7 @@ ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
 
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
-	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
-		    (unsigned long)rx_queue);
+	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 
 	return channel;
 }
@@ -489,8 +488,7 @@ ef4_copy_channel(const struct ef4_channel *old_channel)
 
 	rx_queue = &channel->rx_queue;
 	rx_queue->buffer = NULL;
 	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
-	setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
-		    (unsigned long)rx_queue);
+	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 
 	return channel;
 }
@@ -45,7 +45,7 @@ void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
 void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
 void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
-void ef4_rx_slow_fill(unsigned long context);
+void ef4_rx_slow_fill(struct timer_list *t);
 void __ef4_rx_packet(struct ef4_channel *channel);
 void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
 		   unsigned int n_frags, unsigned int len, u16 flags);
@@ -1454,10 +1454,11 @@ static void falcon_stats_complete(struct ef4_nic *efx)
 	}
 }
 
-static void falcon_stats_timer_func(unsigned long context)
+static void falcon_stats_timer_func(struct timer_list *t)
 {
-	struct ef4_nic *efx = (struct ef4_nic *)context;
-	struct falcon_nic_data *nic_data = efx->nic_data;
+	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
+						      stats_timer);
+	struct ef4_nic *efx = nic_data->efx;
 
 	spin_lock(&efx->stats_lock);
@@ -2295,6 +2296,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
 	if (!nic_data)
 		return -ENOMEM;
 	efx->nic_data = nic_data;
+	nic_data->efx = efx;
 
 	rc = -ENODEV;
@@ -2402,8 +2404,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
 	}
 
 	nic_data->stats_disable_count = 1;
-	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
-		    (unsigned long)efx);
+	timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);
 
 	return 0;
@@ -267,6 +267,7 @@ enum {
 /**
  * struct falcon_nic_data - Falcon NIC state
  * @pci_dev2: Secondary function of Falcon A
+ * @efx: ef4_nic pointer
  * @board: Board state and functions
  * @stats: Hardware statistics
  * @stats_disable_count: Nest count for disabling statistics fetches
@@ -280,6 +281,7 @@ enum {
  */
 struct falcon_nic_data {
 	struct pci_dev *pci_dev2;
+	struct ef4_nic *efx;
 	struct falcon_board board;
 	u64 stats[FALCON_STAT_COUNT];
 	unsigned int stats_disable_count;
@@ -376,9 +376,9 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
 	ef4_nic_notify_rx_desc(rx_queue);
 }
 
-void ef4_rx_slow_fill(unsigned long context)
+void ef4_rx_slow_fill(struct timer_list *t)
 {
-	struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context;
+	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
 
 	/* Post an event to cause NAPI to run and refill the queue */
 	ef4_nic_generate_fill_event(rx_queue);
@@ -48,7 +48,7 @@ struct efx_mcdi_async_param {
 	/* followed by request/response buffer */
 };
 
-static void efx_mcdi_timeout_async(unsigned long context);
+static void efx_mcdi_timeout_async(struct timer_list *t);
 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 			       bool *was_attached_out);
 static bool efx_mcdi_poll_once(struct efx_nic *efx);
@@ -87,8 +87,7 @@ int efx_mcdi_init(struct efx_nic *efx)
 	mcdi->mode = MCDI_MODE_POLL;
 	spin_lock_init(&mcdi->async_lock);
 	INIT_LIST_HEAD(&mcdi->async_list);
-	setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
-		    (unsigned long)mcdi);
+	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
 
 	(void) efx_mcdi_poll_reboot(efx);
 	mcdi->new_epoch = true;
@@ -608,9 +607,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
 	}
 }
 
-static void efx_mcdi_timeout_async(unsigned long context)
+static void efx_mcdi_timeout_async(struct timer_list *t)
 {
-	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
 
 	efx_mcdi_complete_async(mcdi, true);
 }
@@ -376,9 +376,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 	efx_nic_notify_rx_desc(rx_queue);
 }
 
-void efx_rx_slow_fill(unsigned long context)
+void efx_rx_slow_fill(struct timer_list *t)
 {
-	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
 
 	/* Post an event to cause NAPI to run and refill the queue */
 	efx_nic_generate_fill_event(rx_queue);