Commit 5f1475b1 authored by David S. Miller

Merge branch 'sfc-couple-more-ARFS-tidy-ups'

Edward Cree says:

====================
couple more ARFS tidy-ups

Tie up some loose ends from the recent ARFS work.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd706ff8 025c5a0b
@@ -150,24 +150,6 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 		   u16 rxq_index, u32 flow_id);
 bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
-static inline void efx_filter_rfs_expire(struct work_struct *data)
-{
-	struct delayed_work *dwork = to_delayed_work(data);
-	struct efx_channel *channel;
-	unsigned int time, quota;
-
-	channel = container_of(dwork, struct efx_channel, filter_work);
-	time = jiffies - channel->rfs_last_expiry;
-	quota = channel->rfs_filter_count * time / (30 * HZ);
-	if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
-		channel->rfs_last_expiry += time;
-	/* Ensure we do more work eventually even if NAPI poll is not happening */
-	schedule_delayed_work(dwork, 30 * HZ);
-}
-#define efx_filter_rfs_enabled() 1
-#else
-static inline void efx_filter_rfs_expire(struct work_struct *data) {}
-#define efx_filter_rfs_enabled() 0
 #endif
 
 /* RSS contexts */
@@ -485,6 +485,23 @@ void efx_remove_eventq(struct efx_channel *channel)
  *
  *************************************************************************/
 
+#ifdef CONFIG_RFS_ACCEL
+static void efx_filter_rfs_expire(struct work_struct *data)
+{
+	struct delayed_work *dwork = to_delayed_work(data);
+	struct efx_channel *channel;
+	unsigned int time, quota;
+
+	channel = container_of(dwork, struct efx_channel, filter_work);
+	time = jiffies - channel->rfs_last_expiry;
+	quota = channel->rfs_filter_count * time / (30 * HZ);
+	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+		channel->rfs_last_expiry += time;
+	/* Ensure we do more work eventually even if NAPI poll is not happening */
+	schedule_delayed_work(dwork, 30 * HZ);
+}
+#endif
+
 /* Allocate and initialise a channel structure. */
 struct efx_channel *
 efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
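The function added above is an instance of the usual self-rearming delayed-work pattern: the handler reschedules itself with a long fallback delay (30 * HZ), and a hot path can pull the next run forward with mod_delayed_work(). A minimal, self-contained sketch of that pattern follows; it is not taken from this patch, and the demo_* names and messages are made up for illustration.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);

	pr_info("demo: expiry pass at jiffies=%lu\n", jiffies);
	/* Fallback: run again in 30 seconds even if nobody kicks us earlier */
	schedule_delayed_work(dwork, 30 * HZ);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
	schedule_delayed_work(&demo_work, 30 * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A hot path that knows work has accumulated would call mod_delayed_work(system_wq, &demo_work, 0) to run the handler as soon as possible instead of waiting out the fallback delay, which is what the efx_poll() change below does.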
@@ -1166,6 +1183,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	struct efx_channel *channel =
 		container_of(napi, struct efx_channel, napi_str);
 	struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int time;
+#endif
 	int spent;
 
 	netif_vdbg(efx, intr, efx->net_dev,
@@ -1185,6 +1205,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
 #ifdef CONFIG_RFS_ACCEL
 		/* Perhaps expire some ARFS filters */
-		mod_delayed_work(system_wq, &channel->filter_work, 0);
+		time = jiffies - channel->rfs_last_expiry;
+		/* Would our quota be >= 20? */
+		if (channel->rfs_filter_count * time >= 600 * HZ)
+			mod_delayed_work(system_wq, &channel->filter_work, 0);
 #endif
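The new check in efx_poll() avoids kicking the work item when the worker would do nothing: the worker computes quota = rfs_filter_count * time / (30 * HZ) and only expires filters when quota >= 20, and the poll-path test rfs_filter_count * time >= 600 * HZ is the same condition with the division folded away. A small userspace sketch of that equivalence; HZ and the sample values are assumptions chosen for illustration, not taken from the patch.

#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example; the kernel's HZ is config-dependent */

/* Same arithmetic as the worker: how many filters may be scanned this pass? */
static unsigned int quota(unsigned int filter_count, unsigned int time)
{
	return filter_count * time / (30 * HZ);
}

int main(void)
{
	unsigned int filter_count = 100;	/* hypothetical ARFS filter count */
	unsigned int time = 6 * HZ;		/* 6 seconds since the last expiry pass */

	/* Worker's view: quota = 100 * 600 / 3000 = 20, so it would do real work */
	printf("quota = %u\n", quota(filter_count, time));

	/* Poll path's cheaper view: 100 * 600 >= 60000, so kick the work item now */
	printf("kick work item: %s\n",
	       filter_count * time >= 600 * HZ ? "yes" : "no");
	return 0;
}

For non-negative integers, filter_count * time / (30 * HZ) >= 20 holds exactly when filter_count * time >= 600 * HZ, so the two tests agree despite the integer division.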