Commit 4b22d841 authored by Alex Elder, committed by David S. Miller

net: ipa: increment backlog in replenish caller

Three spots call ipa_endpoint_replenish(), and just one of those
requests that the backlog be incremented after completing the
replenish operation.

Instead, have the caller increment the backlog, and get rid of the
add_one argument to ipa_endpoint_replenish().
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b4061c13
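
For quick reference, a minimal before/after sketch of the rx-completion path, the one caller that previously asked for the increment (names taken from the diff below; the rest of the function is elided):

	/* Before: ipa_endpoint_replenish() bumped the backlog on the
	 * caller's behalf when add_one was true.
	 */
	ipa_endpoint_replenish(endpoint, true);

	/* After: the add_one argument is gone; the caller increments
	 * the backlog itself.
	 */
	ipa_endpoint_replenish(endpoint);
	atomic_inc(&endpoint->replenish_backlog);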
@@ -1084,7 +1084,6 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 /**
  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint:	Endpoint to be replenished
- * @add_one:	Whether this is replacing a just-consumed buffer
  *
  * The IPA hardware can hold a fixed number of receive buffers for an RX
  * endpoint, based on the number of entries in the underlying channel ring
@@ -1093,24 +1092,17 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
  * an endpoint can be disabled, in which case buffers are not queued to
  * the hardware.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 {
 	struct gsi *gsi;
 	u32 backlog;
-	int delta;
 
-	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
-		if (add_one)
-			atomic_inc(&endpoint->replenish_backlog);
+	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
 		return;
-	}
 
-	/* If already active, just update the backlog */
-	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
-		if (add_one)
-			atomic_inc(&endpoint->replenish_backlog);
+	/* Skip it if it's already active */
+	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
 		return;
-	}
 
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
@@ -1118,17 +1110,13 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
-	if (add_one)
-		atomic_inc(&endpoint->replenish_backlog);
-
 	return;
 
 try_again_later:
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
 	/* The last one didn't succeed, so fix the backlog */
-	delta = add_one ? 2 : 1;
-	backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
+	backlog = atomic_inc_return(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
@@ -1152,7 +1140,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	/* Start replenishing if hardware currently has no buffers */
 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, false);
+		ipa_endpoint_replenish(endpoint);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1167,7 +1155,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, false);
+	ipa_endpoint_replenish(endpoint);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1372,7 +1360,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, true);
+	ipa_endpoint_replenish(endpoint);
+	atomic_inc(&endpoint->replenish_backlog);
 
 	if (trans->cancelled)
 		return;