Commit 09b337de authored by Alex Elder's avatar Alex Elder Committed by David S. Miller

net: ipa: kill replenish_backlog

We no longer use the replenish_backlog atomic variable to decide
when we've got work to do providing receive buffers to hardware.
Basically, we try to keep the hardware as full as possible, all the
time.  We keep supplying buffers until the hardware has no more
space for them.

As a result, we can get rid of the replenish_backlog field and the
atomic operations performed on it.
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5fc7f9ba
@@ -1086,7 +1086,6 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 		return;
 
 	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
-		WARN_ON(!atomic_dec_not_zero(&endpoint->replenish_backlog));
 		if (ipa_endpoint_replenish_one(endpoint, trans))
 			goto try_again_later;
@@ -1105,9 +1104,6 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 	gsi_trans_free(trans);
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
-	/* The last one didn't succeed, so fix the backlog */
-	atomic_inc(&endpoint->replenish_backlog);
-
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
 	 * one buffer, nothing will trigger another replenish attempt.
@@ -1346,7 +1342,6 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 	struct page *page;
 
 	ipa_endpoint_replenish(endpoint);
-	atomic_inc(&endpoint->replenish_backlog);
 
 	if (trans->cancelled)
 		return;
@@ -1693,8 +1688,6 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
 	 */
 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
-	atomic_set(&endpoint->replenish_backlog,
-		   gsi_channel_tre_max(gsi, endpoint->channel_id));
 	INIT_DELAYED_WORK(&endpoint->replenish_work,
 			  ipa_endpoint_replenish_work);
 }
......
@@ -66,7 +66,6 @@ enum ipa_replenish_flag {
  * @netdev:		Network device pointer, if endpoint uses one
  * @replenish_flags:	Replenishing state flags
  * @replenish_ready:	Number of replenish transactions without doorbell
- * @replenish_backlog:	Number of buffers needed to fill hardware queue
  * @replenish_work:	Work item used for repeated replenish failures
  */
 struct ipa_endpoint {
@@ -86,7 +85,6 @@ struct ipa_endpoint {
 	/* Receive buffer replenishing for RX endpoints */
 	DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
 	u32 replenish_ready;
-	atomic_t replenish_backlog;
 	struct delayed_work replenish_work;	/* global wq */
 };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment