Commit 5fc7f9ba authored by Alex Elder, committed by David S. Miller

net: ipa: introduce gsi_channel_trans_idle()

Create a new function that returns true if all transactions for a
channel are available for use.

Use it in ipa_endpoint_replenish_enable() to decide whether to start
replenishing, and in ipa_endpoint_replenish() to determine whether,
after a failed attempt to supply a buffer, delayed work must be
scheduled to ensure a future replenish attempt occurs.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d0ac30e7
@@ -320,6 +320,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
 	atomic_add(tre_count, &trans_info->tre_avail);
 }
 
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+	struct gsi_trans_info *trans_info;
+
+	trans_info = &gsi->channel[channel_id].trans_info;
+
+	return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
 /* Allocate a GSI transaction on a channel */
 struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
 					  u32 tre_count,
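For readers outside the kernel tree, the helper's invariant is simple: a channel is idle exactly when its count of available TREs equals the channel's maximum, meaning no transaction currently holds one. A minimal user-space sketch of that check, with hypothetical stand-in types (C11 atomics instead of the kernel's atomic_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified model of a channel's transaction bookkeeping; the names
 * mirror the kernel code but the types are stand-ins, not the real ones.
 */
struct trans_info_model {
	atomic_uint tre_avail;	/* TREs currently free to allocate */
	uint32_t tre_max;	/* most TREs the channel can have in use */
};

/* Idle means every TRE is available, i.e. no transaction is allocated */
static bool trans_idle_model(const struct trans_info_model *info)
{
	return atomic_load(&info->tre_avail) == info->tre_max;
}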
@@ -129,6 +129,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
  */
 void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
 
+/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi:	GSI pointer
+ * @channel_id:	Channel the transaction is associated with
+ *
+ * Return:	True if no transactions are allocated, false otherwise
+ *
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
 /**
  * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
  * @gsi:	GSI pointer
@@ -1077,8 +1077,6 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 {
 	struct gsi_trans *trans;
-	struct gsi *gsi;
-	u32 backlog;
 
 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
 		return;
@@ -1108,30 +1106,25 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
 	/* The last one didn't succeed, so fix the backlog */
-	backlog = atomic_inc_return(&endpoint->replenish_backlog);
+	atomic_inc(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
 	 * one buffer, nothing will trigger another replenish attempt.
-	 * Receive buffer transactions use one TRE, so schedule work to
-	 * try replenishing again if our backlog is *all* available TREs.
+	 * If the hardware has no receive buffers queued, schedule work to
+	 * try replenishing again.
 	 */
-	gsi = &endpoint->ipa->gsi;
-	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
 		schedule_delayed_work(&endpoint->replenish_work,
 				      msecs_to_jiffies(1));
 }
 
 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 {
-	struct gsi *gsi = &endpoint->ipa->gsi;
-	u32 max_backlog;
-
 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 
 	/* Start replenishing if hardware currently has no buffers */
-	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
-	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
 		ipa_endpoint_replenish(endpoint);
 }
 
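Both call sites previously compared a separately maintained backlog count against gsi_channel_tre_max(); querying tre_avail directly means the idle test cannot drift out of sync with the transaction pool. A hypothetical exercise of the model sketched earlier, with purely illustrative values, shows the check in action:

#include <assert.h>

int main(void)
{
	struct trans_info_model info = { .tre_max = 64 };

	atomic_init(&info.tre_avail, 64);	/* channel starts with all TREs free */
	assert(trans_idle_model(&info));	/* nothing allocated: idle */

	atomic_fetch_sub(&info.tre_avail, 1);	/* one transaction takes a TRE */
	assert(!trans_idle_model(&info));	/* no longer idle */

	return 0;
}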