Commit 33395f4a authored by David S. Miller's avatar David S. Miller

Merge branch 'net-ipa-kill-endpoint-stop-workaround'

Alex Elder says:

====================
net: ipa: kill endpoint stop workaround

It turns out that a workaround that performs a small DMA operation
between retried attempts to stop a GSI channel is not needed for any
supported hardware.  The hardware quirk that required the extra DMA
operation was fixed after IPA v3.1.  So this series gets rid of that
workaround code, along with some other code that was only present to
support it.

NOTE:  This series depends on (and includes/duplicates) another patch
       that has already been committed in the net tree:
         713b6ebb net: ipa: fix a bug in ipa_endpoint_stop()
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 6a5dc76a da1a782a
...@@ -103,28 +103,6 @@ struct ipa_cmd_ip_packet_init { ...@@ -103,28 +103,6 @@ struct ipa_cmd_ip_packet_init {
/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */ /* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0) #define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
/* IPA_CMD_DMA_TASK_32B_ADDR */
/* This opcode gets modified with a DMA operation count */
#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK GENMASK(15, 8)

/* Hardware layout of the IPA_CMD_DMA_TASK_32B_ADDR immediate command.
 * Multi-byte fields are little-endian as consumed by the IPA hardware.
 */
struct ipa_cmd_hw_dma_task_32b_addr {
	__le16 flags;		/* DMA_TASK_32B_ADDR_FLAGS_* bits below */
	__le16 size;		/* number of bytes to transfer */
	__le32 addr;		/* 32-bit DMA address of the buffer */
	__le16 packet_size;	/* set equal to size by the only caller -- presumably per-packet size; TODO confirm */
	u8 reserved[6];		/* unused padding */
};

/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK GENMASK(10, 0)
#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK GENMASK(11, 11)
#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK GENMASK(12, 12)
#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK GENMASK(13, 13)
#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK GENMASK(14, 14)
#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK GENMASK(15, 15)
/* IPA_CMD_DMA_SHARED_MEM */ /* IPA_CMD_DMA_SHARED_MEM */
/* For IPA v4.0+, this opcode gets modified with pipeline clear options */ /* For IPA v4.0+, this opcode gets modified with pipeline clear options */
...@@ -163,7 +141,6 @@ union ipa_cmd_payload { ...@@ -163,7 +141,6 @@ union ipa_cmd_payload {
struct ipa_cmd_hw_hdr_init_local hdr_init_local; struct ipa_cmd_hw_hdr_init_local hdr_init_local;
struct ipa_cmd_register_write register_write; struct ipa_cmd_register_write register_write;
struct ipa_cmd_ip_packet_init ip_packet_init; struct ipa_cmd_ip_packet_init ip_packet_init;
struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
struct ipa_cmd_hw_dma_mem_mem dma_shared_mem; struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status; struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
}; };
...@@ -508,42 +485,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id) ...@@ -508,42 +485,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
direction, opcode); direction, opcode);
} }
/* Use a 32-bit DMA command to zero a block of memory */
void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
				   dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
	struct ipa_cmd_hw_dma_task_32b_addr *cmd;
	union ipa_cmd_payload *buf;
	enum dma_data_direction dir;
	dma_addr_t buf_addr;
	u16 flag_bits;

	/* Only the low 32 bits of the address can be expressed */
	/* assert(addr <= U32_MAX); */
	addr &= GENMASK_ULL(31, 0);

	/* The opcode encodes the number of DMA operations in the high byte */
	opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);

	dir = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	/* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
	/* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
	flag_bits = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;

	/* Fill in the command payload and hand it to the transaction */
	buf = ipa_cmd_payload_alloc(ipa, &buf_addr);
	cmd = &buf->dma_task_32b_addr;
	cmd->flags = cpu_to_le16(flag_bits);
	cmd->size = cpu_to_le16(size);
	cmd->addr = cpu_to_le32((u32)addr);
	cmd->packet_size = cpu_to_le16(size);

	gsi_trans_cmd_add(trans, cmd, sizeof(*cmd), buf_addr, dir, opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */ /* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
dma_addr_t addr, bool toward_ipa) dma_addr_t addr, bool toward_ipa)
......
...@@ -35,7 +35,6 @@ enum ipa_cmd_opcode { ...@@ -35,7 +35,6 @@ enum ipa_cmd_opcode {
IPA_CMD_HDR_INIT_LOCAL = 9, IPA_CMD_HDR_INIT_LOCAL = 9,
IPA_CMD_REGISTER_WRITE = 12, IPA_CMD_REGISTER_WRITE = 12,
IPA_CMD_IP_PACKET_INIT = 16, IPA_CMD_IP_PACKET_INIT = 16,
IPA_CMD_DMA_TASK_32B_ADDR = 17,
IPA_CMD_DMA_SHARED_MEM = 19, IPA_CMD_DMA_SHARED_MEM = 19,
IPA_CMD_IP_PACKET_TAG_STATUS = 20, IPA_CMD_IP_PACKET_TAG_STATUS = 20,
}; };
...@@ -147,16 +146,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size, ...@@ -147,16 +146,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value, void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
u32 mask, bool clear_full); u32 mask, bool clear_full);
/**
* ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
* @trans: GSI transaction
* @size: Number of bytes of memory to be transferred
* @addr: DMA address of buffer to be read into or written from
* @toward_ipa: true means write to IPA memory; false means read
*/
void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
dma_addr_t addr, bool toward_ipa);
/** /**
* ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction * ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
* @trans: GSI transaction * @trans: GSI transaction
......
...@@ -32,14 +32,9 @@ ...@@ -32,14 +32,9 @@
/* The amount of RX buffer space consumed by standard skb overhead */ /* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0)) #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
#define IPA_ENDPOINT_STOP_RX_RETRIES 10
#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */ #define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */
/** enum ipa_status_opcode - status element opcode hardware values */ /** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode { enum ipa_status_opcode {
IPA_STATUS_OPCODE_PACKET = 0x01, IPA_STATUS_OPCODE_PACKET = 0x01,
...@@ -1219,7 +1214,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) ...@@ -1219,7 +1214,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
gsi_trans_read_byte_done(gsi, endpoint->channel_id); gsi_trans_read_byte_done(gsi, endpoint->channel_id);
ret = ipa_endpoint_stop(endpoint); ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret) if (ret)
goto out_suspend_again; goto out_suspend_again;
...@@ -1236,7 +1231,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) ...@@ -1236,7 +1231,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
goto out_suspend_again; goto out_suspend_again;
err_endpoint_stop: err_endpoint_stop:
ipa_endpoint_stop(endpoint); (void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again: out_suspend_again:
if (suspended) if (suspended)
(void)ipa_endpoint_program_suspend(endpoint, true); (void)ipa_endpoint_program_suspend(endpoint, true);
...@@ -1274,70 +1269,6 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) ...@@ -1274,70 +1269,6 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
ret, endpoint->channel_id, endpoint->endpoint_id); ret, endpoint->channel_id, endpoint->endpoint_id);
} }
/* Issue a one-byte DMA read as the RX channel STOP workaround.
 * Returns 0 on success, or a negative error code on failure.
 */
static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
{
	u16 len = IPA_ENDPOINT_STOP_RX_SIZE;
	struct gsi_trans *trans;
	dma_addr_t dest;
	int ret;

	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction for RX endpoint STOP workaround\n");
		return -EBUSY;
	}

	/* Read into the highest part of the zero memory area */
	dest = ipa->zero_addr + ipa->zero_size - len;

	ipa_cmd_dma_task_32b_addr_add(trans, len, dest, false);

	ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
	if (ret)
		gsi_trans_free(trans);	/* commit failed; release the transaction */

	return ret;
}
/**
 * ipa_endpoint_stop() - Stops a GSI channel in IPA
 * @endpoint:	Endpoint whose GSI channel should be stopped
 *
 * This function implements the sequence to stop a GSI channel
 * in IPA.  This function returns when the channel is in STOP state.
 *
 * Return value: 0 on success, negative otherwise
 */
int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
{
	/* RX channels get extra stop attempts; TX channels get one try */
	u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
	int ret;

	do {
		struct ipa *ipa = endpoint->ipa;
		struct gsi *gsi = &ipa->gsi;

		ret = gsi_channel_stop(gsi, endpoint->channel_id);
		if (ret != -EAGAIN)
			break;	/* stopped, or a non-retryable error */

		if (endpoint->toward_ipa)
			continue;	/* TX: no workaround; just retry (loop exits via retries--) */

		/* For IPA v3.5.1, send a DMA read task and check again */
		if (ipa->version == IPA_VERSION_3_5_1) {
			ret = ipa_endpoint_stop_rx_dma(ipa);
			if (ret)
				break;
		}

		msleep(1);
	} while (retries--);

	/* NOTE(review): retries is unsigned, so after exhausting all
	 * attempts the post-decrement wraps it to a nonzero value and
	 * ret (-EAGAIN) is returned; -EIO is returned only when a break
	 * occurred with retries exactly 0 -- confirm this is intended.
	 */
	return retries ? ret : -EIO;
}
static void ipa_endpoint_program(struct ipa_endpoint *endpoint) static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{ {
if (endpoint->toward_ipa) { if (endpoint->toward_ipa) {
...@@ -1390,12 +1321,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) ...@@ -1390,12 +1321,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{ {
u32 mask = BIT(endpoint->endpoint_id); u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa; struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret; int ret;
if (!(endpoint->ipa->enabled & mask)) if (!(ipa->enabled & mask))
return; return;
endpoint->ipa->enabled ^= mask; ipa->enabled ^= mask;
if (!endpoint->toward_ipa) { if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint); ipa_endpoint_replenish_disable(endpoint);
...@@ -1404,7 +1336,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) ...@@ -1404,7 +1336,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
} }
/* Note that if stop fails, the channel's state is not well-defined */ /* Note that if stop fails, the channel's state is not well-defined */
ret = ipa_endpoint_stop(endpoint); ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret) if (ret)
dev_err(&ipa->pdev->dev, dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret, "error %d attempting to stop endpoint %u\n", ret,
......
...@@ -76,8 +76,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa); ...@@ -76,8 +76,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb); int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
int ipa_endpoint_stop(struct ipa_endpoint *endpoint);
void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint); void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint); int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment