Commit 1e0083bd authored by Arnd Bergmann, committed by David S. Miller

gve: DQO: avoid unused variable warnings

The use of dma_unmap_addr()/dma_unmap_len() in the driver causes
multiple warnings when these macros are defined as empty, e.g.
in an ARCH=i386 allmodconfig build:

drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_tx_add_skb_no_copy_dqo':
drivers/net/ethernet/google/gve/gve_tx_dqo.c:494:40: error: unused variable 'buf' [-Werror=unused-variable]
  494 |                 struct gve_tx_dma_buf *buf =

This is not how the NEED_DMA_MAP_STATE macros are meant to be used:
they rely on the unmap address and length never being accessed through
local variables or a temporary structure like gve_tx_dma_buf, because
the fields and accessors compile away entirely when the DMA map state
is not needed.

Remove the gve_tx_dma_buf definition and open-code its contents in all
places to avoid the warning. This causes some rather long lines but
otherwise ends up making the driver slightly smaller.

Fixes: a57e5de4 ("gve: DQO: Add TX path")
Link: https://lore.kernel.org/netdev/20210723231957.1113800-1-bcf@google.com/
Link: https://lore.kernel.org/netdev/20210721151100.2042139-1-arnd@kernel.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent af3826db
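For reference, a condensed sketch of the NEED_DMA_MAP_STATE machinery that the
commit message refers to, paraphrased from include/linux/dma-mapping.h (abridged;
the in-tree definitions are authoritative). When CONFIG_NEED_DMA_MAP_STATE is not
set, the field definitions vanish and the accessors collapse to constants and
no-ops, so a local pointer whose only purpose is to feed these macros (such as the
removed struct gve_tx_dma_buf *buf) is never referenced and triggers
-Wunused-variable:

/* Paraphrased sketch of include/linux/dma-mapping.h */
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif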
@@ -224,11 +224,6 @@ struct gve_tx_iovec {
         u32 iov_padding; /* padding associated with this segment */
 };
 
-struct gve_tx_dma_buf {
-        DEFINE_DMA_UNMAP_ADDR(dma);
-        DEFINE_DMA_UNMAP_LEN(len);
-};
-
 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
  * ring entry but only used for a pkt_desc not a seg_desc
  */
@@ -236,7 +231,10 @@ struct gve_tx_buffer_state {
         struct sk_buff *skb; /* skb for this pkt */
         union {
                 struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
-                struct gve_tx_dma_buf buf;
+                struct {
+                        DEFINE_DMA_UNMAP_ADDR(dma);
+                        DEFINE_DMA_UNMAP_LEN(len);
+                };
         };
 };
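A note on the anonymous struct above: because it is unnamed, its dma/len members
(when they exist at all) belong directly to struct gve_tx_buffer_state, which is
what lets the accessors in the gve_tx.c hunks below take the state pointer itself.
An illustrative expansion, assuming the macro sketch shown earlier and
CONFIG_NEED_DMA_MAP_STATE enabled:

        /* dma_unmap_addr_set(info, dma, addr) expands to: */
        ((info)->dma) = (addr);
        /* With CONFIG_NEED_DMA_MAP_STATE unset, both the field and the
         * assignment disappear, leaving no unused local behind.
         */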
@@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo {
          * All others correspond to `skb`'s frags and should be unmapped with
          * `dma_unmap_page`.
          */
-        struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+        DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+        DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
         u16 num_bufs;
 
         /* Linked list index to next element in the list, or -1 if none */
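The per-fragment arrays work because the macro argument is pasted verbatim into
both the field declaration and the accessor, so an array subscript can ride along
as part of the "field name". Illustrative expansion under the same assumptions as
the sketch above:

        /* DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]) declares: */
        dma_addr_t dma[MAX_SKB_FRAGS + 1];
        /* dma_unmap_len_set(pkt, len[pkt->num_bufs], len) expands to: */
        ((pkt)->len[pkt->num_bufs]) = (len);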
...
@@ -303,15 +303,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
 static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
 {
         if (info->skb) {
-                dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
-                                 dma_unmap_len(&info->buf, len),
+                dma_unmap_single(dev, dma_unmap_addr(info, dma),
+                                 dma_unmap_len(info, len),
                                  DMA_TO_DEVICE);
-                dma_unmap_len_set(&info->buf, len, 0);
+                dma_unmap_len_set(info, len, 0);
         } else {
-                dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
-                               dma_unmap_len(&info->buf, len),
+                dma_unmap_page(dev, dma_unmap_addr(info, dma),
+                               dma_unmap_len(info, len),
                                DMA_TO_DEVICE);
-                dma_unmap_len_set(&info->buf, len, 0);
+                dma_unmap_len_set(info, len, 0);
         }
 }
@@ -491,7 +491,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
         struct gve_tx_buffer_state *info;
         bool is_gso = skb_is_gso(skb);
         u32 idx = tx->req & tx->mask;
-        struct gve_tx_dma_buf *buf;
         u64 addr;
         u32 len;
         int i;
@@ -515,9 +514,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
                 tx->dma_mapping_error++;
                 goto drop;
         }
-        buf = &info->buf;
-        dma_unmap_len_set(buf, len, len);
-        dma_unmap_addr_set(buf, dma, addr);
+        dma_unmap_len_set(info, len, len);
+        dma_unmap_addr_set(info, dma, addr);
 
         payload_nfrags = shinfo->nr_frags;
         if (hlen < len) {
@@ -549,10 +547,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
                         tx->dma_mapping_error++;
                         goto unmap_drop;
                 }
-                buf = &tx->info[idx].buf;
                 tx->info[idx].skb = NULL;
-                dma_unmap_len_set(buf, len, len);
-                dma_unmap_addr_set(buf, dma, addr);
+                dma_unmap_len_set(&tx->info[idx], len, len);
+                dma_unmap_addr_set(&tx->info[idx], dma, addr);
 
                 gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
         }
...
@@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
                         int j;
 
                         for (j = 0; j < cur_state->num_bufs; j++) {
-                                struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
                                 if (j == 0) {
                                         dma_unmap_single(tx->dev,
-                                                         dma_unmap_addr(buf, dma),
-                                                         dma_unmap_len(buf, len),
-                                                         DMA_TO_DEVICE);
+                                                dma_unmap_addr(cur_state, dma[j]),
+                                                dma_unmap_len(cur_state, len[j]),
+                                                DMA_TO_DEVICE);
                                 } else {
                                         dma_unmap_page(tx->dev,
-                                                       dma_unmap_addr(buf, dma),
-                                                       dma_unmap_len(buf, len),
-                                                       DMA_TO_DEVICE);
+                                                dma_unmap_addr(cur_state, dma[j]),
+                                                dma_unmap_len(cur_state, len[j]),
+                                                DMA_TO_DEVICE);
                                 }
                         }
                         if (cur_state->skb) {
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
         const bool is_gso = skb_is_gso(skb);
         u32 desc_idx = tx->dqo_tx.tail;
 
-        struct gve_tx_pending_packet_dqo *pending_packet;
+        struct gve_tx_pending_packet_dqo *pkt;
         struct gve_tx_metadata_dqo metadata;
         s16 completion_tag;
         int i;
 
-        pending_packet = gve_alloc_pending_packet(tx);
-        pending_packet->skb = skb;
-        pending_packet->num_bufs = 0;
-        completion_tag = pending_packet - tx->dqo.pending_packets;
+        pkt = gve_alloc_pending_packet(tx);
+        pkt->skb = skb;
+        pkt->num_bufs = 0;
+        completion_tag = pkt - tx->dqo.pending_packets;
 
         gve_extract_tx_metadata_dqo(skb, &metadata);
         if (is_gso) {
@@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 
         /* Map the linear portion of skb */
         {
-                struct gve_tx_dma_buf *buf =
-                        &pending_packet->bufs[pending_packet->num_bufs];
                 u32 len = skb_headlen(skb);
                 dma_addr_t addr;
@@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
                 if (unlikely(dma_mapping_error(tx->dev, addr)))
                         goto err;
 
-                dma_unmap_len_set(buf, len, len);
-                dma_unmap_addr_set(buf, dma, addr);
-                ++pending_packet->num_bufs;
+                dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+                dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+                ++pkt->num_bufs;
 
                 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
                                          completion_tag,
@@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
         }
 
         for (i = 0; i < shinfo->nr_frags; i++) {
-                struct gve_tx_dma_buf *buf =
-                        &pending_packet->bufs[pending_packet->num_bufs];
                 const skb_frag_t *frag = &shinfo->frags[i];
                 bool is_eop = i == (shinfo->nr_frags - 1);
                 u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
                 if (unlikely(dma_mapping_error(tx->dev, addr)))
                         goto err;
 
-                dma_unmap_len_set(buf, len, len);
-                dma_unmap_addr_set(buf, dma, addr);
-                ++pending_packet->num_bufs;
+                dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+                dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+                ++pkt->num_bufs;
 
                 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
                                          completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
         return 0;
 
 err:
-        for (i = 0; i < pending_packet->num_bufs; i++) {
-                struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+        for (i = 0; i < pkt->num_bufs; i++) {
                 if (i == 0) {
-                        dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
-                                         dma_unmap_len(buf, len),
+                        dma_unmap_single(tx->dev,
+                                         dma_unmap_addr(pkt, dma[i]),
+                                         dma_unmap_len(pkt, len[i]),
                                          DMA_TO_DEVICE);
                 } else {
-                        dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
-                                       dma_unmap_len(buf, len), DMA_TO_DEVICE);
+                        dma_unmap_page(tx->dev,
+                                       dma_unmap_addr(pkt, dma[i]),
+                                       dma_unmap_len(pkt, len[i]),
+                                       DMA_TO_DEVICE);
                 }
         }
 
-        pending_packet->skb = NULL;
-        pending_packet->num_bufs = 0;
-        gve_free_pending_packet(tx, pending_packet);
+        pkt->skb = NULL;
+        pkt->num_bufs = 0;
+        gve_free_pending_packet(tx, pkt);
 
         return -1;
 }
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
 
 static void remove_from_list(struct gve_tx_ring *tx,
                              struct gve_index_list *list,
-                             struct gve_tx_pending_packet_dqo *pending_packet)
+                             struct gve_tx_pending_packet_dqo *pkt)
 {
         s16 prev_index, next_index;
 
-        prev_index = pending_packet->prev;
-        next_index = pending_packet->next;
+        prev_index = pkt->prev;
+        next_index = pkt->next;
 
         if (prev_index == -1) {
                 /* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
 }
 
 static void gve_unmap_packet(struct device *dev,
-                             struct gve_tx_pending_packet_dqo *pending_packet)
+                             struct gve_tx_pending_packet_dqo *pkt)
 {
-        struct gve_tx_dma_buf *buf;
         int i;
 
         /* SKB linear portion is guaranteed to be mapped */
-        buf = &pending_packet->bufs[0];
-        dma_unmap_single(dev, dma_unmap_addr(buf, dma),
-                         dma_unmap_len(buf, len), DMA_TO_DEVICE);
-        for (i = 1; i < pending_packet->num_bufs; i++) {
-                buf = &pending_packet->bufs[i];
-                dma_unmap_page(dev, dma_unmap_addr(buf, dma),
-                               dma_unmap_len(buf, len), DMA_TO_DEVICE);
+        dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+                         dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+        for (i = 1; i < pkt->num_bufs; i++) {
+                dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+                               dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
         }
-        pending_packet->num_bufs = 0;
+        pkt->num_bufs = 0;
 }
 
 /* Completion types and expected behavior:
...