Commit 5a1ee270 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Merge all FCoE percpu values into a single structure

This change merges the two statistics values for noddp and noddp_ext_buff
and the dma_pool into a single structure that can be allocated per CPU.

The advantages to this are severalfold. First, we only need to make one
alloc_percpu call now instead of three, which means less overhead for
handling memory allocation failures. Second, in the case of
ixgbe_fcoe_ddp_setup we only need to call get_cpu() once, which is a bit
cleaner since we can drop a put_cpu() from the exception path.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 81faddef
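The consolidation works because alloc_percpu() can allocate any structure type per CPU, not just scalars, so one allocation replaces three and leaves a single failure path. A minimal sketch of the pattern, using the structure this patch adds (the helper name is illustrative, not part of the driver):

	#include <linux/percpu.h>
	#include <linux/dmapool.h>

	/* One per-CPU structure instead of three separate per-CPU variables. */
	struct ixgbe_fcoe_ddp_pool {
		struct dma_pool *pool;	/* CPU-local DMA pool */
		u64 noddp;		/* DDP context allocation failures */
		u64 noddp_ext_buff;	/* failures involving the extra buffer */
	};

	/* Illustrative helper: one alloc_percpu() call, one failure check. */
	static struct ixgbe_fcoe_ddp_pool __percpu *example_ddp_pool_alloc(void)
	{
		return alloc_percpu(struct ixgbe_fcoe_ddp_pool);
	}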
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	struct ixgbe_hw *hw;
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_fcoe_ddp *ddp;
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	unsigned int thislen = 0;
 	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
 	dma_addr_t addr = 0;
-	struct dma_pool *pool;
-	unsigned int cpu;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 		return 0;
 
 	fcoe = &adapter->fcoe;
-	if (!fcoe->pool) {
-		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
-		return 0;
-	}
-
 	ddp = &fcoe->ddp[xid];
 	if (ddp->sgl) {
 		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	}
 	ixgbe_fcoe_clear_ddp(ddp);
 
+	if (!fcoe->ddp_pool) {
+		e_warn(drv, "No ddp_pool resources allocated\n");
+		return 0;
+	}
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+	if (!ddp_pool->pool) {
+		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+		goto out_noddp;
+	}
+
 	/* setup dma from scsi command sgl */
 	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
 	if (dmacount == 0) {
 		e_err(drv, "xid 0x%x DMA map error\n", xid);
-		return 0;
+		goto out_noddp;
 	}
 
 	/* alloc the udl from per cpu ddp pool */
-	cpu = get_cpu();
-	pool = *per_cpu_ptr(fcoe->pool, cpu);
-	ddp->udl = dma_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");
 		goto out_noddp_unmap;
 	}
-	ddp->pool = pool;
+	ddp->pool = ddp_pool->pool;
 	ddp->sgl = sgl;
 	ddp->sgc = sgc;
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	while (len) {
 		/* max number of buffers allowed in one DDP context */
 		if (j >= IXGBE_BUFFCNT_MAX) {
-			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+			ddp_pool->noddp++;
 			goto out_noddp_free;
 		}
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 		 */
 		if (lastsize == bufflen) {
 			if (j >= IXGBE_BUFFCNT_MAX) {
-				*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+				ddp_pool->noddp_ext_buff++;
 				goto out_noddp_free;
 			}
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	return 1;
 
 out_noddp_free:
-	dma_pool_free(pool, ddp->udl, ddp->udp);
+	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
 	ixgbe_fcoe_clear_ddp(ddp);
 
 out_noddp_unmap:
 	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
 	put_cpu();
 	return 0;
 }
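A note on the control flow above: get_cpu() disables preemption as well as returning the current CPU id, so once it has run, every exit from the function must reach put_cpu(). That is what the new out_noddp label guarantees for the pool-missing and DMA-map failure paths. A minimal sketch of the discipline (struct and helper names are illustrative stand-ins):

	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* Illustrative stand-in for the per-CPU slot. */
	struct example_slot {
		u64 counter;
	};

	static void example_touch_local(struct example_slot __percpu *slots)
	{
		struct example_slot *slot;

		/* get_cpu() disables preemption, so the slot stays ours. */
		slot = per_cpu_ptr(slots, get_cpu());

		slot->counter++;	/* lockless: no other CPU writes this slot */

		put_cpu();		/* every exit path must reach this */
	}

This is also why the patch moves get_cpu() earlier: the pool pointer and both counters now live in the same CPU-local slot, so a single pinned section covers all of them.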
@@ -563,44 +568,63 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
 	return 0;
 }
 
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
+{
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+	if (ddp_pool->pool)
+		dma_pool_destroy(ddp_pool->pool);
+	ddp_pool->pool = NULL;
+}
+
 static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
 {
 	unsigned int cpu;
-	struct dma_pool **pool;
 
-	for_each_possible_cpu(cpu) {
-		pool = per_cpu_ptr(fcoe->pool, cpu);
-		if (*pool)
-			dma_pool_destroy(*pool);
-	}
-	free_percpu(fcoe->pool);
-	fcoe->pool = NULL;
+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
+	free_percpu(fcoe->ddp_pool);
+	fcoe->ddp_pool = NULL;
+}
+
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+				     struct device *dev,
+				     unsigned int cpu)
+{
+	struct ixgbe_fcoe_ddp_pool *ddp_pool;
+	struct dma_pool *pool;
+	char pool_name[32];
+
+	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+
+	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+	if (!pool)
+		return -ENOMEM;
+
+	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+	ddp_pool->pool = pool;
+	ddp_pool->noddp = 0;
+	ddp_pool->noddp_ext_buff = 0;
+
+	return 0;
 }
 
 static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	struct device *dev = &adapter->pdev->dev;
 	unsigned int cpu;
-	struct dma_pool **pool;
-	char pool_name[32];
 
-	fcoe->pool = alloc_percpu(struct dma_pool *);
-	if (!fcoe->pool)
+	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+	if (!fcoe->ddp_pool)
 		return;
 
 	/* allocate pci pool for each cpu */
-	for_each_possible_cpu(cpu) {
-		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
-		pool = per_cpu_ptr(fcoe->pool, cpu);
-		*pool = dma_pool_create(pool_name, &adapter->pdev->dev,
-					IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN,
-					PAGE_SIZE);
-		if (!*pool) {
-			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
-			ixgbe_fcoe_ddp_pools_free(fcoe);
-			return;
-		}
-	}
+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
 }
 
 /**
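For context, the two new helpers above wrap the kernel's DMA pool API from linux/dmapool.h. A minimal standalone sketch of that lifecycle, with a placeholder name and sizes (the driver itself uses IXGBE_FCPTR_MAX and IXGBE_FCPTR_ALIGN):

	#include <linux/device.h>
	#include <linux/dmapool.h>
	#include <linux/gfp.h>

	/* Illustrative only: create, use, and tear down one fixed-size pool. */
	static void example_dma_pool(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *buf;

		/* 256-byte buffers, 16-byte aligned, never crossing PAGE_SIZE */
		pool = dma_pool_create("example_pool", dev, 256, 16, PAGE_SIZE);
		if (!pool)
			return;

		buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
		if (buf)
			dma_pool_free(pool, buf, dma);

		/* every allocation must be freed before the pool is destroyed */
		dma_pool_destroy(pool);
	}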
@@ -617,14 +641,13 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-	unsigned int cpu;
 	u32 etqf;
 
-	if (!fcoe->pool) {
+	if (!fcoe->ddp_pool) {
 		spin_lock_init(&fcoe->lock);
 
 		ixgbe_fcoe_ddp_pools_alloc(adapter);
-		if (!fcoe->pool) {
+		if (!fcoe->ddp_pool) {
 			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
 			return;
 		}
@@ -646,24 +669,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to map extra DDP buffer\n");
 			goto out_extra_ddp_buffer;
 		}
-
-		/* Alloc per cpu mem to count the ddp alloc failure number */
-		fcoe->pcpu_noddp = alloc_percpu(u64);
-		if (!fcoe->pcpu_noddp) {
-			e_err(drv, "failed to alloc noddp counter\n");
-			goto out_pcpu_noddp_alloc_fail;
-		}
-
-		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
-		if (!fcoe->pcpu_noddp_ext_buff) {
-			e_err(drv, "failed to alloc noddp extra buff cnt\n");
-			goto out_pcpu_noddp_extra_buff_alloc_fail;
-		}
-
-		for_each_possible_cpu(cpu) {
-			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
-			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
-		}
 	}
 
 	/* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */
@@ -704,13 +709,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		 (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
 	return;
-out_pcpu_noddp_extra_buff_alloc_fail:
-	free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
-	dma_unmap_single(&adapter->pdev->dev,
-			 fcoe->extra_ddp_buffer_dma,
-			 IXGBE_FCBUFF_MIN,
-			 DMA_FROM_DEVICE);
 out_extra_ddp_buffer:
 	kfree(fcoe->extra_ddp_buffer);
 out_ddp_pools:
@@ -730,18 +728,18 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	int i;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-	if (!fcoe->pool)
+	if (!fcoe->ddp_pool)
 		return;
 
 	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 		ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
 	dma_unmap_single(&adapter->pdev->dev,
 			 fcoe->extra_ddp_buffer_dma,
 			 IXGBE_FCBUFF_MIN,
 			 DMA_FROM_DEVICE);
-	free_percpu(fcoe->pcpu_noddp);
-	free_percpu(fcoe->pcpu_noddp_ext_buff);
 	kfree(fcoe->extra_ddp_buffer);
+
 	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -65,16 +65,21 @@ struct ixgbe_fcoe_ddp {
 	struct dma_pool *pool;
 };
 
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+	struct dma_pool *pool;
+	u64 noddp;
+	u64 noddp_ext_buff;
+};
+
 struct ixgbe_fcoe {
-	struct dma_pool **pool;
+	struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
 	atomic_t refcnt;
 	spinlock_t lock;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
 	unsigned char *extra_ddp_buffer;
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
-	u64 __percpu *pcpu_noddp;
-	u64 __percpu *pcpu_noddp_ext_buff;
 #ifdef CONFIG_IXGBE_DCB
 	u8 up;
 #endif
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5052,11 +5052,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
 	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
 	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	unsigned int cpu;
-	u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5187,17 +5182,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 		/* Add up per cpu counters for total ddp aloc fail */
-		if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+		if (adapter->fcoe.ddp_pool) {
+			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+			struct ixgbe_fcoe_ddp_pool *ddp_pool;
+			unsigned int cpu;
+			u64 noddp = 0, noddp_ext_buff = 0;
 			for_each_possible_cpu(cpu) {
-				fcoe_noddp_counts_sum +=
-					*per_cpu_ptr(fcoe->pcpu_noddp, cpu);
-				fcoe_noddp_ext_buff_counts_sum +=
-					*per_cpu_ptr(fcoe->
-						     pcpu_noddp_ext_buff, cpu);
+				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+				noddp += ddp_pool->noddp;
+				noddp_ext_buff += ddp_pool->noddp_ext_buff;
 			}
+			hwstats->fcoe_noddp = noddp;
+			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
 		}
-		hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
-		hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
 #endif /* IXGBE_FCOE */
 		break;
 	default:
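The hunk above is the reader half of a classic lockless per-CPU counter: writers in ixgbe_fcoe_ddp_setup bump only their own CPU's field, and the stats path folds all of them into one total by walking every possible CPU. A minimal sketch of that fold (struct and helper names are illustrative):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	/* Illustrative stand-in for the per-CPU slot. */
	struct example_slot {
		u64 noddp;
	};

	/* Fold per-CPU counters into one total; slight staleness is fine. */
	static u64 example_sum_noddp(struct example_slot __percpu *slots)
	{
		unsigned int cpu;
		u64 total = 0;

		for_each_possible_cpu(cpu)
			total += per_cpu_ptr(slots, cpu)->noddp;

		return total;
	}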