Commit a12f801d authored by Sandeep Gopalpet, committed by David S. Miller

gianfar: Add per queue structure support

This patch introduces per tx and per rx queue structures.
Earlier the members of these structures were inside the
gfar_private structure.

Going forward, supporting multiple queues requires refactoring the
gfar_private structure so that additional queues can be introduced
easily.
Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 123b43e9
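As a rough illustration of what the refactor means for callers (a sketch based on the hunks below, not code added by this commit; the helper name is hypothetical), ring state that previously sat directly in gfar_private is now reached through the per-queue structures:

	/* Illustrative sketch only; mirrors the gfar_gringparam() hunk below. */
	static void gfar_example_ringparam(struct net_device *dev,
					   struct ethtool_ringparam *rvals)
	{
		struct gfar_private *priv = netdev_priv(dev);
		struct gfar_priv_tx_q *tx_queue = priv->tx_queue;
		struct gfar_priv_rx_q *rx_queue = priv->rx_queue;

		/* before this patch these reads were priv->tx_ring_size
		 * and priv->rx_ring_size */
		rvals->tx_pending = tx_queue->tx_ring_size;
		rvals->rx_pending = rx_queue->rx_ring_size;
	}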
@@ -7,8 +7,9 @@
*
* Author: Andy Fleming
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
* Copyright 2002-2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -699,62 +700,95 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
* and tx_bd_base always point to the currently available buffer.
* The dirty_tx tracks the current buffer that is being sent by the
* controller. The cur_tx and dirty_tx are equal under both completely
* empty and completely full conditions. The empty/ready indicator in
* the buffer descriptor determines the actual condition.
*/
/**
* struct gfar_priv_tx_q - per tx queue structure
* @txlock: per queue tx spin lock
* @tx_skbuff: skb pointers
* @skb_curtx: next to-be-used skb pointer
* @skb_dirtytx: the last used skb pointer
* @qindex: index of this queue
* @dev: back pointer to the dev structure
* @grp: back pointer to the group to which this queue belongs
* @tx_bd_base: First tx buffer descriptor
* @cur_tx: Next free ring entry
* @dirty_tx: First buffer in line to be transmitted
* @tx_ring_size: Tx ring size
* @num_txbdfree: number of free TxBds
* @txcoalescing: enable/disable tx coalescing
* @txic: transmit interrupt coalescing value
* @txcount: coalescing value if based on tx frame count
* @txtime: coalescing value if based on time
*/
struct gfar_private {
/* Fields controlled by TX lock */
spinlock_t txlock;
/* Pointer to the array of skbuffs */
struct gfar_priv_tx_q {
spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
struct sk_buff ** tx_skbuff;
/* next free skb in the array */
/* Buffer descriptor pointers */
dma_addr_t tx_bd_dma_base;
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
struct txbd8 *dirty_tx;
struct net_device *dev;
u16 skb_curtx;
/* First skb in line to be transmitted */
u16 skb_dirtytx;
u16 qindex;
unsigned int tx_ring_size;
unsigned int num_txbdfree;
/* Configuration info for the coalescing features */
unsigned char txcoalescing;
unsigned long txic;
unsigned short txcount;
unsigned short txtime;
};
/* Buffer descriptor pointers */
dma_addr_t tx_bd_dma_base;
struct txbd8 *tx_bd_base; /* First tx buffer descriptor */
struct txbd8 *cur_tx; /* Next free ring entry */
struct txbd8 *dirty_tx; /* First buffer in line
to be transmitted */
unsigned int tx_ring_size;
unsigned int num_txbdfree; /* number of TxBDs free */
/* RX Locked fields */
spinlock_t rxlock;
/**
* struct gfar_priv_rx_q - per rx queue structure
* @rxlock: per queue rx spin lock
* @napi: the napi poll function
* @rx_skbuff: skb pointers
* @skb_currx: currently used skb pointer
* @rx_bd_base: First rx buffer descriptor
* @cur_rx: Next free rx ring entry
* @qindex: index of this queue
* @dev: back pointer to the dev structure
* @rx_ring_size: Rx ring size
* @rxcoalescing: enable/disable rx coalescing
* @rxic: receive interrupt coalescing value
*/
struct device_node *node;
struct net_device *ndev;
struct of_device *ofdev;
struct gfar_priv_rx_q {
spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
struct napi_struct napi;
/* skb array and index */
struct sk_buff ** rx_skbuff;
struct rxbd8 *rx_bd_base;
struct rxbd8 *cur_rx;
struct net_device *dev;
u16 skb_currx;
u16 qindex;
unsigned int rx_ring_size;
/* RX Coalescing values */
unsigned char rxcoalescing;
unsigned long rxic;
};
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
* and tx_bd_base always point to the currently available buffer.
* The dirty_tx tracks the current buffer that is being sent by the
* controller. The cur_tx and dirty_tx are equal under both completely
* empty and completely full conditions. The empty/ready indicator in
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
struct rxbd8 *rx_bd_base; /* First Rx buffers */
struct rxbd8 *cur_rx; /* Next free rx ring entry */
struct device_node *node;
struct net_device *ndev;
struct of_device *ofdev;
/* RX parameters */
unsigned int rx_ring_size;
struct gfar_priv_tx_q *tx_queue;
struct gfar_priv_rx_q *rx_queue;
/* RX per device parameters */
unsigned int rx_buffer_size;
unsigned int rx_stash_size;
unsigned int rx_stash_index;
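The per-queue txlock and rxlock replace the old priv->txlock/priv->rxlock pair, and the ethtool and sysfs hunks below keep the same lock nesting when halting the controller, just through the queue structures. A minimal sketch of that pattern (hypothetical helper name, based on the gfar_sringparam() and gfar_set_rx_csum() hunks that follow):

	static void gfar_example_halt_and_drain(struct net_device *dev)
	{
		struct gfar_private *priv = netdev_priv(dev);
		struct gfar_priv_tx_q *tx_queue = priv->tx_queue;
		struct gfar_priv_rx_q *rx_queue = priv->rx_queue;
		unsigned long flags;

		/* same order as the old code: tx lock with irqs off, then rx lock */
		spin_lock_irqsave(&tx_queue->txlock, flags);
		spin_lock(&rx_queue->rxlock);

		gfar_halt(dev);

		spin_unlock(&rx_queue->rxlock);
		spin_unlock_irqrestore(&tx_queue->txlock, flags);

		/* drain already-received frames; gfar_clean_rx_ring() now
		 * takes the rx queue instead of the net_device */
		gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
	}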
@@ -7,8 +7,9 @@
*
* Author: Andy Fleming
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
* Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
*
* This software may be used and distributed according to
* the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
#include "gianfar.h"
extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
@@ -197,12 +198,16 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
struct gfar_priv_tx_q *tx_queue = NULL;
if (NULL == phydev)
return -ENODEV;
tx_queue = priv->tx_queue;
rx_queue = priv->rx_queue;
cmd->maxtxpkt = get_icft_value(priv->txic);
cmd->maxrxpkt = get_icft_value(priv->rxic);
cmd->maxtxpkt = get_icft_value(tx_queue->txic);
cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
return phy_ethtool_gset(phydev, cmd);
}
@@ -279,6 +284,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
struct gfar_priv_tx_q *tx_queue = NULL;
unsigned long rxtime;
unsigned long rxcount;
unsigned long txtime;
@@ -290,10 +297,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
if (NULL == priv->phydev)
return -ENODEV;
rxtime = get_ictt_value(priv->rxic);
rxcount = get_icft_value(priv->rxic);
txtime = get_ictt_value(priv->txic);
txcount = get_icft_value(priv->txic);
rx_queue = priv->rx_queue;
tx_queue = priv->tx_queue;
rxtime = get_ictt_value(rx_queue->rxic);
rxcount = get_icft_value(rx_queue->rxic);
txtime = get_ictt_value(tx_queue->txic);
txcount = get_icft_value(tx_queue->txic);
cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
cvals->rx_max_coalesced_frames = rxcount;
@@ -339,16 +349,21 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
tx_queue = priv->tx_queue;
rx_queue = priv->rx_queue;
/* Set up rx coalescing */
if ((cvals->rx_coalesce_usecs == 0) ||
(cvals->rx_max_coalesced_frames == 0))
priv->rxcoalescing = 0;
rx_queue->rxcoalescing = 0;
else
priv->rxcoalescing = 1;
rx_queue->rxcoalescing = 1;
if (NULL == priv->phydev)
return -ENODEV;
@@ -366,15 +381,15 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
return -EINVAL;
}
priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
rx_queue->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
/* Set up tx coalescing */
if ((cvals->tx_coalesce_usecs == 0) ||
(cvals->tx_max_coalesced_frames == 0))
priv->txcoalescing = 0;
tx_queue->txcoalescing = 0;
else
priv->txcoalescing = 1;
tx_queue->txcoalescing = 1;
/* Check the bounds of the values */
if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +404,16 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
return -EINVAL;
}
priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
tx_queue->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
gfar_write(&priv->regs->rxic, 0);
if (priv->rxcoalescing)
gfar_write(&priv->regs->rxic, priv->rxic);
if (rx_queue->rxcoalescing)
gfar_write(&priv->regs->rxic, rx_queue->rxic);
gfar_write(&priv->regs->txic, 0);
if (priv->txcoalescing)
gfar_write(&priv->regs->txic, priv->txic);
if (tx_queue->txcoalescing)
gfar_write(&priv->regs->txic, tx_queue->txic);
return 0;
}
@@ -409,6 +424,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
tx_queue = priv->tx_queue;
rx_queue = priv->rx_queue;
rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +438,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
/* Values changeable by the user. The valid values are
* in the range 1 to the "*_max_pending" counterpart above.
*/
rvals->rx_pending = priv->rx_ring_size;
rvals->rx_mini_pending = priv->rx_ring_size;
rvals->rx_jumbo_pending = priv->rx_ring_size;
rvals->tx_pending = priv->tx_ring_size;
rvals->rx_pending = rx_queue->rx_ring_size;
rvals->rx_mini_pending = rx_queue->rx_ring_size;
rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
rvals->tx_pending = tx_queue->tx_ring_size;
}
/* Change the current ring parameters, stopping the controller if
@@ -431,6 +451,8 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
int err = 0;
if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
@@ -451,29 +473,32 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return -EINVAL;
}
tx_queue = priv->tx_queue;
rx_queue = priv->rx_queue;
if (dev->flags & IFF_UP) {
unsigned long flags;
/* Halt TX and RX, and process the frames which
* have already been received */
spin_lock_irqsave(&priv->txlock, flags);
spin_lock(&priv->rxlock);
spin_lock_irqsave(&tx_queue->txlock, flags);
spin_lock(&rx_queue->rxlock);
gfar_halt(dev);
spin_unlock(&priv->rxlock);
spin_unlock_irqrestore(&priv->txlock, flags);
spin_unlock(&rx_queue->rxlock);
spin_unlock_irqrestore(&tx_queue->txlock, flags);
gfar_clean_rx_ring(dev, priv->rx_ring_size);
gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
}
/* Change the size */
priv->rx_ring_size = rvals->rx_pending;
priv->tx_ring_size = rvals->tx_pending;
priv->num_txbdfree = priv->tx_ring_size;
rx_queue->rx_ring_size = rvals->rx_pending;
tx_queue->tx_ring_size = rvals->tx_pending;
tx_queue->num_txbdfree = tx_queue->tx_ring_size;
/* Rebuild the rings with the new size */
if (dev->flags & IFF_UP) {
@@ -486,24 +511,29 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
struct gfar_priv_tx_q *tx_queue = NULL;
unsigned long flags;
int err = 0;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
return -EOPNOTSUPP;
tx_queue = priv->tx_queue;
rx_queue = priv->rx_queue;
if (dev->flags & IFF_UP) {
/* Halt TX and RX, and process the frames which
* have already been received */
spin_lock_irqsave(&priv->txlock, flags);
spin_lock(&priv->rxlock);
spin_lock_irqsave(&tx_queue->txlock, flags);
spin_lock(&rx_queue->rxlock);
gfar_halt(dev);
spin_unlock(&priv->rxlock);
spin_unlock_irqrestore(&priv->txlock, flags);
spin_unlock(&rx_queue->rxlock);
spin_unlock_irqrestore(&tx_queue->txlock, flags);
gfar_clean_rx_ring(dev, priv->rx_ring_size);
gfar_clean_rx_ring(rx_queue, rx_queue->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -8,8 +8,9 @@
*
* Author: Andy Fleming
* Maintainer: Kumar Gala (galak@kernel.crashing.org)
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
* Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
* Copyright 2002-2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
const char *buf, size_t count)
{
struct gfar_private *priv = netdev_priv(to_net_dev(dev));
struct gfar_priv_rx_q *rx_queue = NULL;
int new_setting = 0;
u32 temp;
unsigned long flags;
@@ -56,6 +58,8 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
return count;
rx_queue = priv->rx_queue;
/* Find out the new setting */
if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
new_setting = 1;
@@ -65,7 +69,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
else
return count;
spin_lock_irqsave(&priv->rxlock, flags);
spin_lock_irqsave(&rx_queue->rxlock, flags);
/* Set the new stashing value */
priv->bd_stash_en = new_setting;
@@ -79,7 +83,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
gfar_write(&priv->regs->attr, temp);
spin_unlock_irqrestore(&priv->rxlock, flags);
spin_unlock_irqrestore(&rx_queue->rxlock, flags);
return count;
}
@@ -99,6 +103,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
const char *buf, size_t count)
{
struct gfar_private *priv = netdev_priv(to_net_dev(dev));
struct gfar_priv_rx_q *rx_queue = NULL;
unsigned int length = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
@@ -106,7 +111,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
return count;
spin_lock_irqsave(&priv->rxlock, flags);
rx_queue = priv->rx_queue;
spin_lock_irqsave(&rx_queue->rxlock, flags);
if (length > priv->rx_buffer_size)
goto out;
@@ -131,7 +138,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
gfar_write(&priv->regs->attr, temp);
out:
spin_unlock_irqrestore(&priv->rxlock, flags);
spin_unlock_irqrestore(&rx_queue->rxlock, flags);
return count;
}
@@ -154,6 +161,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
const char *buf, size_t count)
{
struct gfar_private *priv = netdev_priv(to_net_dev(dev));
struct gfar_priv_rx_q *rx_queue = NULL;
unsigned short index = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
@@ -161,7 +169,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
return count;
spin_lock_irqsave(&priv->rxlock, flags);
rx_queue = priv->rx_queue;
spin_lock_irqsave(&rx_queue->rxlock, flags);
if (index > priv->rx_stash_size)
goto out;
@@ -176,7 +186,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
gfar_write(&priv->regs->attreli, flags);
out:
spin_unlock_irqrestore(&priv->rxlock, flags);
spin_unlock_irqrestore(&rx_queue->rxlock, flags);
return count;
}
@@ -198,6 +208,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
const char *buf, size_t count)
{
struct gfar_private *priv = netdev_priv(to_net_dev(dev));
struct gfar_priv_tx_q *tx_queue = NULL;
unsigned int length = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
@@ -205,7 +216,9 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
if (length > GFAR_MAX_FIFO_THRESHOLD)
return count;
spin_lock_irqsave(&priv->txlock, flags);
tx_queue = priv->tx_queue;
spin_lock_irqsave(&tx_queue->txlock, flags);
priv->fifo_threshold = length;
@@ -214,7 +227,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
temp |= length;
gfar_write(&priv->regs->fifo_tx_thr, temp);
spin_unlock_irqrestore(&priv->txlock, flags);
spin_unlock_irqrestore(&tx_queue->txlock, flags);
return count;
}
@@ -235,6 +248,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
const char *buf, size_t count)
{
struct gfar_private *priv = netdev_priv(to_net_dev(dev));
struct gfar_priv_tx_q *tx_queue = NULL;
unsigned int num = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
@@ -242,7 +256,8 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
if (num > GFAR_MAX_FIFO_STARVE)
return count;
spin_lock_irqsave(&priv->txlock, flags);
tx_queue = priv->tx_queue;
spin_lock_irqsave(&tx_queue->txlock, flags);
priv->fifo_starve = num;
@@ -251,7 +266,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
temp |= num;
gfar_write(&priv->regs->fifo_tx_starve, temp);
spin_unlock_irqrestore(&priv->txlock, flags);
spin_unlock_irqrestore(&tx_queue->txlock, flags);
return count;
}
@@ -273,6 +288,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
const char *buf, size_t count)
{
struct gfar_private *priv = netdev_priv(to_net_dev(dev));
struct gfar_priv_tx_q *tx_queue = NULL;
unsigned int num = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
@@ -280,7 +296,8 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
if (num > GFAR_MAX_FIFO_STARVE_OFF)
return count;
spin_lock_irqsave(&priv->txlock, flags);
tx_queue = priv->tx_queue;
spin_lock_irqsave(&tx_queue->txlock, flags);
priv->fifo_starve_off = num;
@@ -289,7 +306,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
temp |= num;
gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
spin_unlock_irqrestore(&priv->txlock, flags);
spin_unlock_irqrestore(&tx_queue->txlock, flags);
return count;
}