Commit 7c436448 authored by David S. Miller

Merge branch 'liquidio-VF-offloads-and-stats'

Raghu Vatsavayi says:

====================
liquidio VF offloads and stats

Following is the final patch series completing liquidio
VF driver support. These patches contain minor changes
related to offloads and stats.

Please apply the patches in the following order, as some of them
depend on earlier patches.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 107bc0aa b0d66369
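Editor's note (illustrative sketch, not part of the patch series): once the VF-specific ethtool ops added below are registered, the new driver info and statistics become reachable through the standard SIOCETHTOOL ioctl. The minimal userspace sketch below assumes a hypothetical VF netdev named "eth0"; the ethtool core typically fills n_stats from the driver's get_sset_count() (lio_vf_get_sset_count() below), and the driver/firmware strings come from lio_get_vf_drvinfo().

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the kernel's ethtool core for driver info */
	memset(&drvinfo, 0, sizeof(drvinfo));
	drvinfo.cmd = ETHTOOL_GDRVINFO;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical VF netdev name */
	ifr.ifr_data = (char *)&drvinfo;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver %s, fw %s, %u stats\n",
		       drvinfo.driver, drvinfo.fw_version, drvinfo.n_stats);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}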
@@ -44,5 +44,7 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
#endif
@@ -29,6 +29,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
static int octnet_get_link_stats(struct net_device *netdev);
@@ -72,6 +73,7 @@ enum {
#define OCT_ETHTOOL_REGDUMP_LEN 4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2)
#define OCT_ETHTOOL_REGSVER 1
/* statistics of PF */
@@ -147,6 +149,19 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"link_state_changes",
};
/* statistics of VF */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
"rx_packets",
"tx_packets",
"rx_bytes",
"tx_bytes",
"rx_errors", /* jabber_err + l2_err+frame_err */
"tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
"rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
"tx_dropped",
"link_state_changes",
};
/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
"packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
@@ -192,25 +207,28 @@ static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2
static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
static int lio_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
u32 supported, advertising;
linfo = &lio->linfo;
if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
ecmd->port = PORT_FIBRE;
ecmd->supported =
(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
SUPPORTED_Pause);
ecmd->advertising =
(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
ecmd->transceiver = XCVR_EXTERNAL;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->base.port = PORT_FIBRE;
supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
SUPPORTED_Pause);
advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
ethtool_convert_legacy_u32_to_link_mode(
ecmd->link_modes.supported, supported);
ethtool_convert_legacy_u32_to_link_mode(
ecmd->link_modes.advertising, advertising);
ecmd->base.autoneg = AUTONEG_DISABLE;
} else {
dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
@@ -218,11 +236,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
if (linfo->link.s.link_up) {
ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
ecmd->duplex = linfo->link.s.duplex;
ecmd->base.speed = linfo->link.s.speed;
ecmd->base.duplex = linfo->link.s.duplex;
} else {
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->base.speed = SPEED_UNKNOWN;
ecmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -245,6 +263,23 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
static void
lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct octeon_device *oct;
struct lio *lio;
lio = GET_LIO(netdev);
oct = lio->oct_dev;
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio_vf");
strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}
static void
lio_ethtool_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
@@ -982,6 +1017,109 @@ lio_get_ethtool_stats(struct net_device *netdev,
}
}
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats
__attribute__((unused)),
u64 *data)
{
struct net_device_stats *netstats = &netdev->stats;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int i = 0, j, vj;
netdev->netdev_ops->ndo_get_stats(netdev);
/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
data[i++] = CVM_CAST64(netstats->rx_packets);
/* sum of oct->instr_queue[iq_no]->stats.tx_done */
data[i++] = CVM_CAST64(netstats->tx_packets);
/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
data[i++] = CVM_CAST64(netstats->rx_bytes);
/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
data[i++] = CVM_CAST64(netstats->tx_bytes);
data[i++] = CVM_CAST64(netstats->rx_errors);
data[i++] = CVM_CAST64(netstats->tx_errors);
/* sum of oct->droq[oq_no]->stats->rx_dropped +
* oct->droq[oq_no]->stats->dropped_nodispatch +
* oct->droq[oq_no]->stats->dropped_toomany +
* oct->droq[oq_no]->stats->dropped_nomem
*/
data[i++] = CVM_CAST64(netstats->rx_dropped);
/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
data[i++] = CVM_CAST64(netstats->tx_dropped);
/* lio->link_changes */
data[i++] = CVM_CAST64(lio->link_changes);
for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
j = lio->linfo.txpciq[vj].s.q_no;
/* packets to network port */
/* # of packets tx to network */
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
/* # of bytes tx to network */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.tx_tot_bytes);
/* # of packets dropped */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.tx_dropped);
/* # of tx fails due to queue full */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.tx_iq_busy);
/* XXX gather entries sent */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.sgentry_sent);
/* instruction to firmware: data and control */
/* # of instructions to the queue */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.instr_posted);
/* # of instructions processed */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
/* # of instructions could not be processed */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
/* bytes sent through the queue */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.bytes_sent);
/* tso request */
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
/* vxlan request */
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
/* txq restart */
data[i++] = CVM_CAST64(
oct_dev->instr_queue[j]->stats.tx_restart);
}
/* RX */
for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
j = lio->linfo.rxpciq[vj].s.q_no;
/* packets send to TCP/IP network stack */
/* # of packets to network stack */
data[i++] = CVM_CAST64(
oct_dev->droq[j]->stats.rx_pkts_received);
/* # of bytes to network stack */
data[i++] = CVM_CAST64(
oct_dev->droq[j]->stats.rx_bytes_received);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
oct_dev->droq[j]->stats.dropped_toomany +
oct_dev->droq[j]->stats.rx_dropped);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
/* control and data path */
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
}
}
static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
struct octeon_device *oct_dev = lio->oct_dev;
@@ -989,6 +1127,7 @@ static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
switch (oct_dev->chip_id) {
case OCTEON_CN23XX_PF_VID:
case OCTEON_CN23XX_VF_VID:
for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
sprintf(data, "%s", oct_priv_flags_strings[i]);
data += ETH_GSTRING_LEN;
@@ -1050,12 +1189,61 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
int num_iq_stats, num_oq_stats, i, j;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int num_stats;
switch (stringset) {
case ETH_SS_STATS:
num_stats = ARRAY_SIZE(oct_vf_stats_strings);
for (j = 0; j < num_stats; j++) {
sprintf(data, "%s", oct_vf_stats_strings[j]);
data += ETH_GSTRING_LEN;
}
num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
continue;
for (j = 0; j < num_iq_stats; j++) {
sprintf(data, "tx-%d-%s", i,
oct_iq_stats_strings[j]);
data += ETH_GSTRING_LEN;
}
}
num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
continue;
for (j = 0; j < num_oq_stats; j++) {
sprintf(data, "rx-%d-%s", i,
oct_droq_stats_strings[j]);
data += ETH_GSTRING_LEN;
}
}
break;
case ETH_SS_PRIV_FLAGS:
lio_get_priv_flags_strings(lio, data);
break;
default:
netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
break;
}
}
static int lio_get_priv_flags_ss_count(struct lio *lio)
{
struct octeon_device *oct_dev = lio->oct_dev;
switch (oct_dev->chip_id) {
case OCTEON_CN23XX_PF_VID:
case OCTEON_CN23XX_VF_VID:
return ARRAY_SIZE(oct_priv_flags_strings);
case OCTEON_CN68XX:
case OCTEON_CN66XX:
@@ -1083,6 +1271,23 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
}
}
static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
switch (sset) {
case ETH_SS_STATS:
return (ARRAY_SIZE(oct_vf_stats_strings) +
ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
case ETH_SS_PRIV_FLAGS:
return lio_get_priv_flags_ss_count(lio);
default:
return -EOPNOTSUPP;
}
}
static int lio_get_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -1095,6 +1300,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
switch (oct->chip_id) {
case OCTEON_CN23XX_PF_VID:
case OCTEON_CN23XX_VF_VID:
if (!intrmod_cfg->rx_enable) {
intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
intr_coal->rx_max_coalesced_frames =
@@ -1141,7 +1347,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_max_coalesced_frames_low =
intrmod_cfg->rx_mincnt_trigger;
}
if (OCTEON_CN23XX_PF(oct) &&
if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
(intrmod_cfg->tx_enable)) {
intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
intr_coal->tx_max_coalesced_frames_high =
@@ -1499,6 +1705,26 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
oct->intrmod.rx_frames = rx_max_coalesced_frames;
break;
}
case OCTEON_CN23XX_VF_VID: {
int q_no;
if (!intr_coal->rx_max_coalesced_frames)
rx_max_coalesced_frames = oct->intrmod.rx_frames;
else
rx_max_coalesced_frames =
intr_coal->rx_max_coalesced_frames;
for (q_no = 0; q_no < oct->num_oqs; q_no++) {
octeon_write_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
(octeon_read_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
(0x3fffff00000000UL)) |
rx_max_coalesced_frames);
/* consider writing to resend bit here */
}
oct->intrmod.rx_frames = rx_max_coalesced_frames;
break;
}
default:
return -EINVAL;
}
@@ -1552,6 +1778,27 @@ static int oct_cfg_rx_intrtime(struct lio *lio,
oct->intrmod.rx_usecs = rx_coalesce_usecs;
break;
}
case OCTEON_CN23XX_VF_VID: {
u64 time_threshold;
int q_no;
if (!intr_coal->rx_coalesce_usecs)
rx_coalesce_usecs = oct->intrmod.rx_usecs;
else
rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
time_threshold =
cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
for (q_no = 0; q_no < oct->num_oqs; q_no++) {
octeon_write_csr64(
oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
(oct->intrmod.rx_frames |
(time_threshold << 32)));
/* consider setting resend bit */
}
oct->intrmod.rx_usecs = rx_coalesce_usecs;
break;
}
default:
return -EINVAL;
}
@@ -1573,6 +1820,7 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
case OCTEON_CN68XX:
case OCTEON_CN66XX:
break;
case OCTEON_CN23XX_VF_VID:
case OCTEON_CN23XX_PF_VID: {
int q_no;
@@ -1631,6 +1879,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
}
break;
case OCTEON_CN23XX_PF_VID:
case OCTEON_CN23XX_VF_VID:
break;
default:
return -EINVAL;
@@ -1693,86 +1942,6 @@ static int lio_get_ts_info(struct net_device *netdev,
return 0;
}
static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
/* get the link info */
linfo = &lio->linfo;
if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
return -EINVAL;
if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
ecmd->speed != SPEED_10) ||
(ecmd->duplex != DUPLEX_HALF &&
ecmd->duplex != DUPLEX_FULL)))
return -EINVAL;
/* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
* as they operate at fixed Speed and Duplex settings
*/
if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
dev_info(&oct->pci_dev->dev,
"Autonegotiation, duplex and speed settings cannot be modified.\n");
return -EINVAL;
}
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 1000;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
* to SE core application using ncmd.s.more & ncmd.s.param
*/
if (ecmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg ON */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
OCTNIC_NCMD_AUTONEG_ON;
nctrl.ncmd.s.param1 = ecmd->advertising;
} else {
/* Autoneg OFF */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
nctrl.ncmd.s.param2 = ecmd->duplex;
nctrl.ncmd.s.param1 = ecmd->speed;
}
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
return -1;
}
return 0;
}
static int lio_nway_reset(struct net_device *netdev)
{
if (netif_running(netdev)) {
struct ethtool_cmd ecmd;
memset(&ecmd, 0, sizeof(struct ethtool_cmd));
ecmd.autoneg = 0;
ecmd.speed = 0;
ecmd.duplex = 0;
lio_set_settings(netdev, &ecmd);
}
return 0;
}
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
@@ -1782,6 +1951,8 @@ static int lio_get_regs_len(struct net_device *dev)
switch (oct->chip_id) {
case OCTEON_CN23XX_PF_VID:
return OCT_ETHTOOL_REGDUMP_LEN_23XX;
case OCTEON_CN23XX_VF_VID:
return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
default:
return OCT_ETHTOOL_REGDUMP_LEN;
}
@@ -2007,6 +2178,123 @@ static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
return len;
}
static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
int len = 0;
u32 reg;
int i;
/* PCI Window Registers */
len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_SIZE(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_IQ_SIZE(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
len += sprintf(s + len,
"\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
reg, i, (u64)octeon_read_csr64(oct, reg));
}
return len;
}
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
u32 reg;
@@ -2153,6 +2441,10 @@ static void lio_get_regs(struct net_device *dev,
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
len += cn23xx_read_csr_reg(regbuf + len, oct);
break;
case OCTEON_CN23XX_VF_VID:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
break;
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
@@ -2183,7 +2475,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
}
static const struct ethtool_ops lio_ethtool_ops = {
.get_settings = lio_get_settings,
.get_link_ksettings = lio_get_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
@@ -2200,8 +2492,26 @@ static const struct ethtool_ops lio_ethtool_ops = {
.get_msglevel = lio_get_msglevel,
.set_msglevel = lio_set_msglevel,
.get_sset_count = lio_get_sset_count,
.nway_reset = lio_nway_reset,
.set_settings = lio_set_settings,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
.get_ts_info = lio_get_ts_info,
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
.get_link_ksettings = lio_get_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_vf_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
.get_channels = lio_ethtool_get_channels,
.get_strings = lio_vf_get_strings,
.get_ethtool_stats = lio_vf_get_ethtool_stats,
.get_regs_len = lio_get_regs_len,
.get_regs = lio_get_regs,
.get_msglevel = lio_get_msglevel,
.set_msglevel = lio_set_msglevel,
.get_sset_count = lio_vf_get_sset_count,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
@@ -2211,5 +2521,11 @@ static const struct ethtool_ops lio_ethtool_ops = {
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
if (OCTEON_CN23XX_VF(oct))
netdev->ethtool_ops = &lio_vf_ethtool_ops;
else
netdev->ethtool_ops = &lio_ethtool_ops;
}
@@ -42,6 +42,7 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
#define LIO_IFSTATE_DROQ_OPS 0x01
#define LIO_IFSTATE_REGISTERED 0x02
#define LIO_IFSTATE_RUNNING 0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
struct liquidio_if_cfg_context {
int octeon_id;
@@ -65,6 +66,12 @@ struct liquidio_rx_ctl_context {
int cond;
};
struct oct_timestamp_resp {
u64 rh;
u64 timestamp;
u64 status;
};
union tx_info {
u64 u64;
struct {
@@ -168,6 +175,144 @@ static int wait_for_pending_requests(struct octeon_device *oct)
return 0;
}
/**
* \brief Cause device to go quiet so it can be safely removed/reset/etc
* @param oct Pointer to Octeon device
*/
static void pcierror_quiesce_device(struct octeon_device *oct)
{
int i;
/* Disable the input and output queues now. No more packets will
* arrive from Octeon, but we should wait for all packet processing
* to finish.
*/
/* To allow for in-flight requests */
schedule_timeout_uninterruptible(100);
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Force all requests waiting to be fetched by OCTEON to complete. */
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
struct octeon_instr_queue *iq;
if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
iq = oct->instr_queue[i];
if (atomic_read(&iq->instr_pending)) {
spin_lock_bh(&iq->lock);
iq->fill_cnt = 0;
iq->octeon_read_index = iq->host_write_index;
iq->stats.instr_processed +=
atomic_read(&iq->instr_pending);
lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
}
}
/* Force all pending ordered list requests to time out. */
lio_process_ordered_list(oct, 1);
/* We do not need to wait for output queue packets to be processed. */
}
/**
* \brief Cleanup PCI AER uncorrectable error status
* @param dev Pointer to PCI device
*/
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
u32 status, mask;
int pos = 0x100;
pr_info("%s :\n", __func__);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
if (dev->error_state == pci_channel_io_normal)
status &= ~mask; /* Clear corresponding nonfatal bits */
else
status &= mask; /* Clear corresponding fatal bits */
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
/**
* \brief Stop all PCI IO to a given device
* @param dev Pointer to Octeon device
*/
static void stop_pci_io(struct octeon_device *oct)
{
struct msix_entry *msix_entries;
int i;
/* No more instructions will be forwarded. */
atomic_set(&oct->status, OCT_DEV_IN_RESET);
for (i = 0; i < oct->ifcount; i++)
netif_device_detach(oct->props[i].netdev);
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
pcierror_quiesce_device(oct);
if (oct->msix_on) {
msix_entries = (struct msix_entry *)oct->msix_entries;
for (i = 0; i < oct->num_msix_irqs; i++) {
/* clear the affinity_cpumask */
irq_set_affinity_hint(msix_entries[i].vector,
NULL);
free_irq(msix_entries[i].vector,
&oct->ioq_vector[i]);
}
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
octeon_free_ioq_vector(oct);
}
dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
lio_get_state_string(&oct->status));
/* making it a common function for all OCTEON models */
cleanup_aer_uncorrect_error_status(oct->pci_dev);
pci_disable_device(oct->pci_dev);
}
/**
* \brief called when PCI error is detected
* @param pdev Pointer to PCI device
* @param state The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct octeon_device *oct = pci_get_drvdata(pdev);
/* Non-correctable Non-fatal errors */
if (state == pci_channel_io_normal) {
dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
cleanup_aer_uncorrect_error_status(oct->pci_dev);
return PCI_ERS_RESULT_CAN_RECOVER;
}
/* Non-correctable Fatal errors */
dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
stop_pci_io(oct);
return PCI_ERS_RESULT_DISCONNECT;
}
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_vf_err_handler = {
.error_detected = liquidio_pcie_error_detected,
};
static const struct pci_device_id liquidio_vf_pci_tbl[] = {
{
PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
@@ -184,6 +329,7 @@ static struct pci_driver liquidio_vf_pci_driver = {
.id_table = liquidio_vf_pci_tbl,
.probe = liquidio_vf_probe,
.remove = liquidio_vf_remove,
.err_handler = &liquidio_vf_err_handler, /* For AER */
};
/**
@@ -1350,6 +1496,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
container_of(param, struct octeon_droq, napi);
struct net_device *netdev = (struct net_device *)arg;
struct sk_buff *skb = (struct sk_buff *)skbuff;
u16 vtag = 0;
if (netdev) {
struct lio *lio = GET_LIO(netdev);
@@ -1397,12 +1544,34 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
skb->protocol = eth_type_trans(skb, skb->dev);
if ((netdev->features & NETIF_F_RXCSUM) &&
(rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))
(((rh->r_dh.encap_on) &&
(rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
(!(rh->r_dh.encap_on) &&
(rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
/* Setting Encapsulation field on basis of status received
* from the firmware
*/
if (rh->r_dh.encap_on) {
skb->encapsulation = 1;
skb->csum_level = 1;
droq->stats.rx_vxlan++;
}
/* inbound VLAN tag */
if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
rh->r_dh.vlan) {
u16 priority = rh->r_dh.priority;
u16 vid = rh->r_dh.vlan;
vtag = (priority << VLAN_PRIO_SHIFT) | vid;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
}
packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
if (packet_was_received) {
@@ -1800,6 +1969,56 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return 0;
}
/**
* \brief Net device get_stats
* @param netdev network device
*/
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct net_device_stats *stats = &netdev->stats;
u64 pkts = 0, drop = 0, bytes = 0;
struct oct_droq_stats *oq_stats;
struct oct_iq_stats *iq_stats;
struct octeon_device *oct;
int i, iq_no, oq_no;
oct = lio->oct_dev;
for (i = 0; i < lio->linfo.num_txpciq; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
iq_stats = &oct->instr_queue[iq_no]->stats;
pkts += iq_stats->tx_done;
drop += iq_stats->tx_dropped;
bytes += iq_stats->tx_tot_bytes;
}
stats->tx_packets = pkts;
stats->tx_bytes = bytes;
stats->tx_dropped = drop;
pkts = 0;
drop = 0;
bytes = 0;
for (i = 0; i < lio->linfo.num_rxpciq; i++) {
oq_no = lio->linfo.rxpciq[i].s.q_no;
oq_stats = &oct->droq[oq_no]->stats;
pkts += oq_stats->rx_pkts_received;
drop += (oq_stats->rx_dropped +
oq_stats->dropped_nodispatch +
oq_stats->dropped_toomany +
oq_stats->dropped_nomem);
bytes += oq_stats->rx_bytes_received;
}
stats->rx_bytes = bytes;
stats->rx_packets = pkts;
stats->rx_dropped = drop;
return stats;
}
/**
* \brief Net device change_mtu
* @param netdev network device
@@ -1821,6 +2040,169 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
/**
* \brief Handler for SIOCSHWTSTAMP ioctl
* @param netdev network device
* @param ifr interface request
* @param cmd command
*/
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
struct lio *lio = GET_LIO(netdev);
struct hwtstamp_config conf;
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
if (conf.flags)
return -EINVAL;
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
default:
return -ERANGE;
}
switch (conf.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
/**
* \brief ioctl handler
* @param netdev network device
* @param ifr interface request
* @param cmd command
*/
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
}
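Editor's note (illustrative sketch, not part of the patch): the SIOCSHWTSTAMP handler above is normally driven from userspace through a hwtstamp_config ioctl. The sketch below assumes a hypothetical helper name and interface name; it requests HWTSTAMP_TX_ON and HWTSTAMP_FILTER_ALL, which matches the filter value the handler above reports back for the accepted PTP filters.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int lio_enable_hw_timestamps(const char *ifname) /* hypothetical helper */
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;        /* accepted by hwtstamp_ioctl() above */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* handler reports ALL back to userspace */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr); /* reaches liquidio_ioctl() -> hwtstamp_ioctl() */
	close(fd);
	return ret;
}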
static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
{
struct sk_buff *skb = (struct sk_buff *)buf;
struct octnet_buf_free_info *finfo;
struct oct_timestamp_resp *resp;
struct octeon_soft_command *sc;
struct lio *lio;
finfo = (struct octnet_buf_free_info *)skb->cb;
lio = finfo->lio;
sc = finfo->sc;
oct = lio->oct_dev;
resp = (struct oct_timestamp_resp *)sc->virtrptr;
if (status != OCTEON_REQUEST_DONE) {
dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
CVM_CAST64(status));
resp->timestamp = 0;
}
octeon_swap_8B_data(&resp->timestamp, 1);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps ts;
u64 ns = resp->timestamp;
netif_info(lio, tx_done, lio->netdev,
"Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
skb, (unsigned long long)ns);
ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
skb_tstamp_tx(skb, &ts);
}
octeon_free_soft_command(oct, sc);
tx_buffer_free(skb);
}
/* \brief Send a data packet that will be timestamped
* @param oct octeon device
* @param ndata pointer to network data
* @param finfo pointer to private network data
*/
static int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
struct octnet_buf_free_info *finfo)
{
struct octeon_soft_command *sc;
int ring_doorbell;
struct lio *lio;
int retval;
u32 len;
lio = finfo->lio;
sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
sizeof(struct oct_timestamp_resp));
finfo->sc = sc;
if (!sc) {
dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
return IQ_SEND_FAILED;
}
if (ndata->reqtype == REQTYPE_NORESP_NET)
ndata->reqtype = REQTYPE_RESP_NET;
else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
ndata->reqtype = REQTYPE_RESP_NET_SG;
sc->callback = handle_timestamp;
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
ring_doorbell = 1;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype);
if (retval == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
retval);
octeon_free_soft_command(oct, sc);
} else {
netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
}
return retval;
}
/** \brief Transmit networks packets to the Octeon interface
* @param skbuff skbuff struct to be passed to network layer.
* @param netdev pointer to network device
@@ -1905,8 +2287,18 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
cmdsetup.u64 = 0;
cmdsetup.s.iq_no = iq_no;
if (skb->ip_summed == CHECKSUM_PARTIAL)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->encapsulation) {
cmdsetup.s.tnl_csum = 1;
stats->tx_vxlan++;
} else {
cmdsetup.s.transport_csum = 1;
}
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
cmdsetup.s.timestamp = 1;
}
if (!skb_shinfo(skb)->nr_frags) {
cmdsetup.s.u.datasize = skb->len;
@@ -2025,6 +2417,15 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
}
/* HW insert VLAN tag */
if (skb_vlan_tag_present(skb)) {
irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
}
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo);
else
status = octnet_send_nic_data_pkt(oct, &ndata);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
@@ -2074,6 +2475,61 @@ static void liquidio_tx_timeout(struct net_device *netdev)
txqs_wake(netdev);
}
static int
liquidio_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto __attribute__((unused)), u16 vid)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
}
static int
liquidio_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto __attribute__((unused)), u16 vid)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
}
/** Sending command to enable/disable RX checksum offload
* @param netdev pointer to network device
* @param command OCTNET_CMD_TNL_RX_CSUM_CTL
@@ -2105,6 +2561,40 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
return ret;
}
/** Sending command to add/delete VxLAN UDP port to firmware
* @param netdev pointer to network device
* @param command OCTNET_CMD_VXLAN_PORT_CONFIG
* @param vxlan_port VxLAN port to be added or deleted
* @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
* OCTNET_CMD_VXLAN_PORT_DEL
* @returns SUCCESS or FAILURE
*/
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
u16 vxlan_port, u8 vxlan_cmd_bit)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.more = vxlan_cmd_bit;
nctrl.ncmd.s.param1 = vxlan_port;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev,
"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
ret);
}
return ret;
}
/** \brief Net device fix features
* @param netdev pointer to network device
* @param request features requested
@@ -2173,16 +2663,46 @@ static int liquidio_set_features(struct net_device *netdev,
return 0;
}
static void liquidio_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
liquidio_vxlan_port_command(netdev,
OCTNET_CMD_VXLAN_PORT_CONFIG,
htons(ti->port),
OCTNET_CMD_VXLAN_PORT_ADD);
}
static void liquidio_del_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
liquidio_vxlan_port_command(netdev,
OCTNET_CMD_VXLAN_PORT_CONFIG,
htons(ti->port),
OCTNET_CMD_VXLAN_PORT_DEL);
}
static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
.ndo_start_xmit = liquidio_xmit,
.ndo_get_stats = liquidio_get_stats,
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
.ndo_select_queue = select_q,
};
@@ -2388,6 +2908,25 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_LRO;
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
*/
lio->enc_dev_capability = NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_GSO_UDP_TUNNEL
| NETIF_F_HW_CSUM | NETIF_F_SG
| NETIF_F_RXCSUM
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_LRO;
netdev->hw_enc_features =
(lio->enc_dev_capability & ~NETIF_F_LRO);
netdev->vlan_features = lio->dev_capability;
/* Add any unchangeable hw features */
lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
netdev->hw_features = lio->dev_capability;
@@ -2443,6 +2982,13 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
goto setup_nic_dev_fail;
}
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
else
octeon_dev->priv_flags = 0x0;
if (netdev->features & NETIF_F_LRO)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
@@ -2508,6 +3054,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
*/
static int liquidio_init_nic_module(struct octeon_device *oct)
{
struct oct_intrmod_cfg *intrmod_cfg;
int num_nic_ports = 1;
int i, retval = 0;
@@ -2529,6 +3076,26 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
goto octnet_init_failure;
}
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
intrmod_cfg->rx_enable = 1;
intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
intrmod_cfg->tx_enable = 1;
intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
octnet_init_failure:
oct->ifcount = 0;
...