Commit 74e04aca authored by David S. Miller
parents 01da0c2b eef4560f
@@ -220,6 +220,17 @@ config IXGBE_DCB
If unsure, say N.
config IXGBE_PTP
bool "PTP Clock Support"
default n
depends on IXGBE && PTP_1588_CLOCK
---help---
Say Y here if you want support for 1588 Timestamping with a
PHC device, using the PTP 1588 Clock support. This is
required to enable timestamping support for the device.
If unsure, say N.
config IXGBEVF
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
...
@@ -335,7 +335,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
static int igb_get_regs_len(struct net_device *netdev)
{
-#define IGB_REGS_LEN 551
+#define IGB_REGS_LEN 739
return IGB_REGS_LEN * sizeof(u32);
}
@@ -556,6 +556,42 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[552] = adapter->stats.b2ospc;
regs_buff[553] = adapter->stats.o2bspc;
regs_buff[554] = adapter->stats.b2ogprc;
if (hw->mac.type != e1000_82576)
return;
for (i = 0; i < 12; i++)
regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
for (i = 0; i < 4; i++)
regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
for (i = 0; i < 12; i++)
regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
for (i = 0; i < 12; i++)
regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
for (i = 0; i < 12; i++)
regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
for (i = 0; i < 12; i++)
regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
for (i = 0; i < 12; i++)
regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
}
static int igb_get_eeprom_len(struct net_device *netdev)
...
@@ -39,4 +39,6 @@ ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
@@ -36,6 +36,12 @@
#include <linux/aer.h>
#include <linux/if_vlan.h>
#ifdef CONFIG_IXGBE_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#endif /* CONFIG_IXGBE_PTP */
#include "ixgbe_type.h" #include "ixgbe_type.h"
#include "ixgbe_common.h" #include "ixgbe_common.h"
#include "ixgbe_dcb.h" #include "ixgbe_dcb.h"
...@@ -96,6 +102,7 @@ ...@@ -96,6 +102,7 @@
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) #define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -458,6 +465,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
@@ -545,6 +554,17 @@ struct ixgbe_adapter {
u32 interrupt_event;
u32 led_reg;
#ifdef CONFIG_IXGBE_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
u32 base_incval;
u32 cycle_speed;
#endif /* CONFIG_IXGBE_PTP */
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
unsigned int num_vfs;
@@ -652,12 +672,15 @@ extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
@@ -688,4 +711,18 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#ifdef CONFIG_IXGBE_PTP
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
#endif /* CONFIG_IXGBE_PTP */
#endif /* _IXGBE_H_ */
@@ -2561,7 +2561,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
break;
else
/* Use interrupt-safe sleep just in case */
-udelay(10);
+udelay(1000);
}
/* For informational purposes only */
...
@@ -191,53 +191,46 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
-u32 reg;
+u32 fcrtl, reg;
u8 i;
-if (pfc_en) {
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
-/* correct the reporting of our flow control status */
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-reg &= ~IXGBE_FCTRL_RFCE;
-reg |= IXGBE_FCTRL_RPFCE;
-IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
-/* Configure pause time */
-for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
-IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
-/* Configure flow control refresh threshold value */
-IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
-}
-/*
-* Configure flow control thresholds and enable priority flow control
-* for each traffic class.
-*/
+reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
+if (pfc_en)
+reg |= IXGBE_FCTRL_RPFCE;
+IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
+/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-int enabled = pfc_en & (1 << i);
-reg = hw->fc.low_water << 10;
-if (enabled == pfc_enabled_tx ||
-enabled == pfc_enabled_full)
-reg |= IXGBE_FCRTL_XONE;
-IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
-reg = hw->fc.high_water[i] << 10;
-if (enabled == pfc_enabled_tx ||
-enabled == pfc_enabled_full)
-reg |= IXGBE_FCRTH_FCEN;
-IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
-}
+if (!(pfc_en & (1 << i))) {
+IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+continue;
+}
+reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
+}
+/* Configure pause time */
+reg = hw->fc.pause_time * 0x00010001;
+for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+/* Configure flow control refresh threshold value */
+IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return 0;
}
...
@@ -211,24 +211,42 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
*/
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
-u32 i, j, reg;
+u32 i, j, fcrtl, reg;
u8 max_tc = 0;
-for (i = 0; i < MAX_USER_PRIORITY; i++)
+/* Enable Transmit Priority Flow Control */
+IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+/* Enable Receive Priority Flow Control */
+reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+reg |= IXGBE_MFLCN_DPF;
+/*
+* X540 supports per TC Rx priority flow control. So
+* clear all TCs and only enable those that should be
+* enabled.
+*/
+reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+if (hw->mac.type == ixgbe_mac_X540)
+reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+if (pfc_en)
+reg |= IXGBE_MFLCN_RPFCE;
+IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+for (i = 0; i < MAX_USER_PRIORITY; i++) {
if (prio_tc[i] > max_tc)
max_tc = prio_tc[i];
+}
+fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
-for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+for (i = 0; i <= max_tc; i++) {
int enabled = 0;
-if (i > max_tc) {
-reg = 0;
-IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
-IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
-continue;
-}
for (j = 0; j < MAX_USER_PRIORITY; j++) {
if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
enabled = 1;
@@ -236,51 +254,30 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
}
}
-reg = hw->fc.low_water << 10;
-if (enabled)
-reg |= IXGBE_FCRTL_XONE;
-IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
-reg = hw->fc.high_water[i] << 10;
-if (enabled)
-reg |= IXGBE_FCRTH_FCEN;
+if (enabled) {
+reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+} else {
+reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+}
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
-if (pfc_en) {
+for (; i < MAX_TRAFFIC_CLASS; i++) {
+IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
+}
/* Configure pause time (2 TCs per register) */
-reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+reg = hw->fc.pause_time * 0x00010001;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-reg = IXGBE_FCCFG_TFCE_PRIORITY;
-IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
-/*
-* Enable Receive PFC
-* 82599 will always honor XOFF frames we receive when
-* we are in PFC mode however X540 only honors enabled
-* traffic classes.
-*/
-reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-reg &= ~IXGBE_MFLCN_RFCE;
-reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
-if (hw->mac.type == ixgbe_mac_X540) {
-reg &= ~IXGBE_MFLCN_RPFCE_MASK;
-reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
-}
-IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-} else {
-hw->mac.ops.fc_enable(hw);
-}
return 0;
}
...
@@ -338,6 +338,8 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+struct ixgbe_hw *hw = &adapter->hw;
int ret = DCB_NO_HW_CHG;
int i;
@@ -350,32 +352,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (!adapter->dcb_set_bitmap)
return ret;
-if (adapter->dcb_cfg.pfc_mode_enable) {
-switch (adapter->hw.mac.type) {
-case ixgbe_mac_82599EB:
-case ixgbe_mac_X540:
-if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
-adapter->last_lfc_mode =
-adapter->hw.fc.current_mode;
-break;
-default:
-break;
-}
-adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
-} else {
-switch (adapter->hw.mac.type) {
-case ixgbe_mac_82598EB:
-adapter->hw.fc.requested_mode = ixgbe_fc_none;
-break;
-case ixgbe_mac_82599EB:
-case ixgbe_mac_X540:
-adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-break;
-default:
-break;
-}
-}
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -388,23 +364,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
-ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
-max_frame, DCB_TX_CONFIG);
-ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
-max_frame, DCB_RX_CONFIG);
-ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
-DCB_TX_CONFIG, refill);
-ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
-ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
-DCB_TX_CONFIG, bwg_id);
-ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
-DCB_TX_CONFIG, prio_type);
-ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
-DCB_TX_CONFIG, prio_tc);
-ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
-bwg_id, prio_type, prio_tc);
+ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
+DCB_TX_CONFIG);
+ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
+DCB_RX_CONFIG);
+ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill);
+ixgbe_dcb_unpack_max(dcb_cfg, max);
+ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id);
+ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type);
+ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id,
+prio_type, prio_tc);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
@@ -413,19 +385,21 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
if (adapter->dcb_set_bitmap & BIT_PFC) {
+if (dcb_cfg->pfc_mode_enable) {
u8 pfc_en;
u8 prio_tc[MAX_USER_PRIORITY];
-ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
-DCB_TX_CONFIG, prio_tc);
-ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
-ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
-if (ret != DCB_HW_CHG_RST)
-ret = DCB_HW_CHG;
+ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en);
+ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc);
+} else {
+hw->mac.ops.fc_enable(hw);
}
-if (adapter->dcb_cfg.pfc_mode_enable)
-adapter->hw.fc.current_mode = ixgbe_fc_pfc;
+ixgbe_set_rx_drop_en(adapter);
+ret = DCB_HW_CHG;
+}
#ifdef IXGBE_FCOE
/* Reprogram FCoE hardware offloads when the traffic class
@@ -647,7 +621,9 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+struct ixgbe_hw *hw = &adapter->hw;
u8 *prio_tc;
+int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
@@ -659,16 +635,18 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return -ENOMEM;
}
-if (pfc->pfc_en) {
-adapter->last_lfc_mode = adapter->hw.fc.current_mode;
-adapter->hw.fc.current_mode = ixgbe_fc_pfc;
-} else {
-adapter->hw.fc.current_mode = adapter->last_lfc_mode;
-}
prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
-return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
+/* Enable link flow control parameters if PFC is disabled */
+if (pfc->pfc_en)
+err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc);
+else
+err = hw->mac.ops.fc_enable(hw);
+ixgbe_set_rx_drop_en(adapter);
+return err;
}
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
...
@@ -391,11 +391,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
} else if (hw->fc.current_mode == ixgbe_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
-#ifdef CONFIG_DCB
-} else if (hw->fc.current_mode == ixgbe_fc_pfc) {
-pause->rx_pause = 0;
-pause->tx_pause = 0;
-#endif
}
}
@@ -404,21 +399,14 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
-struct ixgbe_fc_info fc;
+struct ixgbe_fc_info fc = hw->fc;
-#ifdef CONFIG_DCB
-if (adapter->dcb_cfg.pfc_mode_enable ||
-((hw->mac.type == ixgbe_mac_82598EB) &&
-(adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
+/* 82598 does not support link flow control with DCB enabled */
+if ((hw->mac.type == ixgbe_mac_82598EB) &&
+(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return -EINVAL;
-#endif
-fc = hw->fc;
-if (pause->autoneg != AUTONEG_ENABLE)
-fc.disable_fc_autoneg = true;
-else
-fc.disable_fc_autoneg = false;
+fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
fc.requested_mode = ixgbe_fc_full;
@@ -426,14 +414,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
fc.requested_mode = ixgbe_fc_rx_pause;
else if (!pause->rx_pause && pause->tx_pause)
fc.requested_mode = ixgbe_fc_tx_pause;
-else if (!pause->rx_pause && !pause->tx_pause)
-fc.requested_mode = ixgbe_fc_none;
else
-return -EINVAL;
-#ifdef CONFIG_DCB
-adapter->last_lfc_mode = fc.requested_mode;
-#endif
+fc.requested_mode = ixgbe_fc_none;
/* if the thing changed then we'll update and use new autoneg */
if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
@@ -2714,6 +2696,46 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
static int ixgbe_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
#ifdef CONFIG_IXGBE_PTP
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
else
info->phc_index = -1;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_SOME);
break;
#endif /* CONFIG_IXGBE_PTP */
default:
return ethtool_op_get_ts_info(dev, info);
break;
}
return 0;
}
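/*
 * Illustrative check, not part of this patch: once .get_ts_info is wired
 * into the ethtool_ops below, "ethtool -T <iface>" should report the
 * hardware timestamping capabilities and the PHC index advertised above.
 */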
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2742,6 +2764,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_ts_info = ixgbe_get_ts_info,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
...
@@ -63,8 +63,8 @@ static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
#define MAJ 3
-#define MIN 8
-#define BUILD 21
+#define MIN 9
+#define BUILD 15
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
@@ -610,16 +610,17 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
/* tx_buffer must be completely set up in the transmit path */
}
-static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *hwstats = &adapter->stats;
-u32 data = 0;
-u32 xoff[8] = {0};
int i;
+u32 data;
-if ((hw->fc.current_mode == ixgbe_fc_full) ||
-(hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+if ((hw->fc.current_mode != ixgbe_fc_full) &&
+(hw->fc.current_mode != ixgbe_fc_rx_pause))
+return;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
@@ -636,13 +637,23 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
+}
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+struct ixgbe_hw *hw = &adapter->hw;
+struct ixgbe_hw_stats *hwstats = &adapter->stats;
+u32 xoff[8] = {0};
+int i;
+bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+if (adapter->ixgbe_ieee_pfc)
+pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
+ixgbe_update_xoff_rx_lfc(adapter);
return;
-} else if (((adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) &&
-!(adapter->dcb_cfg.pfc_mode_enable)) ||
-((adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) &&
-adapter->ixgbe_ieee_pfc &&
-!(adapter->ixgbe_ieee_pfc->pfc_en)))
-return;
+}
/* update stats for each tc, only valid with PFC enabled */
for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
@@ -778,6 +789,13 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP
if (unlikely(tx_buffer->tx_flags &
IXGBE_TX_FLAGS_TSTAMP))
ixgbe_ptp_tx_hwtstamp(q_vector,
tx_buffer->skb);
#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -1378,6 +1396,11 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
#endif
if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid);
@@ -2299,6 +2322,9 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
}
ixgbe_check_fan_failure(adapter, eicr);
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_check_pps_event(adapter, eicr);
#endif
/* re-enable the original interrupt state, no lsc, no queues */
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2491,6 +2517,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
}
ixgbe_check_fan_failure(adapter, eicr);
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_check_pps_event(adapter, eicr);
#endif
/* would disable interrupts here but EIAM disabled it */
napi_schedule(&q_vector->napi);
@@ -2760,6 +2789,61 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u8 reg_idx = ring->reg_idx;
u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
srrctl |= IXGBE_SRRCTL_DROP_EN;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u8 reg_idx = ring->reg_idx;
u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
srrctl &= ~IXGBE_SRRCTL_DROP_EN;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#else
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
/*
* We should set the drop enable bit if:
* SR-IOV is enabled
* or
* Number of Rx queues > 1 and flow control is disabled
*
* This allows us to avoid head of line blocking for security
* and performance reasons.
*/
if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
!(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
} else {
for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
@@ -4403,9 +4487,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* default flow control settings */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
-#ifdef CONFIG_DCB
-adapter->last_lfc_mode = hw->fc.current_mode;
-#endif
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
@@ -5268,8 +5349,10 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
-if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en))
+if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
hw->mac.ops.fc_enable(hw);
+ixgbe_set_rx_drop_en(adapter);
+}
if (link_up ||
time_after(jiffies, (adapter->link_check_timeout +
@@ -5322,6 +5405,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
flow_rx = false;
break;
}
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_start_cyclecounter(adapter);
#endif
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ? (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
"10 Gbps" : "10 Gbps" :
...@@ -5359,6 +5447,10 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) ...@@ -5359,6 +5447,10 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_start_cyclecounter(adapter);
#endif
e_info(drv, "NIC Link is Down\n"); e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev); netif_carrier_off(netdev);
} }
...@@ -5658,6 +5750,9 @@ static void ixgbe_service_task(struct work_struct *work) ...@@ -5658,6 +5750,9 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter); ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter); ixgbe_check_hang_subtask(adapter);
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_overflow_check(adapter);
#endif
ixgbe_service_event_complete(adapter);
}
@@ -5808,6 +5903,11 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
#ifdef CONFIG_IXGBE_PTP
if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
#endif
/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
@@ -6198,6 +6298,15 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
skb_tx_timestamp(skb);
#ifdef CONFIG_IXGBE_PTP
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
}
#endif
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -6350,7 +6459,14 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
switch (cmd) {
#ifdef CONFIG_IXGBE_PTP
case SIOCSHWTSTAMP:
return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
#endif
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
}
/**
@@ -6542,14 +6658,16 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) {
netdev_set_num_tc(dev, tc);
-adapter->last_lfc_mode = adapter->hw.fc.current_mode;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
adapter->hw.fc.requested_mode = ixgbe_fc_none;
+}
} else {
netdev_reset_tc(dev);
+if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
@@ -7135,6 +7253,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_init(adapter);
#endif /* CONFIG_IXGBE_PTP*/
/* save off EEPROM version number */
hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7222,8 +7344,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
e_dev_info("%s\n", ixgbe_default_device_descr);
cards_found++;
#ifdef CONFIG_IXGBE_HWMON
if (ixgbe_sysfs_init(adapter))
e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */
return 0;
@@ -7263,6 +7387,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_DOWN, &adapter->state);
cancel_work_sync(&adapter->service_task);
#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_stop(adapter);
#endif
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
@@ -7271,7 +7399,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
}
#endif
#ifdef CONFIG_IXGBE_HWMON
ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
...
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include <linux/export.h>
/*
* The 82599 and the X540 do not have true 64bit nanosecond scale
* counter registers. Instead, SYSTIME is defined by a fixed point
* system which allows the user to define the scale counter increment
* value at every level change of the oscillator driving the SYSTIME
* value. For both devices the TIMINCA:IV field defines this
* increment. On the X540 device, 31 bits are provided. However, the
* 82599 only provides 24 bits. The time unit is determined by the
* clock frequency of the oscillator in combination with the TIMINCA
* register. When these devices link at 10Gb the oscillator has a
* period of 6.4ns. In order to convert the scale counter into
* nanoseconds the cyclecounter and timecounter structures are
* used. The SYSTIME registers need to be converted to ns values by use
* of only a right shift (division by power of 2). The following math
* determines the largest incvalue that will fit into the available
* bits in the TIMINCA register.
*
* PeriodWidth: Number of bits to store the clock period
* MaxWidth: The maximum width value of the TIMINCA register
* Period: The clock period for the oscillator
* round(): discard the fractional portion of the calculation
*
* Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
*
* For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns
* For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns
*
* The period also changes based on the link speed:
* At 10Gb link or no link, the period remains the same.
* At 1Gb link, the period is multiplied by 10. (64ns)
* At 100Mb link, the period is multiplied by 100. (640ns)
*
* The calculated value allows us to right shift the SYSTIME register
* value in order to quickly convert it into a nanosecond clock,
* while allowing for the maximum possible adjustment value.
*
* These diagrams are only for the 10Gb link period
*
* SYSTIMEH SYSTIMEL
* +--------------+ +--------------+
* X540 | 32 | | 1 | 3 | 28 |
* *--------------+ +--------------+
* \________ 36 bits ______/ fract
*
* +--------------+ +--------------+
* 82599 | 32 | | 8 | 3 | 21 |
* *--------------+ +--------------+
* \________ 43 bits ______/ fract
*
* The 36 bit X540 SYSTIME overflows every
* 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds
*
* The 43 bit 82599 SYSTIME overflows every
* 2^43 * 10^-9 / 3600 = 2.4 hours
*/
#define IXGBE_INCVAL_10GB 0x66666666
#define IXGBE_INCVAL_1GB 0x40000000
#define IXGBE_INCVAL_100 0x50000000
#define IXGBE_INCVAL_SHIFT_10GB 28
#define IXGBE_INCVAL_SHIFT_1GB 24
#define IXGBE_INCVAL_SHIFT_100 21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24
#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL
#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
#ifndef NSECS_PER_SEC
#define NSECS_PER_SEC 1000000000ULL
#endif
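/*
 * Worked example of the math above (illustrative only): at a 10Gb link
 * the 6.4 ns period is stored with a 28 bit fractional part, so the
 * increment value is
 *
 *   round(6.4 ns * 2^28) = 1717986918 = 0x66666666
 *
 * which is IXGBE_INCVAL_10GB paired with IXGBE_INCVAL_SHIFT_10GB.
 * Converting a raw SYSTIME reading back into nanoseconds is then a
 * plain right shift by those 28 bits.
 */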
/**
* ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
* @cc - the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
* arbitrary fixed point registers
*/
static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
{
struct ixgbe_adapter *adapter =
container_of(cc, struct ixgbe_adapter, cc);
struct ixgbe_hw *hw = &adapter->hw;
u64 stamp = 0;
stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
return stamp;
}
/**
* ixgbe_ptp_adjfreq
* @ptp - the ptp clock structure
* @ppb - parts per billion adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
*/
static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
u64 freq;
u32 diff, incval;
int neg_adj = 0;
if (ppb < 0) {
neg_adj = 1;
ppb = -ppb;
}
smp_mb();
incval = ACCESS_ONCE(adapter->base_incval);
freq = incval;
freq *= ppb;
diff = div_u64(freq, 1000000000ULL);
incval = neg_adj ? (incval - diff) : (incval + diff);
switch (hw->mac.type) {
case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
break;
case ixgbe_mac_82599EB:
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
(1 << IXGBE_INCPER_SHIFT_82599) |
incval);
break;
default:
break;
}
return 0;
}
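/*
 * Worked example for the scaling above (illustrative numbers only): with
 * base_incval = 0x66666666 (1717986918) and a requested ppb of 100
 * (0.1 ppm), diff = 1717986918 * 100 / 10^9 = 171, so on the X540 the
 * value written to TIMINCA is base_incval + 171 and the counter runs
 * 0.1 ppm faster than the nominal rate.
 */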
/**
* ixgbe_ptp_adjtime
* @ptp - the ptp clock structure
* @delta - offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
unsigned long flags;
u64 now;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
now = timecounter_read(&adapter->tc);
now += delta;
/* reset the timecounter */
timecounter_init(&adapter->tc,
&adapter->cc,
now);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
return 0;
}
/**
* ixgbe_ptp_gettime
* @ptp - the ptp clock structure
* @ts - timespec structure to hold the current time value
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec.
*/
static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
u64 ns;
u32 remainder;
unsigned long flags;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
ts->tv_nsec = remainder;
return 0;
}
/**
* ixgbe_ptp_settime
* @ptp - the ptp clock structure
* @ts - the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
*/
static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
u64 ns;
unsigned long flags;
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
/* reset the timecounter */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->tc, &adapter->cc, ns);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
return 0;
}
/**
* ixgbe_ptp_enable
* @ptp - the ptp clock structure
* @rq - the requested feature to change
* @on - whether to enable or disable the feature
*
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
*/
static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
/**
* When PPS is enabled, unmask the interrupt for the ClockOut
* feature, so that the interrupt handler can send the PPS
* event when the clock SDP triggers. Clear mask when PPS is
* disabled
*/
if (rq->type == PTP_CLK_REQ_PPS) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
else
adapter->flags2 &=
~IXGBE_FLAG2_PTP_PPS_ENABLED;
return 0;
default:
break;
}
}
return -ENOTSUPP;
}
/**
* ixgbe_ptp_check_pps_event
* @adapter - the private adapter structure
* @eicr - the interrupt cause register value
*
* This function is called by the interrupt routine when checking for
* interrupts. It will check and handle a pps event.
*/
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
struct ptp_clock_event event;
event.type = PTP_CLOCK_PPS;
/* Make sure ptp clock is valid, and PPS event enabled */
if (!adapter->ptp_clock ||
!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
return;
switch (hw->mac.type) {
case ixgbe_mac_X540:
if (eicr & IXGBE_EICR_TIMESYNC)
ptp_clock_event(adapter->ptp_clock, &event);
break;
default:
break;
}
}
/**
* ixgbe_ptp_enable_sdp
* @hw - the hardware private structure
* @shift - the clock shift for calculating nanoseconds
*
* this function enables the clock out feature on the sdp0 for the
* X540 device. It will create a 1 second periodic output that can be
* used as the PPS (via an interrupt).
*
* It calculates when the systime will be on an exact second, and then
* aligns the start of the PPS signal to that value. The shift is
* necessary because it can change based on the link speed.
*/
static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
{
u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh;
u64 clock_edge = 0;
u32 rem;
switch (hw->mac.type) {
case ixgbe_mac_X540:
esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
/*
* enable the SDP0 pin as output, and connected to the native
* function for Timesync (ClockOut)
*/
esdp |= (IXGBE_ESDP_SDP0_DIR |
IXGBE_ESDP_SDP0_NATIVE);
/*
* enable the Clock Out feature on SDP0, and allow interrupts
* to occur when the pin changes
*/
tsauxc = (IXGBE_TSAUXC_EN_CLK |
IXGBE_TSAUXC_SYNCLK |
IXGBE_TSAUXC_SDP0_INT);
/* clock period (or pulse length) */
clktiml = (u32)(NSECS_PER_SEC << shift);
clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
/*
* account for the fact that we can't do u64 division
* with remainder, by converting the clock values into
* nanoseconds first
*/
clock_edge >>= shift;
div_u64_rem(clock_edge, NSECS_PER_SEC, &rem);
clock_edge += (NSECS_PER_SEC - rem);
clock_edge <<= shift;
/* specify the initial clock start time */
trgttiml = (u32)clock_edge;
trgttimh = (u32)(clock_edge >> 32);
IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC);
break;
default:
break;
}
}
/**
* ixgbe_ptp_disable_sdp
* @hw - the private hardware structure
*
* this function disables the auxiliary SDP clock out feature
*/
static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw)
{
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC);
IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0);
}
/**
* ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
* @work: structure containing information about this work task
*
* this work function is scheduled to continue reading the timecounter
* in order to prevent missing when the system time registers wrap
* around. This needs to be run approximately twice a minute when no
* PTP activity is occurring.
*/
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
unsigned long elapsed_jiffies = jiffies - adapter->last_overflow_check;
struct timespec ts;
if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
(elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
}
}
/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval = 0, ns;
u32 tsynctxctl;
unsigned long flags;
/* we cannot process timestamps on a ring without a q_vector */
if (!q_vector || !q_vector->adapter)
return;
adapter = q_vector->adapter;
hw = &adapter->hw;
tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
/*
* if TX timestamp is not valid, exit after clearing the
* timestamp registers
*/
if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
}
/**
* ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
struct skb_shared_hwtstamps *shhwtstamps;
u64 regval = 0, ns;
u32 tsyncrxctl;
unsigned long flags;
/* we cannot process timestamps on a ring without a q_vector */
if (!q_vector || !q_vector->adapter)
return;
adapter = q_vector->adapter;
hw = &adapter->hw;
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
* read the registers to make them available again. Because only one
* packet can be time stamped at a time, we know that the register
* values must belong to this one here and therefore we don't need to
* compare any of the additional attributes stored for it.
*
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
/**
* ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
* @adapter: pointer to adapter struct
* @ifreq: ioctl data
* @cmd: particular ioctl requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
*
* Incoming time stamping has to be configured via the hardware
* filters. Not all combinations are supported, in particular event
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
*/
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = 0;
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
is_l2 = true;
is_l4 = true;
config.rx_filter = HWTSTAMP_FILTER_SOME;
break;
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
is_l2 = true;
is_l4 = true;
config.rx_filter = HWTSTAMP_FILTER_SOME;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
default:
/*
* register RXMTRL must be set, therefore it is not
* possible to time stamp both V1 Sync and Delay_Req messages
* and hardware does not support timestamping all packets
* => return error
*/
return -ERANGE;
}
if (hw->mac.type == ixgbe_mac_82598EB) {
if (tsync_rx_ctl | tsync_tx_ctl)
return -ERANGE;
return 0;
}
/* define ethertype filter for timestamped packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
(IXGBE_ETQF_FILTER_EN | /* enable filter */
IXGBE_ETQF_1588 | /* enable timestamping */
ETH_P_1588)); /* 1588 eth protocol type */
else
IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);
#define PTP_PORT 319
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
| IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
| IXGBE_FTQF_QUEUE_ENABLE);
ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
& IXGBE_FTQF_DEST_PORT_MASK /* dest check */
& IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
<< IXGBE_FTQF_5TUPLE_MASK_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
(3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
IXGBE_IMIR_SIZE_BP_82599));
/* enable port check */
IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
(htons(PTP_PORT) |
htons(PTP_PORT) << 16));
IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
tsync_rx_mtrl |= PTP_PORT << 16;
} else {
IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
}
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
regval |= tsync_tx_ctl;
IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);
/* enable/disable RX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
regval |= tsync_rx_ctl;
IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);
/* define which PTP packets are time stamped */
IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);
IXGBE_WRITE_FLUSH(hw);
/* clear TX/RX time stamp registers, just to be sure */
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
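/*
 * Illustrative user-space sketch (an assumption, not part of the driver):
 * roughly how an application could reach the ioctl above through the
 * standard SIOCSHWTSTAMP request. The interface name "eth0" is a
 * placeholder and the function name is hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int request_hw_timestamping(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the kernel copies cfg back with the filter it actually applied */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return -1;
	}
	printf("rx_filter granted: %d\n", cfg.rx_filter);
	return fd;
}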
/**
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
 * @adapter: pointer to the adapter structure
*
* this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
* fixed point cycles registers in the hardware.
*
* A change in link speed impacts the frequency of the DMA clock on
* the device, which is used to generate the cycle counter
 * registers. Therefore this function is called whenever the link speed
* changes.
*
* This function also turns on the SDP pin for clock out feature (X540
* only), because this is where the shift is first calculated.
*/
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
u32 shift = 0;
u32 cycle_speed;
unsigned long flags;
/**
* Determine what speed we need to set the cyclecounter
* for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
 * unknown speeds as 10Gb (hence we can't just copy the
 * link_speed value).
*/
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
case IXGBE_LINK_SPEED_1GB_FULL:
case IXGBE_LINK_SPEED_10GB_FULL:
cycle_speed = adapter->link_speed;
break;
default:
/* cycle speed should be 10Gb when there is no link */
cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
}
/* Bail if the cycle speed didn't change */
if (adapter->cycle_speed == cycle_speed)
return;
/* disable the SDP clock out */
ixgbe_ptp_disable_sdp(hw);
/**
* Scale the NIC cycle counter by a large factor so that
* relatively small corrections to the frequency can be added
* or subtracted. The drawbacks of a large factor include
* (a) the clock register overflows more quickly, (b) the cycle
* counter structure must be able to convert the systime value
* to nanoseconds using only a multiplier and a right-shift,
* and (c) the value must fit within the timinca register space
* => math based on internal DMA clock rate and available bits
*/
switch (cycle_speed) {
case IXGBE_LINK_SPEED_100_FULL:
incval = IXGBE_INCVAL_100;
shift = IXGBE_INCVAL_SHIFT_100;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
incval = IXGBE_INCVAL_1GB;
shift = IXGBE_INCVAL_SHIFT_1GB;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
incval = IXGBE_INCVAL_10GB;
shift = IXGBE_INCVAL_SHIFT_10GB;
break;
}
/**
* Modify the calculated values to fit within the correct
* number of bits specified by the hardware. The 82599 doesn't
* have the same space as the X540, so bitshift the calculated
* values to fit.
*/
switch (hw->mac.type) {
case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
break;
case ixgbe_mac_82599EB:
incval >>= IXGBE_INCVAL_SHIFT_82599;
shift -= IXGBE_INCVAL_SHIFT_82599;
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
(1 << IXGBE_INCPER_SHIFT_82599) |
incval);
break;
default:
/* other devices aren't supported */
return;
}
/* reset the system time registers */
IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
/* now that the shift has been calculated and the systime
 * registers reset, (re-)enable the Clock out feature */
ixgbe_ptp_enable_sdp(hw, shift);
/* store the new cycle speed */
adapter->cycle_speed = cycle_speed;
ACCESS_ONCE(adapter->base_incval) = incval;
smp_mb();
/* grab the ptp lock */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
memset(&adapter->cc, 0, sizeof(adapter->cc));
adapter->cc.read = ixgbe_ptp_read;
adapter->cc.mask = CLOCKSOURCE_MASK(64);
adapter->cc.shift = shift;
adapter->cc.mult = 1;
/* reset the ns time counter */
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
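/*
 * Illustrative sketch (not the driver's actual callbacks): the timecounter
 * set up above converts raw SYSTIM cycles to nanoseconds as
 * ns = (cycles * cc.mult) >> cc.shift, with cc.mult fixed at 1. Frequency
 * corrections therefore have to be applied by scaling base_incval before it
 * is written back into TIMINCA. The helpers below only demonstrate that
 * arithmetic; their names are hypothetical and <linux/math64.h> is assumed.
 */
static u64 ixgbe_example_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}

static u32 ixgbe_example_scale_incval(u32 base_incval, s32 ppb)
{
	u64 adj = (u64)base_incval * (ppb < 0 ? -ppb : ppb);

	/* parts-per-billion: divide the scaled value back down by 1e9 */
	adj = div_u64(adj, 1000000000ULL);

	return ppb < 0 ? base_incval - (u32)adj : base_incval + (u32)adj;
}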
/**
 * ixgbe_ptp_init - initialize PTP clock support for the adapter
 * @adapter: the ixgbe private adapter structure
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
* cyclecounter init routine and exits.
*/
void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
default:
adapter->ptp_clock = NULL;
return;
}
spin_lock_init(&adapter->tmreg_lock);
ixgbe_ptp_start_cyclecounter(adapter);
/* (Re)start the overflow check */
adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
return;
}
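/*
 * Illustrative user-space sketch (an assumption, not part of the driver):
 * once ptp_clock_register() succeeds, the PHC shows up as /dev/ptpN and can
 * be read as a dynamic POSIX clock. FD_TO_CLOCKID follows the convention
 * used by the kernel's PTP test program; "/dev/ptp0" is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int read_phc_time(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return -1;

	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return fd;
}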
/**
* ixgbe_ptp_stop - disable ptp device and stop the overflow check
* @adapter: pointer to adapter struct
*
* this function stops the ptp support, and cancels the delayed work.
*/
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
ixgbe_ptp_disable_sdp(&adapter->hw);
/* stop the overflow check task */
adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
e_dev_info("removed PHC on %s\n",
adapter->netdev->name);
}
}
...@@ -544,13 +544,18 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) ...@@ -544,13 +544,18 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval) if (retval) {
pr_err("Error receiving message from VF\n"); pr_err("Error receiving message from VF\n");
return retval;
}
/* this is a message we already processed, do nothing */ /* this is a message we already processed, do nothing */
if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
return retval; return retval;
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
/* /*
* until the vf completes a virtual function reset it should not be * until the vf completes a virtual function reset it should not be
* allowed to start any configuration. * allowed to start any configuration.
...@@ -635,14 +640,14 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) ...@@ -635,14 +640,14 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
} }
break; break;
case IXGBE_VF_SET_MACVLAN: case IXGBE_VF_SET_MACVLAN:
if (adapter->vfinfo[vf].pf_set_mac) { index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
IXGBE_VT_MSGINFO_SHIFT;
if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
e_warn(drv, "VF %d requested MACVLAN filter but is " e_warn(drv, "VF %d requested MACVLAN filter but is "
"administratively denied\n", vf); "administratively denied\n", vf);
retval = -1; retval = -1;
break; break;
} }
index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
IXGBE_VT_MSGINFO_SHIFT;
/* /*
* If the VF is allowed to set MAC filters then turn off * If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives. An index * anti-spoofing to avoid false positives. An index
......
...@@ -37,12 +37,7 @@ ...@@ -37,12 +37,7 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/hwmon.h> #include <linux/hwmon.h>
/*
* This file provides a sysfs interface to export information from the
* driver. The information presented is READ-ONLY.
*/
#ifdef CONFIG_IXGBE_HWMON #ifdef CONFIG_IXGBE_HWMON
/* hwmon callback functions */ /* hwmon callback functions */
static ssize_t ixgbe_hwmon_show_location(struct device *dev, static ssize_t ixgbe_hwmon_show_location(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
...@@ -162,17 +157,13 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, ...@@ -162,17 +157,13 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
return rc; return rc;
} }
#endif /* CONFIG_IXGBE_HWMON */
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
{ {
#ifdef CONFIG_IXGBE_HWMON
int i; int i;
#endif /* CONFIG_IXGBE_HWMON */
if (adapter == NULL) if (adapter == NULL)
return; return;
#ifdef CONFIG_IXGBE_HWMON
for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) { for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
device_remove_file(&adapter->pdev->dev, device_remove_file(&adapter->pdev->dev,
...@@ -183,12 +174,6 @@ static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) ...@@ -183,12 +174,6 @@ static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
if (adapter->ixgbe_hwmon_buff.device) if (adapter->ixgbe_hwmon_buff.device)
hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device); hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
#endif /* CONFIG_IXGBE_HWMON */
if (adapter->info_kobj != NULL) {
kobject_put(adapter->info_kobj);
adapter->info_kobj = NULL;
}
} }
/* called from ixgbe_main.c */ /* called from ixgbe_main.c */
...@@ -200,32 +185,19 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) ...@@ -200,32 +185,19 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
/* called from ixgbe_main.c */ /* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{ {
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
unsigned int i; unsigned int i;
int n_attrs; int n_attrs;
#endif /* CONFIG_IXGBE_HWMON */
struct net_device *netdev = adapter->netdev;
int rc = 0; int rc = 0;
/* create info kobj and attribute listings in kobj */
adapter->info_kobj = kobject_create_and_add("info", &netdev->dev.kobj);
if (adapter->info_kobj == NULL) {
rc = -ENOMEM;
goto err;
}
#ifdef CONFIG_IXGBE_HWMON
/* If this method isn't defined we don't support thermals */ /* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) {
rc = -EPERM; goto exit;
goto err;
} }
/* Don't create thermal hwmon interface if no sensors present */ /* Don't create thermal hwmon interface if no sensors present */
rc = adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw); if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
if (rc) goto exit;
goto err;
/* /*
 * Allocate space for max attributes * Allocate space for max attributes
...@@ -261,7 +233,6 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) ...@@ -261,7 +233,6 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
if (rc) if (rc)
goto err; goto err;
} }
#endif /* CONFIG_IXGBE_HWMON */
goto exit; goto exit;
...@@ -270,4 +241,5 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) ...@@ -270,4 +241,5 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
exit: exit:
return rc; return rc;
} }
#endif /* CONFIG_IXGBE_HWMON */
...@@ -824,6 +824,8 @@ struct ixgbe_thermal_sensor_data { ...@@ -824,6 +824,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ #define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ #define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ #define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ #define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ #define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ #define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
...@@ -1309,6 +1311,7 @@ enum { ...@@ -1309,6 +1311,7 @@ enum {
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ #define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ #define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
...@@ -1326,6 +1329,7 @@ enum { ...@@ -1326,6 +1329,7 @@ enum {
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
...@@ -1344,6 +1348,7 @@ enum { ...@@ -1344,6 +1348,7 @@ enum {
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
...@@ -1361,6 +1366,7 @@ enum { ...@@ -1361,6 +1366,7 @@ enum {
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ #define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
...@@ -1501,8 +1507,10 @@ enum { ...@@ -1501,8 +1507,10 @@ enum {
#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ #define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ #define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ #define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
/* LEDCTL Bit Masks */ /* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040 #define IXGBE_LED_IVRT_BASE 0x00000040
...@@ -1879,6 +1887,40 @@ enum { ...@@ -1879,6 +1887,40 @@ enum {
#define IXGBE_RXDCTL_RLPML_EN 0x00008000 #define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
#define IXGBE_TSAUXC_EN_CLK 0x00000004
#define IXGBE_TSAUXC_SYNCLK 0x00000008
#define IXGBE_TSAUXC_SDP0_INT 0x00000040
#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ #define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
...@@ -2008,6 +2050,7 @@ enum { ...@@ -2008,6 +2050,7 @@ enum {
#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */
/* PSRTYPE bit definitions */ /* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010 #define IXGBE_PSRTYPE_TCPHDR 0x00000010
...@@ -2285,6 +2328,7 @@ struct ixgbe_adv_tx_context_desc { ...@@ -2285,6 +2328,7 @@ struct ixgbe_adv_tx_context_desc {
/* Adv Transmit Descriptor Config Masks */ /* Adv Transmit Descriptor Config Masks */
#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ #define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ #define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */
#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ #define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ #define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
...@@ -2573,9 +2617,6 @@ enum ixgbe_fc_mode { ...@@ -2573,9 +2617,6 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause, ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause, ixgbe_fc_tx_pause,
ixgbe_fc_full, ixgbe_fc_full,
#ifdef CONFIG_DCB
ixgbe_fc_pfc,
#endif
ixgbe_fc_default ixgbe_fc_default
}; };
......