Commit b0ba9d5f authored by Casey Leedom, committed by David S. Miller

net/cxgb4: Use new PCI_DEV_FLAGS_NO_RELAXED_ORDERING flag

The cxgb4 Ethernet driver now queries the PCIe configuration space to determine
whether it may send TLPs to the Root Complex with the Relaxed Ordering
Attribute set.

Remove enable_pcie_relaxed_ordering(), which unconditionally set the PCIe
Capability Device Control[Relaxed Ordering Enable] bit in the probe routine,
so that the driver does not send Relaxed Ordering TLPs to a Root Complex
that cannot handle them.
Signed-off-by: Casey Leedom <leedom@chelsio.com>
Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Casey Leedom <leedom@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 077fa19c
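
In short, the patch replaces the unconditional probe-time enable with a
probe-time query plus per-queue gating. A condensed sketch of the resulting
flow (drawn from the diff below, not the literal driver code) looks like this:

	/* Probe (init_one): record whether the Root Complex tolerates Relaxed
	 * Ordering.  pcie_relaxed_ordering_enabled() is the new PCI core
	 * helper; ROOT_NO_RELAXED_ORDERING is the adapter flag this patch adds.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= ROOT_NO_RELAXED_ORDERING;

	/* RX queue setup (t4_sge_alloc_rxq): only request Relaxed Ordering for
	 * Free List fetches and data writes when the Root Complex allows it.
	 */
	int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);

	c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
				     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
				     FW_IQ_CMD_FL0DATARO_V(relaxed) |
				     FW_IQ_CMD_FL0PADEN_F);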
@@ -529,6 +529,7 @@ enum { /* adapter flags */
 	USING_SOFT_PARAMS = (1 << 6),
 	MASTER_PF         = (1 << 7),
 	FW_OFLD_CONN      = (1 << 9),
+	ROOT_NO_RELAXED_ORDERING = (1 << 10),
 };
 
 enum {
@@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev)
 		    dev->name, adap->params.vpd.id, adap->name, buf);
 }
 
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
-	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
 /*
  * Free the following resources:
  *  - memory used for tables
@@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	pci_enable_pcie_error_reporting(pdev);
-	enable_pcie_relaxed_ordering(pdev);
 	pci_set_master(pdev);
 	pci_save_state(pdev);
@@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = DFLT_MSG_ENABLE;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
+	/* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
+	 * Ingress Packet Data to Free List Buffers in order to allow for
+	 * chipset performance optimizations between the Root Complex and
+	 * Memory Controllers.  (Messages to the associated Ingress Queue
+	 * notifying new Packet Placement in the Free List Buffers will be
+	 * sent without the Relaxed Ordering Attribute, thus guaranteeing
+	 * that all preceding PCIe Transaction Layer Packets will be
+	 * processed first.)  But some Root Complexes have various issues
+	 * with Upstream Transaction Layer Packets with the Relaxed Ordering
+	 * Attribute set.  For PCIe devices below such a Root Complex, the
+	 * PCI core clears the Relaxed Ordering Enable bit in their
+	 * configuration space, so we check our PCIe configuration space to
+	 * see if it's flagged with advice against using Relaxed Ordering.
+	 */
+	if (!pcie_relaxed_ordering_enabled(pdev))
+		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->tid_release_lock);
 	spin_lock_init(&adapter->win0_lock);
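
For context, the PCI core helper that the probe path now consults reports
whether the Relaxed Ordering Enable bit is still set in the device's PCIe
Device Control register; the core clears that bit during device configuration
when an upstream component (e.g. the Root Port) is quirked with
PCI_DEV_FLAGS_NO_RELAXED_ORDERING. A rough sketch of that helper (approximate,
not the literal upstream source):

	bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
	{
		u16 devctl;

		/* Read the PCIe Device Control register and test the
		 * Relaxed Ordering Enable bit.
		 */
		pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &devctl);
		return !!(devctl & PCI_EXP_DEVCTL_RELAX_EN);
	}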
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	struct fw_iq_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
+	int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
 
 	/* Size needs to be multiple of 16, including status entry. */
 	iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
 		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
-					     FW_IQ_CMD_FL0FETCHRO_F |
-					     FW_IQ_CMD_FL0DATARO_F |
+					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
 					     FW_IQ_CMD_FL0PADEN_F);
 		if (cong >= 0)
 			c.iqns_to_fl0congen |=
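
The switch from the _F macros to _V(relaxed) works because the cxgb4 firmware
API encodes single-bit fields as shift/value/flag macro triples, along the
lines of the sketch below (the shift value is hypothetical, for illustration
only):

	#define FW_IQ_CMD_FL0FETCHRO_S    6	/* hypothetical bit position */
	#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S)
	#define FW_IQ_CMD_FL0FETCHRO_F    FW_IQ_CMD_FL0FETCHRO_V(1U)

So FW_IQ_CMD_FL0FETCHRO_V(relaxed) sets the Free List Fetch Relaxed Ordering
bit only when relaxed is 1, i.e. when ROOT_NO_RELAXED_ORDERING is not set,
whereas the old _F form requested Relaxed Ordering unconditionally.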