Commit 01f2e4ea authored by Scott Feldman, committed by Jeff Garzik

enic: add Cisco 10G Ethernet NIC driver

Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 452c1ce2
@@ -1046,6 +1046,13 @@ L: cbe-oss-dev@ozlabs.org
W: http://www.ibm.com/developerworks/power/cell/
S: Supported
CISCO 10G ETHERNET DRIVER
P: Scott Feldman
M: scofeldm@cisco.com
P: Joe Eykholt
M: jeykholt@cisco.com
S: Supported
CFAG12864B LCD DRIVER
P: Miguel Ojeda Sandonis
M: miguel.ojeda.sandonis@gmail.com
...
@@ -2388,6 +2388,13 @@ config EHEA
To compile the driver as a module, choose M here. The module
will be called ehea.
config ENIC
tristate "Cisco 10G Ethernet NIC support"
depends on PCI && INET
select INET_LRO
help
This enables support for the Cisco 10G Ethernet card.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI && INET
...
@@ -19,6 +19,7 @@ obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_TEHUTI) += tehuti.o
obj-$(CONFIG_ENIC) += enic/
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
...
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o vnic_dev.o vnic_rq.o
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
* Completion queue descriptor types
*/
enum cq_desc_types {
CQ_DESC_TYPE_WQ_ENET = 0,
CQ_DESC_TYPE_DESC_COPY = 1,
CQ_DESC_TYPE_WQ_EXCH = 2,
CQ_DESC_TYPE_RQ_ENET = 3,
CQ_DESC_TYPE_RQ_FCP = 4,
};
/* Completion queue descriptor: 16B
*
* All completion queues have this basic layout. The
* type_specific area is unique for each completion
* queue type.
*/
struct cq_desc {
__le16 completed_index;
__le16 q_number;
u8 type_specific[11];
u8 type_color;
};
#define CQ_DESC_TYPE_BITS 7
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
const struct cq_desc *desc = desc_arg;
const u8 type_color = desc->type_color;
*color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
/*
* Make sure color bit is read from desc *before* other fields
* are read from desc. Hardware guarantees color bit is last
* bit (byte) written. Adding the rmb() prevents the compiler
* and/or CPU from reordering the reads which would potentially
* result in reading stale values.
*/
rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
*completed_index = le16_to_cpu(desc->completed_index) &
CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */
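To make the color-bit handshake above concrete, here is a minimal polling sketch. The helper my_cq_poll_one and its state arguments are illustrative only; the driver's real loop is vnic_cq_service() in vnic_cq.h further down, and the sketch assumes the ring entries are plain 16B cq_desc descriptors.

/* Hypothetical sketch: poll one CQ entry using the color bit to detect
 * descriptors newly written by hardware.  Assumes the ring stride is
 * sizeof(struct cq_desc); the caller owns to_clean and last_color.
 */
static inline int my_cq_poll_one(struct cq_desc *ring, unsigned int desc_count,
	unsigned int *to_clean, u8 *last_color,
	u8 *type, u16 *q_number, u16 *completed_index)
{
	u8 color;

	cq_desc_dec(&ring[*to_clean], type, &color, q_number, completed_index);

	if (color == *last_color)
		return 0;	/* still owned by hardware; nothing new */

	/* Consume the entry; the color sense flips on every ring wrap */
	if (++(*to_clean) == desc_count) {
		*to_clean = 0;
		*last_color = !*last_color;
	}

	return 1;	/* hardware completed this descriptor */
}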
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
struct cq_enet_wq_desc {
__le16 completed_index;
__le16 q_number;
u8 reserved[11];
u8 type_color;
};
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
}
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
__le16 completed_index_flags;
__le16 q_number_rss_type_flags;
__le32 rss_hash;
__le16 bytes_written_flags;
__le16 vlan;
__le16 checksum_fcoe;
u8 flags;
u8 type_color;
};
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
u16 q_number_rss_type_flags =
le16_to_cpu(desc->q_number_rss_type_flags);
u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
1 : 0;
*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
1 : 0;
*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
1 : 0;
*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
*csum_not_calc = (q_number_rss_type_flags &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
*rss_hash = le32_to_cpu(desc->rss_hash);
*bytes_written = bytes_written_flags &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
*packet_error = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
*vlan_stripped = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
*vlan = le16_to_cpu(desc->vlan);
if (*fcoe) {
*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
*fcoe_fc_crc_ok = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
*fcoe_eof = (u8)((desc->checksum_fcoe >>
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
} else {
*fcoe_sof = 0;
*fcoe_fc_crc_ok = 0;
*fcoe_enc_error = 0;
*fcoe_eof = 0;
*checksum = le16_to_cpu(desc->checksum_fcoe);
}
*tcp_udp_csum_ok =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
*ipv4_csum_ok =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
*ipv4_fragment =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
}
#endif /* _CQ_ENET_DESC_H_ */
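As a usage illustration only (a sketch, not the driver's actual receive path, which is in a portion of this commit not shown here), a receive handler might decode one Ethernet RQ completion like this and use the decoded flags to decide whether the hardware checksum can be trusted; my_handle_rq_completion is a hypothetical helper.

/* Hypothetical sketch of decoding one Ethernet RQ completion.  Only a
 * few decoded fields are used; the rest still need storage because
 * cq_enet_rq_desc_dec() fills every output pointer.
 */
static void my_handle_rq_completion(struct cq_enet_rq_desc *desc)
{
	u8 type, color, ingress_port, fcoe, eop, sop, rss_type, csum_not_calc;
	u8 packet_error, vlan_stripped, fcoe_sof, fcoe_fc_crc_ok;
	u8 fcoe_enc_error, fcoe_eof, tcp_udp_csum_ok, udp, tcp;
	u8 ipv4_csum_ok, ipv6, ipv4, ipv4_fragment, fcs_ok;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(desc, &type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
		&rss_hash, &bytes_written, &packet_error, &vlan_stripped,
		&vlan, &checksum, &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok,
		&ipv6, &ipv4, &ipv4_fragment, &fcs_ok);

	if (packet_error || !fcs_ok)
		return;		/* drop: truncated frame or bad FCS */

	/* Trust the checksum only when hardware calculated it and reported
	 * the TCP/UDP (and, for IPv4, the IP header) checksum OK.
	 */
	if (!csum_not_calc && tcp_udp_csum_ok && (!ipv4 || ipv4_csum_ok)) {
		/* in a real driver: skb->ip_summed = CHECKSUM_UNNECESSARY */
	}
}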
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _ENIC_H_
#define _ENIC_H_
#include <linux/inet_lro.h>
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_rss.h"
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
#define DRV_VERSION "0.0.1.18163.472"
#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
#define ENIC_LRO_MAX_DESC 8
#define ENIC_LRO_MAX_AGGR 64
enum enic_cq_index {
ENIC_CQ_RQ,
ENIC_CQ_WQ,
ENIC_CQ_MAX,
};
enum enic_intx_intr_index {
ENIC_INTX_WQ_RQ,
ENIC_INTX_ERR,
ENIC_INTX_NOTIFY,
ENIC_INTX_MAX,
};
enum enic_msix_intr_index {
ENIC_MSIX_RQ,
ENIC_MSIX_WQ,
ENIC_MSIX_ERR,
ENIC_MSIX_NOTIFY,
ENIC_MSIX_MAX,
};
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ];
irqreturn_t (*isr)(int, void *);
void *devid;
};
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
struct pci_dev *pdev;
struct vnic_enet_config config;
struct vnic_dev_bar bar0;
struct vnic_dev *vdev;
struct net_device_stats net_stats;
struct timer_list notify_timer;
struct work_struct reset;
struct msix_entry msix_entry[ENIC_MSIX_MAX];
struct enic_msix_entry msix[ENIC_MSIX_MAX];
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int mc_count;
int csum_rx_enabled;
u32 port_mtu;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[1];
spinlock_t wq_lock[1];
unsigned int wq_count;
struct vlan_group *vlan_group;
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[1];
unsigned int rq_count;
int (*rq_alloc_buf)(struct vnic_rq *rq);
struct napi_struct napi;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
unsigned int intr_count;
u32 __iomem *legacy_pba; /* memory-mapped */
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
unsigned int cq_count;
};
#endif /* _ENIC_H_ */
[diff collapsed in this view]
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "enic.h"
int enic_get_vnic_config(struct enic *enic)
{
struct vnic_enet_config *c = &enic->config;
int err;
err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
if (err) {
printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
return err;
}
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(enic->vdev, \
offsetof(struct vnic_enet_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
printk(KERN_ERR PFX \
"Error getting %s, %d\n", #m, err); \
return err; \
} \
} while (0)
GET_CONFIG(flags);
GET_CONFIG(wq_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(mtu);
GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
c->rq_desc_count =
min_t(u32, ENIC_MAX_RQ_DESCS,
max_t(u32, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
if (c->mtu == 0)
c->mtu = 1500;
c->mtu = min_t(u16, ENIC_MAX_MTU,
max_t(u16, ENIC_MIN_MTU,
c->mtu));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
"wq/rq %d/%d\n",
enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
c->wq_desc_count, c->rq_desc_count);
printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
"intr timer %d\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
ENIC_SETTING(enic, LRO), c->intr_timer);
return 0;
}
void enic_add_station_addr(struct enic *enic)
{
vnic_dev_add_addr(enic->vdev, enic->mac_addr);
}
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_add_addr(enic->vdev, addr);
}
void enic_del_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_del_addr(enic->vdev, addr);
}
void enic_add_vlan(struct enic *enic, u16 vlanid)
{
u64 a0 = vlanid, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
}
void enic_del_vlan(struct enic *enic, u16 vlanid)
{
u64 a0 = vlanid, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
}
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
rss_hash_type, rss_hash_bits, rss_base_cpu,
rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
a0 = nic_cfg;
a1 = 0;
return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
}
void enic_free_vnic_resources(struct enic *enic)
{
unsigned int i;
for (i = 0; i < enic->wq_count; i++)
vnic_wq_free(&enic->wq[i]);
for (i = 0; i < enic->rq_count; i++)
vnic_rq_free(&enic->rq[i]);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
vnic_intr_free(&enic->intr[i]);
}
void enic_get_res_counts(struct enic *enic)
{
enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
enic->intr_count = vnic_dev_get_res_count(enic->vdev,
RES_TYPE_INTR_CTRL);
printk(KERN_INFO PFX "vNIC resources avail: "
"wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic->rq_count,
enic->cq_count, enic->intr_count);
}
void enic_init_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
unsigned int mask_on_assertion;
unsigned int interrupt_offset;
unsigned int error_interrupt_enable;
unsigned int error_interrupt_offset;
unsigned int cq_index;
unsigned int i;
intr_mode = vnic_dev_get_intr_mode(enic->vdev);
/* Init RQ/WQ resources.
*
* RQ[0 - n-1] point to CQ[0 - n-1]
* WQ[0 - m-1] point to CQ[n - n+m-1]
*
* Error interrupt is not enabled for MSI.
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
error_interrupt_offset = enic->intr_count - 2;
break;
default:
error_interrupt_enable = 0;
error_interrupt_offset = 0;
break;
}
for (i = 0; i < enic->rq_count; i++) {
cq_index = i;
vnic_rq_init(&enic->rq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < enic->wq_count; i++) {
cq_index = enic->rq_count + i;
vnic_wq_init(&enic->wq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
/* Init CQ resources
*
* CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
* CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
*/
for (i = 0; i < enic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
interrupt_offset = i;
break;
default:
interrupt_offset = 0;
break;
}
vnic_cq_init(&enic->cq[i],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
1 /* interrupt_enable */,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
interrupt_offset,
0 /* cq_message_addr */);
}
/* Init INTR resources
*
* mask_on_assertion is not used for INTx due to the level-
* triggered nature of INTx
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSI:
case VNIC_DEV_INTR_MODE_MSIX:
mask_on_assertion = 1;
break;
default:
mask_on_assertion = 0;
break;
}
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
enic->config.intr_timer,
enic->config.intr_timer_type,
mask_on_assertion);
}
/* Clear LIF stats
*/
vnic_dev_stats_clear(enic->vdev);
}
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
unsigned int i;
int err;
intr_mode = vnic_dev_get_intr_mode(enic->vdev);
printk(KERN_INFO PFX "vNIC resources used: "
"wq %d rq %d cq %d intr %d intr mode %s\n",
enic->wq_count, enic->rq_count,
enic->cq_count, enic->intr_count,
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown"
);
/* Allocate queue resources
*/
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
enic->config.wq_desc_count,
sizeof(struct wq_enet_desc));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < enic->rq_count; i++) {
err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
enic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.rq_desc_count,
sizeof(struct cq_enet_rq_desc));
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
sizeof(struct cq_enet_wq_desc));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < enic->intr_count; i++) {
err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
if (err)
goto err_out_cleanup;
}
/* Hook remaining resource
*/
enic->legacy_pba = vnic_dev_get_res(enic->vdev,
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
return 0;
err_out_cleanup:
enic_free_vnic_resources(enic);
return err;
}
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _ENIC_RES_H_
#define _ENIC_RES_H_
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#define ENIC_MIN_WQ_DESCS 64
#define ENIC_MAX_WQ_DESCS 4096
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
#define ENIC_MIN_MTU 576 /* minimum for IPv4 */
#define ENIC_MAX_MTU 9000
#define ENIC_MULTICAST_PERFECT_FILTERS 32
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int mss_or_csum_offset, unsigned int hdr_len,
int vlan_tag_insert, unsigned int vlan_tag,
int offload_mode, int cq_entry, int sop, int eop)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
(u16)len,
(u16)mss_or_csum_offset,
(u16)hdr_len, (u8)offload_mode,
(u8)eop, (u8)cq_entry,
0, /* fcoe_encap */
(u8)vlan_tag_insert,
(u16)vlan_tag,
0 /* loopback */);
wmb();
vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
0, 0, 0, 0, 0,
eop, 0 /* !SOP */, eop);
}
static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
0, 0, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
int ip_csum, int tcpudp_csum, int vlan_tag_insert,
unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
0, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int csum_offset, unsigned int hdr_len,
int vlan_tag_insert, unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_CSUM_L4,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr, unsigned int len,
unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
unsigned int vlan_tag, int eop)
{
enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
mss, hdr_len, vlan_tag_insert, vlan_tag,
WQ_ENET_OFFLOAD_MODE_TSO,
eop, 1 /* SOP */, eop);
}
static inline void enic_queue_rq_desc(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
dma_addr_t dma_addr, unsigned int len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
u8 type = os_buf_index ?
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
rq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
type, (u16)len);
wmb();
vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
}
struct enic;
int enic_get_vnic_config(struct enic *);
void enic_add_station_addr(struct enic *enic);
void enic_add_multicast_addr(struct enic *enic, u8 *addr);
void enic_del_multicast_addr(struct enic *enic, u8 *addr);
void enic_add_vlan(struct enic *enic, u16 vlanid);
void enic_del_vlan(struct enic *enic, u16 vlanid);
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en);
void enic_get_res_counts(struct enic *enic);
void enic_init_vnic_resources(struct enic *enic);
int enic_alloc_vnic_resources(struct enic *);
void enic_free_vnic_resources(struct enic *);
#endif /* _ENIC_RES_H_ */
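As a usage note, each helper above pairs one descriptor encode with one vnic_wq_post() call. A minimal, hypothetical transmit post for a single-fragment buffer might look like the sketch below; my_post_single_buf and its arguments are illustrative, and the driver's real transmit path is in a portion of this commit not shown here.

/* Hypothetical sketch: post one buffer that is both start and end of
 * packet, with optional VLAN tag insertion.  os_buf and dma_addr are
 * assumed to come from the caller (e.g. an skb and its DMA mapping).
 */
static void my_post_single_buf(struct vnic_wq *wq, void *os_buf,
	dma_addr_t dma_addr, unsigned int len,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	int eop = 1;	/* one fragment: this descriptor ends the packet */

	enic_queue_wq_desc(wq, os_buf, dma_addr, len,
		vlan_tag_insert, vlan_tag, eop);
}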
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
__le64 address;
__le16 length_type;
u8 reserved[6];
};
enum rq_enet_type_types {
RQ_ENET_TYPE_ONLY_SOP = 0,
RQ_ENET_TYPE_NOT_SOP = 1,
RQ_ENET_TYPE_RESV2 = 2,
RQ_ENET_TYPE_RESV3 = 3,
};
#define RQ_ENET_ADDR_BITS 64
#define RQ_ENET_LEN_BITS 14
#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
#define RQ_ENET_TYPE_BITS 2
#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
u64 address, u8 type, u16 length)
{
desc->address = cpu_to_le64(address);
desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
}
static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
u64 *address, u8 *type, u16 *length)
{
*address = le64_to_cpu(desc->address);
*length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
*type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
RQ_ENET_TYPE_MASK);
}
#endif /* _RQ_ENET_DESC_H_ */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
void vnic_cq_free(struct vnic_cq *cq)
{
vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq->ctrl = NULL;
}
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
cq->index = index;
cq->vdev = vdev;
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
if (err)
return err;
return 0;
}
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int cq_message_enable,
unsigned int interrupt_offset, u64 cq_message_addr)
{
u64 paddr;
paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &cq->ctrl->ring_base);
iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
iowrite32(color_enable, &cq->ctrl->color_enable);
iowrite32(cq_head, &cq->ctrl->cq_head);
iowrite32(cq_tail, &cq->ctrl->cq_tail);
iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
void vnic_cq_clean(struct vnic_cq *cq)
{
cq->to_clean = 0;
cq->last_color = 0;
iowrite32(0, &cq->ctrl->cq_head);
iowrite32(0, &cq->ctrl->cq_tail);
iowrite32(1, &cq->ctrl->cq_tail_color);
vnic_dev_clear_desc_ring(&cq->ring);
}
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
#include "cq_desc.h"
#include "vnic_dev.h"
/* Completion queue control */
struct vnic_cq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 flow_control_enable; /* 0x10 */
u32 pad1;
u32 color_enable; /* 0x18 */
u32 pad2;
u32 cq_head; /* 0x20 */
u32 pad3;
u32 cq_tail; /* 0x28 */
u32 pad4;
u32 cq_tail_color; /* 0x30 */
u32 pad5;
u32 interrupt_enable; /* 0x38 */
u32 pad6;
u32 cq_entry_enable; /* 0x40 */
u32 pad7;
u32 cq_message_enable; /* 0x48 */
u32 pad8;
u32 interrupt_offset; /* 0x50 */
u32 pad9;
u64 cq_message_addr; /* 0x58 */
u32 pad10;
};
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
unsigned int to_clean;
unsigned int last_color;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
unsigned int work_to_do,
int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
u8 type, u16 q_number, u16 completed_index, void *opaque),
void *opaque)
{
struct cq_desc *cq_desc;
unsigned int work_done = 0;
u16 q_number, completed_index;
u8 type, color;
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
cq_desc_dec(cq_desc, &type, &color,
&q_number, &completed_index);
while (color != cq->last_color) {
if ((*q_service)(cq->vdev, cq_desc, type,
q_number, completed_index, opaque))
break;
cq->to_clean++;
if (cq->to_clean == cq->ring.desc_count) {
cq->to_clean = 0;
cq->last_color = cq->last_color ? 0 : 1;
}
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
cq_desc_dec(cq_desc, &type, &color,
&q_number, &completed_index);
work_done++;
if (work_done >= work_to_do)
break;
}
return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int message_enable,
unsigned int interrupt_offset, u64 message_addr);
void vnic_cq_clean(struct vnic_cq *cq);
#endif /* _VNIC_CQ_H_ */
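vnic_cq_service() drives completion handling through a caller-supplied callback. A minimal, hypothetical callback and call site could look like the sketch below; my_cq_service_cb and my_service_cq are illustrative names, and the driver's real callbacks are in a portion of this commit not shown here.

/* Hypothetical q_service callback: the opaque pointer carries caller
 * context; returning non-zero stops the service loop early.
 */
static int my_cq_service_cb(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	/* e.g. free transmit buffers up to completed_index on queue
	 * q_number, using the context passed in opaque */
	return 0;	/* keep servicing */
}

/* Call site: process at most 'budget' completions, e.g. from NAPI poll */
static unsigned int my_service_cq(struct vnic_cq *cq, unsigned int budget,
	void *context)
{
	return vnic_cq_service(cq, budget, my_cq_service_cb, context);
}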
[diff collapsed in this view]
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#ifndef VNIC_PADDR_TARGET
#define VNIC_PADDR_TARGET 0x0000000000000000ULL
#endif
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
VNIC_DEV_INTR_MODE_MSI,
VNIC_DEV_INTR_MODE_MSIX,
};
struct vnic_dev_bar {
void __iomem *vaddr;
dma_addr_t bus_addr;
unsigned long len;
};
struct vnic_dev_ring {
void *descs;
size_t size;
dma_addr_t base_addr;
size_t base_align;
void *descs_unaligned;
size_t size_unaligned;
dma_addr_t base_addr_unaligned;
unsigned int desc_size;
unsigned int desc_count;
unsigned int desc_avail;
};
struct vnic_dev;
struct vnic_stats;
void *vnic_dev_priv(struct vnic_dev *vdev);
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index);
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
u32 vnic_dev_mtu(struct vnic_dev *vdev);
int vnic_dev_close(struct vnic_dev *vdev);
int vnic_dev_enable(struct vnic_dev *vdev);
int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
void vnic_dev_unregister(struct vnic_dev *vdev);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
#endif /* _VNIC_DEV_H_ */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
#define _CMD_NBITS 14
#define _CMD_VTYPEBITS 10
#define _CMD_FLAGSBITS 6
#define _CMD_DIRBITS 2
#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
#define _CMD_NSHIFT 0
#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
/*
* Direction bits (from host perspective).
*/
#define _CMD_DIR_NONE 0U
#define _CMD_DIR_WRITE 1U
#define _CMD_DIR_READ 2U
#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
/*
* Flag bits.
*/
#define _CMD_FLAGS_NONE 0U
#define _CMD_FLAGS_NOWAIT 1U
/*
* vNIC type bits.
*/
#define _CMD_VTYPE_NONE 0U
#define _CMD_VTYPE_ENET 1U
#define _CMD_VTYPE_FC 2U
#define _CMD_VTYPE_SCSI 4U
#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
/*
* Used to create cmds.
*/
#define _CMDCF(dir, flags, vtype, nr) \
(((dir) << _CMD_DIRSHIFT) | \
((flags) << _CMD_FLAGSSHIFT) | \
((vtype) << _CMD_VTYPESHIFT) | \
((nr) << _CMD_NSHIFT))
#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
/*
* Used to decode cmds.
*/
#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
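/* Illustrative example: CMD_VLAN_ADD below is built with
 * _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), so
 * _CMD_N(CMD_VLAN_ADD) == 14, _CMD_VTYPE(CMD_VLAN_ADD) == _CMD_VTYPE_ENET,
 * _CMD_DIR(CMD_VLAN_ADD) == _CMD_DIR_WRITE and the NOWAIT flag is set.
 */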
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
* out: a0=value */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
/* stats dump in mem: (u64)a0=paddr to stats area,
* (u16)a1=sizeof stats area */
CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
/* hang detection notification */
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
/* disable/enable promisc mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
/* disable/enable all-multi mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
/* del addr from (u48)a0 */
CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
/* add VLAN id in (u16)a0 */
CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
/* nic_cfg in (u32)a0 */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
/* initiate softreset */
CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
/* softreset status:
* out: a0=0 reset complete, a0=1 reset in progress */
CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
/* set struct vnic_devcmd_notify buffer in mem:
* in:
* (u64)a0=paddr to notify (set paddr=0 to unset)
* (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
* (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
* out:
* (u32)a1 = effective size
*/
CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
* (u8)a1=PXENV_UNDI_xxx */
CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
/* open status:
* out: a0=0 open complete, a0=1 open in progress */
CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
/* close vnic */
CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
/* variant of CMD_INIT, with provisioning info
* (u64)a0=paddr of vnic_devcmd_provinfo
* (u32)a1=sizeof provision info */
CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
/* enable virtual link */
CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
/* disable virtual link */
CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
/* init status:
* out: a0=0 init complete, a0=1 init in progress
* if a0=0, a1=errno */
CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
* (u8)a1=INT13_CMD_xxx */
CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
/* undo initialize of virtual link */
CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
};
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
#define CMD_PFILTER_BROADCAST 0x04
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
};
enum vnic_devcmd_error {
ERR_SUCCESS = 0,
ERR_EINVAL = 1,
ERR_EFAULT = 2,
ERR_EPERM = 3,
ERR_EBUSY = 4,
ERR_ECMDUNKNOWN = 5,
ERR_EBADSTATE = 6,
ERR_ENOMEM = 7,
ERR_ETIMEDOUT = 8,
ERR_ELINKDOWN = 9,
};
struct vnic_devcmd_fw_info {
char fw_version[32];
char fw_build[32];
char hw_version[32];
char hw_serial_number[32];
};
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
u32 link_state; /* link up == 1 */
u32 port_speed; /* effective port speed (rate limit) */
u32 mtu; /* MTU */
u32 msglvl; /* requested driver msg lvl */
u32 uif; /* uplink interface */
u32 status; /* status bits (see VNIC_STF_*) */
u32 error; /* error code (see ERR_*) for first ERR */
};
#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
u8 data[0];
};
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
*
* If cmd completed successfully STAT_ERROR will be clear
* and args registers contain cmd-specific results.
*
* If cmd error, STAT_ERROR will be set and args[0] contains error code.
*
* status register is read-only. While STAT_BUSY is set,
* all other register contents are read-only.
*/
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
struct vnic_devcmd {
u32 status; /* RO */
u32 cmd; /* RW */
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
#endif /* _VNIC_DEVCMD_H_ */
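The comment block above describes the cmd/status handshake in prose. The hedged sketch below shows one way that handshake could be driven; my_issue_devcmd is a hypothetical helper, the real implementation is vnic_dev_cmd() in a portion of this commit not shown here, and details such as 64-bit register access and timeout granularity are assumptions.

/* Hypothetical sketch of the devcmd handshake: write args and cmd,
 * poll STAT_BUSY until the command completes, then check STAT_ERROR.
 * Assumes a platform where writeq()/readq() are available.
 */
static int my_issue_devcmd(struct vnic_devcmd __iomem *devcmd,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait_ms)
{
	u32 status;
	int i;

	writeq(*a0, &devcmd->args[0]);
	writeq(*a1, &devcmd->args[1]);
	wmb();
	iowrite32(cmd, &devcmd->cmd);	/* hardware sets STAT_BUSY */

	if (_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)
		return 0;	/* fire-and-forget command */

	for (i = 0; i < wait_ms; i++) {
		mdelay(1);
		status = ioread32(&devcmd->status);
		if (status & STAT_BUSY)
			continue;	/* still executing */
		if (status & STAT_ERROR)
			return -(int)readq(&devcmd->args[0]);
		if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
			*a0 = readq(&devcmd->args[0]);
			*a1 = readq(&devcmd->args[1]);
		}
		return 0;
	}

	return -ETIMEDOUT;
}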
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
u32 wq_desc_count;
u32 rq_desc_count;
u16 mtu;
u16 intr_timer;
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
};
#define VENETF_TSO 0x1 /* TSO enabled */
#define VENETF_LRO 0x2 /* LRO enabled */
#define VENETF_RXCSUM 0x4 /* RX csum enabled */
#define VENETF_TXCSUM 0x8 /* TX csum enabled */
#define VENETF_RSS 0x10 /* RSS enabled */
#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
#endif /* _VNIC_ENIC_H_ */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
void vnic_intr_free(struct vnic_intr *intr)
{
intr->ctrl = NULL;
}
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index)
{
intr->index = index;
intr->vdev = vdev;
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
index);
return -EINVAL;
}
return 0;
}
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
}
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#define VNIC_INTR_TIMER_MAX 0xffff
#define VNIC_INTR_TIMER_TYPE_ABS 0
#define VNIC_INTR_TIMER_TYPE_QUIET 1
/* Interrupt control */
struct vnic_intr_ctrl {
u32 coalescing_timer; /* 0x00 */
u32 pad0;
u32 coalescing_value; /* 0x08 */
u32 pad1;
u32 coalescing_type; /* 0x10 */
u32 pad2;
u32 mask_on_assertion; /* 0x18 */
u32 pad3;
u32 mask; /* 0x20 */
u32 pad4;
u32 int_credits; /* 0x28 */
u32 pad5;
u32 int_credit_return; /* 0x30 */
u32 pad6;
};
struct vnic_intr {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
};
static inline void vnic_intr_unmask(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->mask);
}
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
unsigned int credits, int unmask, int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT 16
#define VNIC_INTR_RESET_TIMER_SHIFT 17
u32 int_credit_return = (credits & 0xffff) |
(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
{
/* get and ack interrupt in one read (clear-and-ack-on-read) */
return ioread32(legacy_pba);
}
void vnic_intr_free(struct vnic_intr *intr);
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
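As a usage note, vnic_intr_return_credits() packs the credit count, unmask request and timer reset into a single register write. A hypothetical interrupt-completion helper might use it as sketched below; my_intr_done is an illustrative name.

/* Hypothetical sketch: after handling work_done events in an ISR or at
 * the end of a NAPI poll, return the credits, re-enable the interrupt
 * and restart the coalescing timer in one write.
 */
static void my_intr_done(struct vnic_intr *intr, unsigned int work_done)
{
	int unmask = 1;		/* re-enable the interrupt */
	int reset_timer = 1;	/* restart the coalescing timer */

	vnic_intr_return_credits(intr, work_done, unmask, reset_timer);
}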
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
#define NIC_CFG_RSS_ENABLE (1UL << 22)
#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
#define NIC_CFG_RSS_ENABLE_SHIFT 22
#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu,
u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
<< NIC_CFG_RSS_HASH_TYPE_SHIFT) |
((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
<< NIC_CFG_RSS_HASH_BITS_SHIFT) |
((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
<< NIC_CFG_RSS_BASE_CPU_SHIFT) |
((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
<< NIC_CFG_RSS_ENABLE_SHIFT) |
((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
}
#endif /* _VNIC_NIC_H_ */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
#define VNIC_RES_VERSION 0x00000000L
/* vNIC resource types */
enum vnic_res_type {
RES_TYPE_EOL, /* End-of-list */
RES_TYPE_WQ, /* Work queues */
RES_TYPE_RQ, /* Receive queues */
RES_TYPE_CQ, /* Completion queues */
RES_TYPE_RSVD1,
RES_TYPE_NIC_CFG, /* Enet NIC config registers */
RES_TYPE_RSVD2,
RES_TYPE_RSVD3,
RES_TYPE_RSVD4,
RES_TYPE_RSVD5,
RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status, r2c */
RES_TYPE_RSVD6,
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
RES_TYPE_MAX, /* Count of resource types */
};
struct vnic_resource_header {
u32 magic;
u32 version;
};
struct vnic_resource {
u8 type;
u8 bar;
u8 pad[2];
u32 bar_offset;
u32 count;
};
#endif /* _VNIC_RESOURCE_H_ */
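The two structures above describe a discovery table at the start of the register BAR: a header followed by resource entries terminated by RES_TYPE_EOL. The sketch below shows one way such a table might be walked; my_walk_resources is a hypothetical helper, and the driver's real discovery code (in a portion of this commit not shown here) may lay out and validate the table differently.

/* Hypothetical sketch: validate the resource header and walk the entry
 * list, assuming the entries immediately follow the header.
 */
static void my_walk_resources(void __iomem *bar_vaddr)
{
	struct vnic_resource_header __iomem *rh = bar_vaddr;
	struct vnic_resource __iomem *r =
		(struct vnic_resource __iomem *)(rh + 1);

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION)
		return;		/* not a vNIC register BAR */

	while (ioread8(&r->type) != RES_TYPE_EOL) {
		/* ioread8(&r->bar), ioread32(&r->bar_offset) and
		 * ioread32(&r->count) locate 'count' instances of this
		 * resource type within the given BAR. */
		r++;
	}
}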
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
vdev = rq->vdev;
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!rq->bufs[i]) {
printk(KERN_ERR "Failed to alloc rq_bufs\n");
return -ENOMEM;
}
}
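	/* Link the bufs into one circular list that shadows the descriptor
	 * ring: entries chain linearly within a block, the last entry of a
	 * block points at the first entry of the next block, and the final
	 * descriptor wraps back to bufs[0].
	 */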
for (i = 0; i < blks; i++) {
buf = rq->bufs[i];
for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)rq->ring.descs +
rq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = rq->bufs[0];
break;
} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
buf->next = rq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
rq->to_use = rq->to_clean = rq->bufs[0];
rq->buf_index = 0;
return 0;
}
void vnic_rq_free(struct vnic_rq *rq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = rq->vdev;
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
kfree(rq->bufs[i]);
rq->bufs[i] = NULL;
}
rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
rq->index = index;
rq->vdev = vdev;
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
vnic_rq_disable(rq);
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_rq_alloc_bufs(rq);
if (err) {
vnic_rq_free(rq);
return err;
}
return 0;
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
u32 fetch_index;
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
iowrite32(0, &rq->ctrl->dropped_packet_count);
iowrite32(0, &rq->ctrl->error_status);
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
return ioread32(&rq->ctrl->error_status);
}
void vnic_rq_enable(struct vnic_rq *rq)
{
iowrite32(1, &rq->ctrl->enable);
}
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
struct vnic_rq_buf *buf;
u32 fetch_index;
BUG_ON(ioread32(&rq->ctrl->enable));
buf = rq->to_clean;
while (vnic_rq_desc_used(rq) > 0) {
(*buf_clean)(rq, buf);
buf = rq->to_clean = buf->next;
rq->ring.desc_avail++;
}
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
vnic_dev_clear_desc_ring(&rq->ring);
}
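On teardown, the ordering implied by the code above is vnic_rq_disable(), then vnic_rq_clean() with a callback that releases every buffer still posted, then vnic_rq_free(). The callback below is a minimal sketch, not the driver's actual code: it only receives the rq and buf, so the pci_dev it unmaps against is shown as an assumed file-scope stand-in for driver private data (and <linux/skbuff.h> is assumed to be included).
/* Sketch only: release one posted receive buffer for vnic_rq_clean(). */
static struct pci_dev *example_clean_pdev;	/* assumed driver context */

static void example_rq_buf_clean(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct sk_buff *skb = buf->os_buf;

	if (!skb)
		return;

	pci_unmap_single(example_clean_pdev, buf->dma_addr, buf->len,
		PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(skb);
	buf->os_buf = NULL;
}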
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/* Receive queue control */
struct vnic_rq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 posted_index; /* 0x10 */
u32 pad1;
u32 cq_index; /* 0x18 */
u32 pad2;
u32 enable; /* 0x20 */
u32 pad3;
u32 running; /* 0x28 */
u32 pad4;
u32 fetch_index; /* 0x30 */
u32 pad5;
u32 error_interrupt_enable; /* 0x38 */
u32 pad6;
u32 error_interrupt_offset; /* 0x40 */
u32 pad7;
u32 error_status; /* 0x48 */
u32 pad8;
u32 dropped_packet_count; /* 0x50 */
u32 pad9;
u32 dropped_packet_count_rc; /* 0x58 */
u32 pad10;
};
/* Break the vnic_rq_buf allocations into blocks of 64 entries */
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
dma_addr_t dma_addr;
void *os_buf;
unsigned int os_buf_index;
unsigned int len;
unsigned int index;
void *desc;
};
struct vnic_rq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
struct vnic_rq_buf *to_use;
struct vnic_rq_buf *to_clean;
void *os_buf_head;
unsigned int buf_index;
unsigned int pkts_outstanding;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
/* how many does SW own? */
return rq->ring.desc_avail;
}
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
/* how many does HW own? */
return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
return rq->to_use->desc;
}
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
return rq->to_use->index;
}
static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
return rq->buf_index++;
}
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
dma_addr_t dma_addr, unsigned int len)
{
struct vnic_rq_buf *buf = rq->to_use;
buf->os_buf = os_buf;
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
buf = buf->next;
rq->to_use = buf;
rq->ring.desc_avail--;
	/* Move the posted_index every nth descriptor */
#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
#endif
if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
iowrite32(buf->index, &rq->ctrl->posted_index);
}
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
}
enum desc_return_options {
VNIC_RQ_RETURN_DESC,
VNIC_RQ_DEFER_RETURN_DESC,
};
static inline void vnic_rq_service(struct vnic_rq *rq,
struct cq_desc *cq_desc, u16 completed_index,
int desc_return, void (*buf_service)(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque), void *opaque)
{
struct vnic_rq_buf *buf;
int skipped;
buf = rq->to_clean;
while (1) {
skipped = (buf->index != completed_index);
(*buf_service)(rq, cq_desc, buf, skipped, opaque);
if (desc_return == VNIC_RQ_RETURN_DESC)
rq->ring.desc_avail++;
rq->to_clean = buf->next;
if (!skipped)
break;
buf = rq->to_clean;
}
}
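/* Sketch, not part of this patch: a minimal buf_service callback for
 * vnic_rq_service().  'skipped' is non-zero for descriptors the hardware
 * bypassed on its way to the one reported in completed_index; a real
 * callback would unmap buf->dma_addr and either drop the buffer or set
 * its length from the CQ descriptor and hand it to the stack.  This one
 * only tallies completions via the opaque pointer, e.g.
 *
 *	unsigned int stats[2] = { 0, 0 };
 *	vnic_rq_service(rq, cq_desc, completed_index, VNIC_RQ_RETURN_DESC,
 *		example_rq_buf_service, stats);
 */
static void example_rq_buf_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	unsigned int *stats = opaque;	/* [0] = completed, [1] = skipped */

	if (skipped)
		stats[1]++;
	else
		stats[0]++;
}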
static inline int vnic_rq_fill(struct vnic_rq *rq,
int (*buf_fill)(struct vnic_rq *rq))
{
int err;
while (vnic_rq_desc_avail(rq) > 1) {
err = (*buf_fill)(rq);
if (err)
return err;
}
return 0;
}
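/* Sketch, not part of this patch: one way a buf_fill callback for
 * vnic_rq_fill() could post receive buffers.  The callback only carries
 * the rq, so a real driver reaches its net_device and pci_dev through
 * its own private data; the file-scope pointers, the 1536-byte buffer
 * size and <linux/skbuff.h>/<linux/netdevice.h> are assumptions here.
 */
static struct net_device *example_fill_netdev;	/* assumed driver context */
static struct pci_dev *example_fill_pdev;	/* assumed driver context */

static int example_rq_buf_fill(struct vnic_rq *rq)
{
	unsigned int len = 1536;	/* assumed buffer size */
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb(example_fill_netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(example_fill_pdev, skb->data, len,
		PCI_DMA_FROMDEVICE);

	/* A real driver would also encode the device-visible descriptor
	 * returned by vnic_rq_next_desc() with dma_addr and len; that
	 * layout is defined in a header not shown here.
	 */
	vnic_rq_post(rq, skb, vnic_rq_next_buf_index(rq), dma_addr, len);

	return 0;
}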
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
#endif /* _VNIC_RQ_H_ */
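Putting the pieces together, a plausible bring-up order for one RQ would be: allocate the ring and its buffer bookkeeping, program the queue registers, fill the ring with receive buffers, then enable it. The sketch below assumes that ordering and reuses the hypothetical example_rq_buf_fill() above; the index, descriptor count and descriptor size are placeholders, not values taken from this patch.
/* Sketch only: an assumed RQ bring-up sequence. */
static int example_rq_start(struct vnic_dev *vdev, struct vnic_rq *rq)
{
	int err;

	err = vnic_rq_alloc(vdev, rq, 0 /* index */,
		256 /* desc_count */, 16 /* desc_size, placeholder */);
	if (err)
		return err;

	vnic_rq_init(rq, 0 /* cq_index */,
		1 /* error_interrupt_enable */,
		0 /* error_interrupt_offset */);

	err = vnic_rq_fill(rq, example_rq_buf_fill);
	if (err) {
		vnic_rq_free(rq);
		return err;
	}

	vnic_rq_enable(rq);
	return 0;
}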
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#ifndef _VNIC_RSS_H_
#define _VNIC_RSS_H_
/* RSS key array */
union vnic_rss_key {
struct {
u8 b[10];
u8 b_pad[6];
} key[4];
u64 raw[8];
};
/* RSS cpu array */
union vnic_rss_cpu {
struct {
		u8 b[4];
u8 b_pad[4];
} cpu[32];
u64 raw[32];
};
void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
#endif /* _VNIC_RSS_H_ */
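The setter and getter bodies are not shown in this hunk, but the union layout above implies a 40-byte RSS key spread across four 16-byte slots, 10 key bytes per slot. A plausible packer, offered only as an inference from that layout and not as the patch's actual implementation, could look like this (assuming <linux/string.h> for memset):
/* Sketch only: pack a 40-byte key into the slot/pad layout above. */
static void example_set_rss_key(union vnic_rss_key *rss_key, const u8 *key)
{
	unsigned int i;

	memset(rss_key, 0, sizeof(*rss_key));
	for (i = 0; i < 4 * 10; i++)
		rss_key->key[i / 10].b[i % 10] = key[i];
}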
...@@ -1411,6 +1411,8 @@
#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
#define PCI_VENDOR_ID_CISCO 0x1137
#define PCI_VENDOR_ID_ZIATECH 0x1138
#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
...