Commit 547f2735 authored by David S. Miller

Merge branch 'mlx4-next'

Or Gerlitz says:

====================
Mellanox ethernet driver update Oct-30-2014

The first patch, from Saeed, fixes a bug in the last net-next batch in which
a VF could get access to set port configuration; the next patch, from Amir,
fixes a race in the port VPI logic. Next come two performance patches from Ido.

The patch that adds checksum-complete status for GRE and similar packets was
preceded by a patch converting the driver to use only napi_gro_receive,
instead of the current code, which goes through napi_gro_frags on its usual
path. Eric D. has thoughts and suggestions on that change that we want to
take the time to consider, so for the time being we have dropped that patch
and the ones that depend on it.

Changes from V0:
  - have the caller provide the __GFP_COLD hint to the service function
  - dropped the patch that changes the GRO logic and the subsequent dependent
    patches.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d75b1ade d475c95b
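
Of note, the __GFP_COLD item in the change log above reflects a common kernel
pattern: the caller, which knows its allocation context (process vs. atomic)
and how the buffer will be used, passes the complete gfp mask down to the
allocation service routine rather than having that routine hard-code the
hints. A minimal sketch of the idea with hypothetical names, not code from
this series (__GFP_COLD is the era-appropriate hint; later kernels removed it):

/* Illustrative only: the caller owns the whole gfp decision. */
static struct page *rx_alloc_page(unsigned int order, gfp_t gfp)
{
        return alloc_pages(gfp, order);
}

static int rx_refill_one(struct page **slot, bool atomic_ctx)
{
        /* Cache-cold pages suit RX buffers that DMA writes first. */
        gfp_t gfp = (atomic_ctx ? GFP_ATOMIC : GFP_KERNEL) | __GFP_COLD;

        *slot = rx_alloc_page(0, gfp);
        return *slot ? 0 : -ENOMEM;
}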
@@ -990,11 +990,11 @@ static struct mlx4_cmd_info cmd_info[] = {
 	{
 		.opcode = MLX4_CMD_CONFIG_DEV,
 		.has_inbox = false,
-		.has_outbox = false,
+		.has_outbox = true,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_CONFIG_DEV_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_ALLOC_RES,
@@ -1345,7 +1345,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL,
+		.wrapper = mlx4_ACCESS_REG_wrapper,
 	},
 	/* Native multicast commands are not available for guests */
 	{
......
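
The cmd_info[] change above is what actually exposes CONFIG_DEV to guests:
has_outbox = true lets the query form return data through a mailbox, and the
wrapper swap replaces mlx4_CMD_EPERM_wrapper, which unconditionally rejects
slaves, with a handler that filters by opcode modifier (see
mlx4_CONFIG_DEV_wrapper further down). A reduced sketch of how such a
dispatch table is consulted — hypothetical code, only modeled on the pattern
visible here:

/* Hypothetical reduction of the slave-command dispatch pattern. */
struct cmd_entry {
        u16     opcode;
        bool    has_outbox;
        int     (*wrapper)(int slave, u8 op_modifier);
};

static int dispatch(const struct cmd_entry *tbl, size_t n,
                    u16 opcode, int slave, u8 op_modifier)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (tbl[i].opcode != opcode)
                        continue;
                /* A wrapper may veto or rewrite the slave's command;
                 * entries without one are forwarded as-is.
                 */
                if (tbl[i].wrapper)
                        return tbl[i].wrapper(slave, op_modifier);
                return 0;       /* forward to firmware unchanged */
        }
        return -EINVAL;         /* unknown opcode */
}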
@@ -74,7 +74,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 	page_alloc->page_size = PAGE_SIZE << order;
 	page_alloc->page = page;
 	page_alloc->dma = dma;
-	page_alloc->page_offset = frag_info->frag_align;
+	page_alloc->page_offset = 0;
 	/* Not doing get_page() for each frag is a big win
 	 * on asymmetric workloads. Note we can not use atomic_set().
 	 */
@@ -156,7 +156,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

 		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
-				     frag_info, GFP_KERNEL))
+				     frag_info, GFP_KERNEL | __GFP_COLD))
 			goto out;
 	}
 	return 0;
@@ -268,7 +268,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
-						    GFP_KERNEL)) {
+						    GFP_KERNEL | __GFP_COLD)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
 					en_err(priv, "Failed to allocate enough rx buffers\n");
 					return -ENOMEM;
@@ -635,7 +635,8 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
 	int index = ring->prod & ring->size_mask;

 	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
+		if (mlx4_en_prepare_rx_desc(priv, ring, index,
+					    GFP_ATOMIC | __GFP_COLD))
 			break;
 		ring->prod++;
 		index = ring->prod & ring->size_mask;
@@ -945,15 +946,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 			(eff_mtu > buf_size + frag_sizes[i]) ?
 				frag_sizes[i] : eff_mtu - buf_size;
 		priv->frag_info[i].frag_prefix_size = buf_size;
-		if (!i) {
-			priv->frag_info[i].frag_align = NET_IP_ALIGN;
-			priv->frag_info[i].frag_stride =
-				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
-		} else {
-			priv->frag_info[i].frag_align = 0;
-			priv->frag_info[i].frag_stride =
-				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
-		}
+		priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
+						       SMP_CACHE_BYTES);
 		buf_size += priv->frag_info[i].frag_size;
 		i++;
 	}
@@ -966,11 +960,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	       eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
 		en_err(priv,
-		       " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
+		       " frag:%d - size:%d prefix:%d stride:%d\n",
 		       i,
 		       priv->frag_info[i].frag_size,
 		       priv->frag_info[i].frag_prefix_size,
-		       priv->frag_info[i].frag_align,
 		       priv->frag_info[i].frag_stride);
 	}
 }
......
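
With frag_align gone, every RX fragment now starts at page_offset 0 and all
strides are computed uniformly. A worked example of the arithmetic, assuming
SMP_CACHE_BYTES == 64, NET_IP_ALIGN == 2 and a 1536-byte first fragment
(illustrative values, not taken from this diff):

/* Stride arithmetic before and after this change:
 *   old, first fragment only: ALIGN(1536 + 2, 64) = ALIGN(1538, 64) = 1600
 *   new, every fragment:      ALIGN(1536, 64)                       = 1536
 * i.e. the formerly special-cased first fragment no longer pays a
 * 64-byte padding tax per packet under these assumed sizes.
 */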
@@ -141,7 +141,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[12] = "Large cache line (>64B) CQE stride support",
 		[13] = "Large cache line (>64B) EQE stride support",
 		[14] = "Ethernet protocol control support",
-		[15] = "Ethernet Backplane autoneg support"
+		[15] = "Ethernet Backplane autoneg support",
+		[16] = "CONFIG DEV support"
 	};
 	int i;
@@ -574,6 +575,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
+#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
@@ -749,6 +751,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
 	MLX4_GET(dev_cap->bmme_flags, outbox,
 		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
+	if (field & 0x20)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1849,14 +1854,18 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 struct mlx4_config_dev {
 	__be32	update_flags;
-	__be32	rsdv1[3];
+	__be32	rsvd1[3];
 	__be16	vxlan_udp_dport;
 	__be16	rsvd2;
+	__be32	rsvd3[27];
+	__be16	rsvd4;
+	u8	rsvd5;
+	u8	rx_checksum_val;
 };

 #define MLX4_VXLAN_UDP_DPORT (1 << 0)

-static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
 {
 	int err;
 	struct mlx4_cmd_mailbox *mailbox;
@@ -1874,6 +1883,77 @@ static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
 	return err;
 }

+static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+	int err;
+	struct mlx4_cmd_mailbox *mailbox;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+	if (!err)
+		memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return err;
+}
+
+/* Conversion between the HW values and the actual functionality.
+ * The value represented by the array index,
+ * and the functionality determined by the flags.
+ */
+static const u8 config_dev_csum_flags[] = {
+	[0] =	0,
+	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
+	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
+		MLX4_RX_CSUM_MODE_L4,
+	[3] =	MLX4_RX_CSUM_MODE_L4 |
+		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
+		MLX4_RX_CSUM_MODE_MULTI_VLAN
+};
+
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+			      struct mlx4_config_dev_params *params)
+{
+	struct mlx4_config_dev config_dev;
+	int err;
+	u8 csum_mask;
+
+#define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
+#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
+#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4
+
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
+		return -ENOTSUPP;
+
+	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
+	if (err)
+		return err;
+
+	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
+			CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+		return -EINVAL;
+	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
+
+	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
+			CONFIG_DEV_RX_CSUM_MODE_MASK;
+
+	if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
+		return -EINVAL;
+	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
+
+	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
+
 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
 {
 	struct mlx4_config_dev config_dev;
@@ -1882,7 +1962,7 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
 	config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
 	config_dev.vxlan_udp_dport = udp_port;

-	return mlx4_CONFIG_DEV(dev, &config_dev);
+	return mlx4_CONFIG_DEV_set(dev, &config_dev);
 }
 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
@@ -2220,7 +2300,7 @@ static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
 	memcpy(inbuf->reg_data, reg_data, reg_len);
 	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
 			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
-			   MLX4_CMD_NATIVE);
+			   MLX4_CMD_WRAPPED);
 	if (err)
 		goto out;
@@ -2263,3 +2343,31 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
 			 method, sizeof(*ptys_reg), ptys_reg);
 }
 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
+
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd)
+{
+	struct mlx4_access_reg *inbuf = inbox->buf;
+	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
+	u16 reg_id = be16_to_cpu(inbuf->reg_id);
+
+	if (slave != mlx4_master_func_num(dev) &&
+	    method == MLX4_ACCESS_REG_WRITE)
+		return -EPERM;
+
+	if (reg_id == MLX4_REG_ID_PTYS) {
+		struct mlx4_ptys_reg *ptys_reg =
+			(struct mlx4_ptys_reg *)inbuf->reg_data;
+
+		ptys_reg->local_port =
+			mlx4_slave_convert_port(dev, slave,
+						ptys_reg->local_port);
+	}
+
+	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
+			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
+			    MLX4_CMD_NATIVE);
+}
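
For context, a hedged sketch of how a consumer might use the new
mlx4_config_dev_retrieval() export; the surrounding function is hypothetical,
and only the export, the params struct and the flags come from this series:

static void example_dump_rx_csum_caps(struct mlx4_dev *dev)
{
        struct mlx4_config_dev_params params;
        int err;

        /* Fails with -ENOTSUPP when the CONFIG_DEV cap is absent. */
        err = mlx4_config_dev_retrieval(dev, &params);
        if (err)
                return;

        if (params.rx_csum_flags_port_1 & MLX4_RX_CSUM_MODE_L4)
                pr_info("port 1 validates L4 checksums in the CQE\n");
        pr_info("VXLAN UDP dport: %u\n", params.vxlan_udp_dport);
}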
@@ -901,9 +901,12 @@ static ssize_t set_port_type(struct device *dev,
 	struct mlx4_priv *priv = mlx4_priv(mdev);
 	enum mlx4_port_type types[MLX4_MAX_PORTS];
 	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+	static DEFINE_MUTEX(set_port_type_mutex);
 	int i;
 	int err = 0;

+	mutex_lock(&set_port_type_mutex);
+
 	if (!strcmp(buf, "ib\n"))
 		info->tmp_type = MLX4_PORT_TYPE_IB;
 	else if (!strcmp(buf, "eth\n"))
@@ -912,7 +915,8 @@ static ssize_t set_port_type(struct device *dev,
 		info->tmp_type = MLX4_PORT_TYPE_AUTO;
 	else {
 		mlx4_err(mdev, "%s is not supported port type\n", buf);
-		return -EINVAL;
+		err = -EINVAL;
+		goto err_out;
 	}

 	mlx4_stop_sense(mdev);
@@ -958,6 +962,9 @@ static ssize_t set_port_type(struct device *dev,
 out:
 	mlx4_start_sense(mdev);
 	mutex_unlock(&priv->port_mutex);
+err_out:
+	mutex_unlock(&set_port_type_mutex);
+
 	return err ? err : count;
 }
......
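
The race fixed in set_port_type() above is a read-modify-write on
info->tmp_type that priv->port_mutex did not cover, and the early -EINVAL
return bypassed any unlock. The shape of the fix as a minimal sketch: a
function-local static mutex serializes the whole sysfs store across all ports
and devices, and the error path leaves through a label so no return can leak
the lock (the parse helper is hypothetical):

static int example_parse(const char *buf)
{
        return (buf && buf[0]) ? 0 : -1;        /* stand-in for real parsing */
}

static ssize_t example_store(const char *buf, size_t count)
{
        static DEFINE_MUTEX(store_mutex);       /* one lock for all callers */
        int err = 0;

        mutex_lock(&store_mutex);
        if (example_parse(buf) < 0) {
                err = -EINVAL;
                goto err_out;                   /* never return with the lock held */
        }
        /* ... apply the parsed value, still under the lock ... */
err_out:
        mutex_unlock(&store_mutex);
        return err ? err : count;
}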
@@ -947,6 +947,11 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_cmd_mailbox *inbox,
 			  struct mlx4_cmd_mailbox *outbox,
 			  struct mlx4_cmd_info *cmd);
+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
 		     struct mlx4_vhcr *vhcr,
 		     struct mlx4_cmd_mailbox *inbox,
@@ -1273,6 +1278,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_cmd_mailbox *inbox,
 					 struct mlx4_cmd_mailbox *outbox,
 					 struct mlx4_cmd_info *cmd);
+int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd);
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
......
@@ -481,7 +481,6 @@ struct mlx4_en_frag_info {
 	u16 frag_size;
 	u16 frag_prefix_size;
 	u16 frag_stride;
-	u16 frag_align;
 };

 #ifdef CONFIG_MLX4_EN_DCB
......
@@ -2872,6 +2872,23 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }

+int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
+			    struct mlx4_vhcr *vhcr,
+			    struct mlx4_cmd_mailbox *inbox,
+			    struct mlx4_cmd_mailbox *outbox,
+			    struct mlx4_cmd_info *cmd)
+{
+	int err;
+	u8 get = vhcr->op_modifier;
+
+	if (get != 1)
+		return -EPERM;
+
+	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+	return err;
+}
+
 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
 			      int len, struct res_mtt **res)
 {
......
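
The mlx4_CONFIG_DEV_wrapper() added above encodes a simple permission split
via the opcode modifier: op_modifier 1 is the query ("get") form that
mlx4_CONFIG_DEV_get() issues, which is forwarded for slaves, while the "set"
form (op_modifier 0) remains PF-only and fails with -EPERM. In sketch form
(illustrative helper, not driver code):

/*
 *      op_modifier == 0  ->  set: PF only, slaves get -EPERM
 *      op_modifier == 1  ->  get: forwarded to FW for slaves too
 */
static int example_config_dev_allowed(bool is_pf, u8 op_modifier)
{
        if (op_modifier != 1 && !is_pf)
                return -EPERM;
        return 0;
}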
@@ -199,6 +199,33 @@ enum {
 	MLX4_CMD_NATIVE
 };

+/*
+ * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
+ * Receive checksum value is reported in CQE also for non TCP/UDP packets.
+ *
+ * MLX4_RX_CSUM_MODE_L4 -
+ * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
+ * was validated correctly, is supported.
+ *
+ * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
+ * IP_OK CQE's field is supported also for non TCP/UDP IP packets.
+ *
+ * MLX4_RX_CSUM_MODE_MULTI_VLAN -
+ * Receive checksum offload is supported for packets with more than 2 vlan headers.
+ */
+enum mlx4_rx_csum_mode {
+	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	= 1UL << 0,
+	MLX4_RX_CSUM_MODE_L4			= 1UL << 1,
+	MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	= 1UL << 2,
+	MLX4_RX_CSUM_MODE_MULTI_VLAN		= 1UL << 3
+};
+
+struct mlx4_config_dev_params {
+	u16	vxlan_udp_dport;
+	u8	rx_csum_flags_port_1;
+	u8	rx_csum_flags_port_2;
+};
+
 struct mlx4_dev;

 struct mlx4_cmd_mailbox {
@@ -250,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+			      struct mlx4_config_dev_params *params);
+
 /*
  * mlx4_get_slave_default_vlan -
  * return true if VST ( default vlan)
......
@@ -188,7 +188,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_CQE_STRIDE		= 1LL << 12,
 	MLX4_DEV_CAP_FLAG2_EQE_STRIDE		= 1LL << 13,
 	MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL	= 1LL << 14,
-	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL << 15
+	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL << 15,
+	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL << 16
 };

 enum {
......