Commit 74f9d556 authored by Jakub Kicinski

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
virtchnl: fix fake 1-elem arrays

Alexander Lobakin says:

6.5-rc1 started spitting warning splats when composing virtchnl
messages, specifically on virtchnl_rss_key and virtchnl_rss_lut:

[   84.167709] memcpy: detected field-spanning write (size 52) of single
field "vrk->key" at drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:1095
(size 1)
[   84.169915] WARNING: CPU: 3 PID: 11 at drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:1095 iavf_set_rss_key+0x123/0x140 [iavf]
...
[   84.191982] Call Trace:
[   84.192439]  <TASK>
[   84.192900]  ? __warn+0xc9/0x1a0
[   84.193353]  ? iavf_set_rss_key+0x123/0x140 [iavf]
[   84.193818]  ? report_bug+0x12c/0x1b0
[   84.194266]  ? handle_bug+0x42/0x70
[   84.194714]  ? exc_invalid_op+0x1a/0x50
[   84.195149]  ? asm_exc_invalid_op+0x1a/0x20
[   84.195592]  ? iavf_set_rss_key+0x123/0x140 [iavf]
[   84.196033]  iavf_watchdog_task+0xb0c/0xe00 [iavf]
...
[   84.225476] memcpy: detected field-spanning write (size 64) of single
field "vrl->lut" at drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:1127
(size 1)
[   84.227190] WARNING: CPU: 27 PID: 1044 at drivers/net/ethernet/intel/iavf/iavf_virtchnl.c:1127 iavf_set_rss_lut+0x123/0x140 [iavf]
...
[   84.246601] Call Trace:
[   84.247228]  <TASK>
[   84.247840]  ? __warn+0xc9/0x1a0
[   84.248263]  ? iavf_set_rss_lut+0x123/0x140 [iavf]
[   84.248698]  ? report_bug+0x12c/0x1b0
[   84.249122]  ? handle_bug+0x42/0x70
[   84.249549]  ? exc_invalid_op+0x1a/0x50
[   84.249970]  ? asm_exc_invalid_op+0x1a/0x20
[   84.250390]  ? iavf_set_rss_lut+0x123/0x140 [iavf]
[   84.250820]  iavf_watchdog_task+0xb16/0xe00 [iavf]
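
These splats come from CONFIG_FORTIFY_SOURCE: the fortified memcpy()
knows the size of the destination *field* and warns when a write spans
past it. A minimal sketch of the offending pattern (hypothetical
struct and names, not the exact virtchnl layout):

        struct fake_flex_msg {
                u16 len;
                u8 data[1];     /* really a flexible array on the wire */
        };

        /* the destination field is 1 byte, so any n > 1 warns */
        memcpy(msg->data, src, n);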

Gustavo already tried to fix those back in 2021 [0][1]. Unfortunately,
a VM can run a different kernel than the host, which makes those
structures de facto ABI: their wire sizes must not change.
However, it is possible to have proper flex arrays + struct_size()
calculations and still send the very same messages with the same sizes.
The common rule is:

elem[1] -> elem[]
size = struct_size() + <difference between the old and the new msg size>
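
For example, for a hypothetical message with no trailing padding
(illustrative names only, not one of the ten virtchnl structures):

        /* before: fake 1-elem array */
        struct my_msg {
                u32 num;
                u32 elem[1];
        };
        /* old wire size = sizeof(struct my_msg) + (n - 1) * sizeof(u32)
         *               = 8 + (n - 1) * 4 = 4 + n * 4
         */

        /* after: true flexible array */
        struct my_msg {
                u32 num;
                u32 elem[];
        };
        /* new wire size = struct_size(msg, elem, n) = 4 + n * 4 */

Here the difference term is 0 and the wire format is unchanged. For
layouts where trailing padding made the old and new sizes differ, that
non-zero difference has to be added back so the message keeps its
exact wire size.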

The "old" size in the current code is calculated 3 different ways for
10 virtchnl structures total. Each commit addresses one of the ways
cumulatively instead of per-structure.

I was planning to send this to -net initially, but given that virtchnl
was renamed from i40evf and has seen some fat style-cleanup commits in
the past, it's not straightforward to even pick appropriate SHAs, let
alone have the patches apply automatically. I may send manual backports
for a couple of the latest supported kernels later on if anyone needs
them at all.

[0] https://lore.kernel.org/all/20210525230912.GA175802@embeddedor
[1] https://lore.kernel.org/all/20210525231851.GA176647@embeddedor

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  virtchnl: fix fake 1-elem arrays for structures allocated as `nents`
  virtchnl: fix fake 1-elem arrays in structures allocated as `nents + 1`
  virtchnl: fix fake 1-elem arrays in structs allocated as `nents + 1` - 1
====================

Link: https://lore.kernel.org/r/20230816210657.1326772-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 04f28408 b0654e64
@@ -506,6 +506,7 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 	struct virtchnl_rdma_qv_info *qv_info;
 	u32 v_idx, i, reg_idx, reg;
 	u32 next_q_idx, next_q_type;
+	size_t size;
 	u32 msix_vf;
 	int ret = 0;
 
@@ -521,9 +522,9 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 	}
 	kfree(vf->qvlist_info);
-	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
-					      qvlist_info->num_vectors - 1),
-				  GFP_KERNEL);
+	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
+				    qvlist_info->num_vectors);
+	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
 	if (!vf->qvlist_info) {
 		ret = -ENOMEM;
 		goto err_out;
 	}
@@ -2103,7 +2104,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 		goto err;
 	}
 
-	len = struct_size(vfres, vsi_res, num_vsis);
+	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
 	vfres = kzalloc(len, GFP_KERNEL);
 	if (!vfres) {
 		aq_ret = -ENOMEM;
...
@@ -92,9 +92,9 @@ struct iavf_vsi {
 #define IAVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
 #define IAVF_MBPS_QUANTA	50
 
-#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
-					(IAVF_MAX_VF_VSI * \
-					 sizeof(struct virtchnl_vsi_resource)))
+#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE					\
+	virtchnl_struct_size((struct virtchnl_vf_resource *)NULL,	\
+			     vsi_res, IAVF_MAX_VF_VSI)
 
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
...
@@ -469,8 +469,8 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
 	}
 
 	v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;
-	msg_size = struct_size(v_qvlist_info, qv_info,
-			       v_qvlist_info->num_vectors - 1);
+	msg_size = virtchnl_struct_size(v_qvlist_info, qv_info,
+					v_qvlist_info->num_vectors);
 
 	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);
 	err = iavf_aq_send_msg_to_pf(&adapter->hw,
...
@@ -53,7 +53,7 @@ struct iavf_qv_info {
 
 struct iavf_qvlist_info {
 	u32 num_vectors;
-	struct iavf_qv_info qv_info[1];
+	struct iavf_qv_info qv_info[];
 };
 
 #define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF
...
@@ -215,8 +215,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
 	u16 len;
 	int err;
 
-	len = sizeof(struct virtchnl_vf_resource) +
-	      IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
 	event.buf_len = len;
 	event.msg_buf = kzalloc(len, GFP_KERNEL);
 	if (!event.msg_buf)
@@ -284,7 +283,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
-	len = struct_size(vqci, qpair, pairs);
+	len = virtchnl_struct_size(vqci, qpair, pairs);
 	vqci = kzalloc(len, GFP_KERNEL);
 	if (!vqci)
 		return;
@@ -397,7 +396,7 @@ void iavf_map_queues(struct iavf_adapter *adapter)
 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
 
-	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
+	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
 	vimi = kzalloc(len, GFP_KERNEL);
 	if (!vimi)
 		return;
@@ -476,13 +475,11 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
 	}
 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
 
-	len = struct_size(veal, list, count);
+	len = virtchnl_struct_size(veal, list, count);
 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
 		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
-		count = (IAVF_MAX_AQ_BUF_SIZE -
-			 sizeof(struct virtchnl_ether_addr_list)) /
-			sizeof(struct virtchnl_ether_addr);
-		len = struct_size(veal, list, count);
+		while (len > IAVF_MAX_AQ_BUF_SIZE)
+			len = virtchnl_struct_size(veal, list, --count);
 		more = true;
 	}
 
@@ -547,13 +544,11 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
 	}
 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
 
-	len = struct_size(veal, list, count);
+	len = virtchnl_struct_size(veal, list, count);
 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
 		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
-		count = (IAVF_MAX_AQ_BUF_SIZE -
-			 sizeof(struct virtchnl_ether_addr_list)) /
-			sizeof(struct virtchnl_ether_addr);
-		len = struct_size(veal, list, count);
+		while (len > IAVF_MAX_AQ_BUF_SIZE)
+			len = virtchnl_struct_size(veal, list, --count);
 		more = true;
 	}
 	veal = kzalloc(len, GFP_ATOMIC);
@@ -687,12 +682,12 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
 
-		len = sizeof(*vvfl) + (count * sizeof(u16));
+		len = virtchnl_struct_size(vvfl, vlan_id, count);
 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
-			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
-				sizeof(u16);
-			len = sizeof(*vvfl) + (count * sizeof(u16));
+			while (len > IAVF_MAX_AQ_BUF_SIZE)
+				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
 			more = true;
 		}
 		vvfl = kzalloc(len, GFP_ATOMIC);
@@ -732,15 +727,12 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
 			more = true;
 		}
 
-		len = sizeof(*vvfl_v2) + ((count - 1) *
-					  sizeof(struct virtchnl_vlan_filter));
+		len = virtchnl_struct_size(vvfl_v2, filters, count);
 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
-			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
-				sizeof(struct virtchnl_vlan_filter);
-			len = sizeof(*vvfl_v2) +
-			      ((count - 1) *
-			       sizeof(struct virtchnl_vlan_filter));
+			while (len > IAVF_MAX_AQ_BUF_SIZE)
+				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
 			more = true;
 		}
@@ -838,12 +830,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
 
-		len = sizeof(*vvfl) + (count * sizeof(u16));
+		len = virtchnl_struct_size(vvfl, vlan_id, count);
 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
 			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
-			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
-				sizeof(u16);
-			len = sizeof(*vvfl) + (count * sizeof(u16));
+			while (len > IAVF_MAX_AQ_BUF_SIZE)
+				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
 			more = true;
 		}
 		vvfl = kzalloc(len, GFP_ATOMIC);
@@ -884,16 +876,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
 
-		len = sizeof(*vvfl_v2) +
-		      ((count - 1) * sizeof(struct virtchnl_vlan_filter));
+		len = virtchnl_struct_size(vvfl_v2, filters, count);
 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
-			count = (IAVF_MAX_AQ_BUF_SIZE -
-				 sizeof(*vvfl_v2)) /
-				sizeof(struct virtchnl_vlan_filter);
-			len = sizeof(*vvfl_v2) +
-			      ((count - 1) *
-			       sizeof(struct virtchnl_vlan_filter));
+			while (len > IAVF_MAX_AQ_BUF_SIZE)
+				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
 			more = true;
 		}
@@ -1085,8 +1073,7 @@ void iavf_set_rss_key(struct iavf_adapter *adapter)
 			adapter->current_op);
 		return;
 	}
-	len = sizeof(struct virtchnl_rss_key) +
-	      (adapter->rss_key_size * sizeof(u8)) - 1;
+	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
 	vrk = kzalloc(len, GFP_KERNEL);
 	if (!vrk)
 		return;
@@ -1117,8 +1104,7 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter)
 			adapter->current_op);
 		return;
 	}
-	len = sizeof(struct virtchnl_rss_lut) +
-	      (adapter->rss_lut_size * sizeof(u8)) - 1;
+	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
 	vrl = kzalloc(len, GFP_KERNEL);
 	if (!vrl)
 		return;
@@ -1499,7 +1485,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
 		return;
 	}
 
-	len = struct_size(vti, list, adapter->num_tc - 1);
+	len = virtchnl_struct_size(vti, list, adapter->num_tc);
 	vti = kzalloc(len, GFP_KERNEL);
 	if (!vti)
 		return;
@@ -2175,9 +2161,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 		}
 		break;
 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
-		u16 len = sizeof(struct virtchnl_vf_resource) +
-			  IAVF_MAX_VF_VSI *
-			  sizeof(struct virtchnl_vsi_resource);
+		u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
+
 		memcpy(adapter->vf_res, msg, min(msglen, len));
 		iavf_validate_num_queues(adapter);
 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
...
@@ -428,7 +428,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 		goto err;
 	}
 
-	len = sizeof(struct virtchnl_vf_resource);
+	len = virtchnl_struct_size(vfres, vsi_res, 0);
 	vfres = kzalloc(len, GFP_KERNEL);
 	if (!vfres) {
...