Commit b7187139 authored by Linus Torvalds

Merge tag 'hardening-v6.9-rc1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull more hardening updates from Kees Cook:

 - CONFIG_MEMCPY_SLOW_KUNIT_TEST is no longer needed (Guenter Roeck)

 - Fix needless UTF-8 character in arch/Kconfig (Liu Song)

 - Improve __counted_by warning message in LKDTM (Nathan Chancellor)

 - Refactor DEFINE_FLEX() for default use of __counted_by

 - Disable signed integer overflow sanitizer on GCC < 8

* tag 'hardening-v6.9-rc1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lkdtm/bugs: Improve warning message for compilers without counted_by support
  overflow: Change DEFINE_FLEX to take __counted_by member
  Revert "kunit: memcpy: Split slow memcpy tests into MEMCPY_SLOW_KUNIT_TEST"
  arch/Kconfig: eliminate needless UTF-8 character in Kconfig help
  ubsan: Disable signed integer overflow sanitizer on GCC < 8
parents bfa8f186 231dc3f0
@@ -799,7 +799,7 @@ config CFI_CLANG
 	depends on ARCH_SUPPORTS_CFI_CLANG
 	depends on $(cc-option,-fsanitize=kcfi)
 	help
-	  This option enables Clang’s forward-edge Control Flow Integrity
+	  This option enables Clang's forward-edge Control Flow Integrity
 	  (CFI) checking, where the compiler injects a runtime check to each
 	  indirect function call to ensure the target is a valid function with
 	  the correct static type. This restricts possible call targets and
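As a rough sketch of the behaviour this help text describes (hypothetical functions, not part of this diff): with CFI_CLANG enabled, an indirect call through a pointer whose prototype does not match the target's static type fails the injected check.

static int add_one(int x)
{
	return x + 1;
}

static void cfi_demo(void)
{
	int (*ok)(int) = add_one;			/* prototype matches the target */
	long (*bad)(long) = (long (*)(long))add_one;	/* prototype does not match */

	ok(1);	/* passes the compiler-injected kCFI type check */
	bad(1);	/* kCFI detects the static type mismatch and traps */
}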
...
@@ -417,7 +417,7 @@ static void lkdtm_FAM_BOUNDS(void)
 	pr_err("FAIL: survived access of invalid flexible array member index!\n");
 
 	if (!__has_attribute(__counted_by__))
-		pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
+		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
 			lkdtm_kernel_info);
 	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
 		pr_expected_config(CONFIG_UBSAN_TRAP);
...
@@ -956,7 +956,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
 			   u16 q_idx)
 {
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
 
 	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
 		return -EINVAL;
@@ -978,7 +978,7 @@ int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
 static int
 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
 {
-	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
 	int err = 0;
 	u16 q_idx;
...
@@ -4695,7 +4695,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
 		enum ice_disq_rst_src rst_src, u16 vmvf_num,
 		struct ice_sq_cd *cd)
 {
-	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
 	u16 i, buf_size = __struct_size(qg_list);
 	struct ice_q_ctx *q_ctx;
 	int status = -ENOENT;
@@ -4917,7 +4917,7 @@ int
 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
 		      u16 *q_id)
 {
-	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
 	u16 qg_size = __struct_size(qg_list);
 	struct ice_hw *hw;
 	int status = 0;
...
@@ -1938,7 +1938,7 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
  */
 static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
 {
-	DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
-		    ICE_PKG_CNT);
+	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+			ICE_PKG_CNT);
 	u16 size = __struct_size(pkg_info);
 	u32 i;
@@ -1990,7 +1990,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
 					     struct ice_pkg_hdr *ospkg,
 					     struct ice_seg **seg)
 {
-	DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
-		    ICE_PKG_CNT);
+	DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+			ICE_PKG_CNT);
 	u16 size = __struct_size(pkg);
 	enum ice_ddp_state state;
...
@@ -491,7 +491,7 @@ static void
 ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
 			u16 vsi_num, u8 tc)
 {
-	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	struct device *dev = ice_pf_to_dev(lag->pf);
 	u16 numq, valq, num_moved, qbuf_size;
 	u16 buf_size = __struct_size(buf);
@@ -849,7 +849,7 @@ static void
 ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
 		      u8 tc)
 {
-	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	struct device *dev = ice_pf_to_dev(lag->pf);
 	u16 numq, valq, num_moved, qbuf_size;
 	u16 buf_size = __struct_size(buf);
@@ -1873,7 +1873,7 @@ static void
 ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
 			      u16 vsi_num, u8 tc)
 {
-	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	struct device *dev = ice_pf_to_dev(lag->pf);
 	u16 numq, valq, num_moved, qbuf_size;
 	u16 buf_size = __struct_size(buf);
...
@@ -237,7 +237,7 @@ static int
 ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
 		       u32 node_teid)
 {
-	DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
 	u16 buf_size = __struct_size(buf);
 	u16 num_groups_removed = 0;
 	int status;
@@ -2219,7 +2219,7 @@ int
 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
 		     u16 num_items, u32 *list)
 {
-	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	u16 buf_len = __struct_size(buf);
 	struct ice_sched_node *node;
 	u16 i, grps_movd = 0;
...
@@ -1812,7 +1812,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
 			   enum ice_sw_lkup_type lkup_type,
 			   enum ice_adminq_opc opc)
 {
-	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
 	u16 buf_len = __struct_size(sw_buf);
 	struct ice_aqc_res_elem *vsi_ele;
 	int status;
@@ -2081,7 +2081,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
  */
 int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
 {
-	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
 	u16 buf_len = __struct_size(sw_buf);
 	int status;
@@ -4418,7 +4418,7 @@ int
 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
 		   u16 *counter_id)
 {
-	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
 	u16 buf_len = __struct_size(buf);
 	int status;
@@ -4446,7 +4446,7 @@ int
 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
 		  u16 counter_id)
 {
-	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
 	u16 buf_len = __struct_size(buf);
 	int status;
@@ -4476,7 +4476,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
  */
 int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
 {
-	DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+	DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
 	u16 buf_len = __struct_size(buf);
 	u16 res_type;
 	int status;
...
@@ -398,7 +398,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
  * @count: Number of elements in the array; must be compile-time const.
  * @initializer: initializer expression (could be empty for no init).
  */
-#define _DEFINE_FLEX(type, name, member, count, initializer)			\
+#define _DEFINE_FLEX(type, name, member, count, initializer...)		\
 	_Static_assert(__builtin_constant_p(count),				\
 		       "onstack flex array members require compile-time const count"); \
 	union {									\
@@ -408,8 +408,8 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
 	type *name = (type *)&name##_u

 /**
- * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
- * flexible array member.
+ * DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member, when it does not have a __counted_by annotation.
  *
  * @type: structure type name, including "struct" keyword.
  * @name: Name for a variable to define.
@@ -420,7 +420,24 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
  * flexible array member.
  * Use __struct_size(@name) to get compile-time size of it afterwards.
  */
-#define DEFINE_FLEX(type, name, member, count)			\
+#define DEFINE_RAW_FLEX(type, name, member, count)		\
 	_DEFINE_FLEX(type, name, member, count, = {})
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @TYPE: structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @MEMBER: Name of the array member.
+ * @COUNTER: Name of the __counted_by member.
+ * @COUNT: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack, instance of @TYPE structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ */
+#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT)	\
+	_DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, })
+
 #endif /* __LINUX_OVERFLOW_H */
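A minimal usage sketch of the two macros after this change (struct pkg_list below is hypothetical, not taken from this series): DEFINE_RAW_FLEX() keeps the old behaviour for structures without a __counted_by annotation, while DEFINE_FLEX() now also names the counter member and initializes it to the element count.

/* Hypothetical structure with a __counted_by annotation. */
struct pkg_list {
	u32 count;
	u32 entries[] __counted_by(count);
};

static void pkg_demo(void)
{
	/* Old behaviour, kept for structs without __counted_by:
	 * the buffer is zeroed, no counter member is touched. */
	DEFINE_RAW_FLEX(struct pkg_list, raw_buf, entries, 4);

	/* New DEFINE_FLEX(): the extra COUNTER argument names the
	 * __counted_by member, which is initialized to COUNT. */
	DEFINE_FLEX(struct pkg_list, buf, entries, count, 4);

	/* Both reserve room for 4 elements; only buf->count is preset. */
	pr_info("sizes: %zu raw, %zu counted, counter=%u\n",
		__struct_size(raw_buf), __struct_size(buf), buf->count);
}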
...
@@ -2703,18 +2703,6 @@ config MEMCPY_KUNIT_TEST
 
 	  If unsure, say N.
 
-config MEMCPY_SLOW_KUNIT_TEST
-	bool "Include exhaustive memcpy tests"
-	depends on MEMCPY_KUNIT_TEST
-	default y
-	help
-	  Some memcpy tests are quite exhaustive in checking for overlaps
-	  and bit ranges. These can be very slow, so they are split out
-	  as a separate config, in case they need to be disabled.
-
-	  Note this config option will be replaced by the use of KUnit test
-	  attributes.
-
 config IS_SIGNED_TYPE_KUNIT_TEST
 	tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
 	depends on KUNIT
...
@@ -119,6 +119,8 @@ config UBSAN_SIGNED_WRAP
 	bool "Perform checking for signed arithmetic wrap-around"
 	default UBSAN
 	depends on !COMPILE_TEST
+	# The no_sanitize attribute was introduced in GCC with version 8.
+	depends on !CC_IS_GCC || GCC_VERSION >= 80000
 	depends on $(cc-option,-fsanitize=signed-integer-overflow)
 	help
 	  This option enables -fsanitize=signed-integer-overflow which checks
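A hedged illustration of the attribute named in the new comment above (the function is made up, not part of the patch): code that needs to opt out of this check, for example a helper that wraps intentionally, would use the string form of no_sanitize, which GCC only accepts from version 8, hence the added version gate.

/* Only GCC >= 8 (and Clang) understand no_sanitize("..."), which is why
 * UBSAN_SIGNED_WRAP now depends on GCC_VERSION >= 80000. */
static __attribute__((no_sanitize("signed-integer-overflow")))
int wrapping_sum(int a, int b)
{
	return a + b;	/* may wrap around; intentionally not instrumented */
}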
...
@@ -309,9 +309,6 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
 
 static void init_large(struct kunit *test)
 {
-	if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
-		kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
-
 	/* Get many bit patterns. */
 	get_random_bytes(large_src, ARRAY_SIZE(large_src));
...
@@ -1172,6 +1172,24 @@ static void castable_to_type_test(struct kunit *test)
 #undef TEST_CASTABLE_TO_TYPE
 }
 
+struct foo {
+	int a;
+	u32 counter;
+	s16 array[] __counted_by(counter);
+};
+
+static void DEFINE_FLEX_test(struct kunit *test)
+{
+	DEFINE_RAW_FLEX(struct foo, two, array, 2);
+	DEFINE_FLEX(struct foo, eight, array, counter, 8);
+	DEFINE_FLEX(struct foo, empty, array, counter, 0);
+
+	KUNIT_EXPECT_EQ(test, __struct_size(two),
+			sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+	KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
+	KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+}
+
 static struct kunit_case overflow_test_cases[] = {
 	KUNIT_CASE(u8_u8__u8_overflow_test),
 	KUNIT_CASE(s8_s8__s8_overflow_test),
@@ -1194,6 +1212,7 @@ static struct kunit_case overflow_test_cases[] = {
 	KUNIT_CASE(overflows_type_test),
 	KUNIT_CASE(same_type_test),
 	KUNIT_CASE(castable_to_type_test),
+	KUNIT_CASE(DEFINE_FLEX_test),
 	{}
 };