Commit e7157f28 authored by David S. Miller

Merge branch 'nla_align-set-2'

Nicolas Dichtel says:

====================
netlink: align attributes when needed (patchset #2)

This is the continuation (series #2) of the work done to align netlink
attributes when these attributes contain some 64-bit fields.

In patch #3, I didn't modify the function ila_encap_nlsize(). I was waiting
for feedback on this patch: http://patchwork.ozlabs.org/patch/613766/
If it's approved, there will be an update to switch nla_total_size() to
nla_total_size_64bit() after the merge of net in net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d296ba60 2dad624e
...@@ -92,6 +92,8 @@ enum { ...@@ -92,6 +92,8 @@ enum {
IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
IEEE802154_ATTR_PAD,
__IEEE802154_ATTR_MAX, __IEEE802154_ATTR_MAX,
}; };
......
...@@ -271,6 +271,7 @@ enum { ...@@ -271,6 +271,7 @@ enum {
IFLA_BR_NF_CALL_IP6TABLES, IFLA_BR_NF_CALL_IP6TABLES,
IFLA_BR_NF_CALL_ARPTABLES, IFLA_BR_NF_CALL_ARPTABLES,
IFLA_BR_VLAN_DEFAULT_PVID, IFLA_BR_VLAN_DEFAULT_PVID,
IFLA_BR_PAD,
__IFLA_BR_MAX, __IFLA_BR_MAX,
}; };
...@@ -313,6 +314,7 @@ enum { ...@@ -313,6 +314,7 @@ enum {
IFLA_BRPORT_HOLD_TIMER, IFLA_BRPORT_HOLD_TIMER,
IFLA_BRPORT_FLUSH, IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER, IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
__IFLA_BRPORT_MAX __IFLA_BRPORT_MAX
}; };
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
...@@ -666,6 +668,7 @@ enum { ...@@ -666,6 +668,7 @@ enum {
IFLA_VF_STATS_TX_BYTES, IFLA_VF_STATS_TX_BYTES,
IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_MULTICAST,
IFLA_VF_STATS_PAD,
__IFLA_VF_STATS_MAX, __IFLA_VF_STATS_MAX,
}; };
......
...@@ -14,6 +14,7 @@ enum { ...@@ -14,6 +14,7 @@ enum {
ILA_ATTR_LOCATOR_MATCH, /* u64 */ ILA_ATTR_LOCATOR_MATCH, /* u64 */
ILA_ATTR_IFINDEX, /* s32 */ ILA_ATTR_IFINDEX, /* s32 */
ILA_ATTR_DIR, /* u32 */ ILA_ATTR_DIR, /* u32 */
ILA_ATTR_PAD,
__ILA_ATTR_MAX, __ILA_ATTR_MAX,
}; };
......
...@@ -435,6 +435,7 @@ enum { ...@@ -435,6 +435,7 @@ enum {
IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */ IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
IPVS_STATS_ATTR_INBPS, /* current in byte rate */ IPVS_STATS_ATTR_INBPS, /* current in byte rate */
IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */ IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
IPVS_STATS_ATTR_PAD,
__IPVS_STATS_ATTR_MAX, __IPVS_STATS_ATTR_MAX,
}; };
......
...@@ -143,6 +143,7 @@ enum { ...@@ -143,6 +143,7 @@ enum {
L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */ L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */ L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */ L2TP_ATTR_RX_ERRORS, /* u64 */
L2TP_ATTR_STATS_PAD,
__L2TP_ATTR_STATS_MAX, __L2TP_ATTR_STATS_MAX,
}; };
......
...@@ -2197,6 +2197,8 @@ enum nl80211_attrs { ...@@ -2197,6 +2197,8 @@ enum nl80211_attrs {
NL80211_ATTR_STA_SUPPORT_P2P_PS, NL80211_ATTR_STA_SUPPORT_P2P_PS,
NL80211_ATTR_PAD,
/* add attributes here, update the policy in nl80211.c */ /* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST, __NL80211_ATTR_AFTER_LAST,
...@@ -3023,6 +3025,7 @@ enum nl80211_survey_info { ...@@ -3023,6 +3025,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_RX, NL80211_SURVEY_INFO_TIME_RX,
NL80211_SURVEY_INFO_TIME_TX, NL80211_SURVEY_INFO_TIME_TX,
NL80211_SURVEY_INFO_TIME_SCAN, NL80211_SURVEY_INFO_TIME_SCAN,
NL80211_SURVEY_INFO_PAD,
/* keep last */ /* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST, __NL80211_SURVEY_INFO_AFTER_LAST,
...@@ -3468,6 +3471,7 @@ enum nl80211_bss { ...@@ -3468,6 +3471,7 @@ enum nl80211_bss {
NL80211_BSS_BEACON_TSF, NL80211_BSS_BEACON_TSF,
NL80211_BSS_PRESP_DATA, NL80211_BSS_PRESP_DATA,
NL80211_BSS_LAST_SEEN_BOOTTIME, NL80211_BSS_LAST_SEEN_BOOTTIME,
NL80211_BSS_PAD,
/* keep last */ /* keep last */
__NL80211_BSS_AFTER_LAST, __NL80211_BSS_AFTER_LAST,
......
...@@ -519,6 +519,7 @@ enum ovs_flow_attr { ...@@ -519,6 +519,7 @@ enum ovs_flow_attr {
* logging should be suppressed. */ * logging should be suppressed. */
OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */ OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */ OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
OVS_FLOW_ATTR_PAD,
__OVS_FLOW_ATTR_MAX __OVS_FLOW_ATTR_MAX
}; };
......
...@@ -179,6 +179,7 @@ enum { ...@@ -179,6 +179,7 @@ enum {
TCA_TBF_PRATE64, TCA_TBF_PRATE64,
TCA_TBF_BURST, TCA_TBF_BURST,
TCA_TBF_PBURST, TCA_TBF_PBURST,
TCA_TBF_PAD,
__TCA_TBF_MAX, __TCA_TBF_MAX,
}; };
...@@ -368,6 +369,7 @@ enum { ...@@ -368,6 +369,7 @@ enum {
TCA_HTB_DIRECT_QLEN, TCA_HTB_DIRECT_QLEN,
TCA_HTB_RATE64, TCA_HTB_RATE64,
TCA_HTB_CEIL64, TCA_HTB_CEIL64,
TCA_HTB_PAD,
__TCA_HTB_MAX, __TCA_HTB_MAX,
}; };
...@@ -531,6 +533,7 @@ enum { ...@@ -531,6 +533,7 @@ enum {
TCA_NETEM_RATE, TCA_NETEM_RATE,
TCA_NETEM_ECN, TCA_NETEM_ECN,
TCA_NETEM_RATE64, TCA_NETEM_RATE64,
TCA_NETEM_PAD,
__TCA_NETEM_MAX, __TCA_NETEM_MAX,
}; };
......
...@@ -135,9 +135,9 @@ static inline size_t br_port_info_size(void) ...@@ -135,9 +135,9 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
+ nla_total_size(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */ + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+ nla_total_size(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */ + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+ nla_total_size(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */ + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
#endif #endif
...@@ -190,13 +190,16 @@ static int br_port_fill_attrs(struct sk_buff *skb, ...@@ -190,13 +190,16 @@ static int br_port_fill_attrs(struct sk_buff *skb,
return -EMSGSIZE; return -EMSGSIZE;
timerval = br_timer_value(&p->message_age_timer); timerval = br_timer_value(&p->message_age_timer);
if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval)) if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE; return -EMSGSIZE;
timerval = br_timer_value(&p->forward_delay_timer); timerval = br_timer_value(&p->forward_delay_timer);
if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval)) if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE; return -EMSGSIZE;
timerval = br_timer_value(&p->hold_timer); timerval = br_timer_value(&p->hold_timer);
if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval)) if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE; return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
...@@ -1087,10 +1090,10 @@ static size_t br_get_size(const struct net_device *brdev) ...@@ -1087,10 +1090,10 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */ nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */ nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */ nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
nla_total_size(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
nla_total_size(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
nla_total_size(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
nla_total_size(sizeof(u64)) + /* IFLA_BR_GC_TIMER */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */ nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */ nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
...@@ -1101,12 +1104,12 @@ static size_t br_get_size(const struct net_device *brdev) ...@@ -1101,12 +1104,12 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */ nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */ nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
#endif #endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */ nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
...@@ -1129,16 +1132,17 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) ...@@ -1129,16 +1132,17 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u64 clockval; u64 clockval;
clockval = br_timer_value(&br->hello_timer); clockval = br_timer_value(&br->hello_timer);
if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = br_timer_value(&br->tcn_timer); clockval = br_timer_value(&br->tcn_timer);
if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = br_timer_value(&br->topology_change_timer); clockval = br_timer_value(&br->topology_change_timer);
if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = br_timer_value(&br->gc_timer); clockval = br_timer_value(&br->gc_timer);
if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
...@@ -1182,22 +1186,28 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) ...@@ -1182,22 +1186,28 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
return -EMSGSIZE; return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_last_member_interval); clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_membership_interval); clockval = jiffies_to_clock_t(br->multicast_membership_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_querier_interval); clockval = jiffies_to_clock_t(br->multicast_querier_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_query_interval); clockval = jiffies_to_clock_t(br->multicast_query_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_query_response_interval); clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_startup_query_interval); clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval)) if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
#endif #endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
......
...@@ -825,17 +825,17 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, ...@@ -825,17 +825,17 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size(sizeof(struct ifla_vf_link_state)) + nla_total_size(sizeof(struct ifla_vf_link_state)) +
nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
/* IFLA_VF_STATS_RX_PACKETS */ /* IFLA_VF_STATS_RX_PACKETS */
nla_total_size(sizeof(__u64)) + nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_PACKETS */ /* IFLA_VF_STATS_TX_PACKETS */
nla_total_size(sizeof(__u64)) + nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_RX_BYTES */ /* IFLA_VF_STATS_RX_BYTES */
nla_total_size(sizeof(__u64)) + nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_BYTES */ /* IFLA_VF_STATS_TX_BYTES */
nla_total_size(sizeof(__u64)) + nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_BROADCAST */ /* IFLA_VF_STATS_BROADCAST */
nla_total_size(sizeof(__u64)) + nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */ /* IFLA_VF_STATS_MULTICAST */
nla_total_size(sizeof(__u64)) + nla_total_size_64bit(sizeof(__u64)) +
nla_total_size(sizeof(struct ifla_vf_trust))); nla_total_size(sizeof(struct ifla_vf_trust)));
return size; return size;
} else } else
...@@ -1153,18 +1153,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, ...@@ -1153,18 +1153,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_nest_cancel(skb, vfinfo); nla_nest_cancel(skb, vfinfo);
return -EMSGSIZE; return -EMSGSIZE;
} }
if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS, if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
vf_stats.rx_packets) || vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS, nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
vf_stats.tx_packets) || vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES, nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
vf_stats.rx_bytes) || vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES, nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
vf_stats.tx_bytes) || vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
nla_put_u64(skb, IFLA_VF_STATS_BROADCAST, nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast) || vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
nla_put_u64(skb, IFLA_VF_STATS_MULTICAST, nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
vf_stats.multicast)) vf_stats.multicast, IFLA_VF_STATS_PAD))
return -EMSGSIZE; return -EMSGSIZE;
nla_nest_end(skb, vfstats); nla_nest_end(skb, vfstats);
nla_nest_end(skb, vf); nla_nest_end(skb, vf);
......
...@@ -34,9 +34,11 @@ ...@@ -34,9 +34,11 @@
#include "ieee802154.h" #include "ieee802154.h"
static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr) static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr,
int padattr)
{ {
return nla_put_u64(msg, type, swab64((__force u64)hwaddr)); return nla_put_u64_64bit(msg, type, swab64((__force u64)hwaddr),
padattr);
} }
static __le64 nla_get_hwaddr(const struct nlattr *nla) static __le64 nla_get_hwaddr(const struct nlattr *nla)
...@@ -623,7 +625,8 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg, ...@@ -623,7 +625,8 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg,
if (desc->device_addr.mode == IEEE802154_ADDR_LONG && if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
desc->device_addr.extended_addr)) desc->device_addr.extended_addr,
IEEE802154_ATTR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
} }
...@@ -638,7 +641,7 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg, ...@@ -638,7 +641,7 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg,
if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX && if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED, nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
desc->extended_source)) desc->extended_source, IEEE802154_ATTR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
return 0; return 0;
...@@ -1063,7 +1066,8 @@ ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq, ...@@ -1063,7 +1066,8 @@ ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) || nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
desc->short_addr) || desc->short_addr) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) || nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr,
IEEE802154_ATTR_PAD) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
desc->frame_counter) || desc->frame_counter) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE, nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
...@@ -1167,7 +1171,8 @@ ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq, ...@@ -1167,7 +1171,8 @@ ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) || nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) || nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr,
IEEE802154_ATTR_PAD) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
devkey->frame_counter) || devkey->frame_counter) ||
ieee802154_llsec_fill_key_id(msg, &devkey->key_id)) ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
......
...@@ -813,7 +813,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, ...@@ -813,7 +813,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) || if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) || nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) ||
nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) || nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
wpan_dev_id(wpan_dev), NL802154_ATTR_PAD) ||
nla_put_u32(msg, NL802154_ATTR_GENERATION, nla_put_u32(msg, NL802154_ATTR_GENERATION,
rdev->devlist_generation ^ rdev->devlist_generation ^
(cfg802154_rdev_list_generation << 2))) (cfg802154_rdev_list_generation << 2)))
......
...@@ -109,7 +109,8 @@ static int ila_fill_encap_info(struct sk_buff *skb, ...@@ -109,7 +109,8 @@ static int ila_fill_encap_info(struct sk_buff *skb,
{ {
struct ila_params *p = ila_params_lwtunnel(lwtstate); struct ila_params *p = ila_params_lwtunnel(lwtstate);
if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator)) if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator,
ILA_ATTR_PAD))
goto nla_put_failure; goto nla_put_failure;
return 0; return 0;
......
...@@ -418,12 +418,15 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info) ...@@ -418,12 +418,15 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg) static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{ {
if (nla_put_u64(msg, ILA_ATTR_IDENTIFIER, if (nla_put_u64_64bit(msg, ILA_ATTR_IDENTIFIER,
(__force u64)ila->p.identifier) || (__force u64)ila->p.identifier,
nla_put_u64(msg, ILA_ATTR_LOCATOR, ILA_ATTR_PAD) ||
(__force u64)ila->p.ip.locator) || nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
nla_put_u64(msg, ILA_ATTR_LOCATOR_MATCH, (__force u64)ila->p.ip.locator,
(__force u64)ila->p.ip.locator_match) || ILA_ATTR_PAD) ||
nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
(__force u64)ila->p.ip.locator_match,
ILA_ATTR_PAD) ||
nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) || nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) ||
nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir)) nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir))
return -1; return -1;
......
...@@ -346,22 +346,30 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla ...@@ -346,22 +346,30 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
if (nest == NULL) if (nest == NULL)
goto nla_put_failure; goto nla_put_failure;
if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
atomic_long_read(&tunnel->stats.tx_packets)) || atomic_long_read(&tunnel->stats.tx_packets),
nla_put_u64(skb, L2TP_ATTR_TX_BYTES, L2TP_ATTR_STATS_PAD) ||
atomic_long_read(&tunnel->stats.tx_bytes)) || nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, atomic_long_read(&tunnel->stats.tx_bytes),
atomic_long_read(&tunnel->stats.tx_errors)) || L2TP_ATTR_STATS_PAD) ||
nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
atomic_long_read(&tunnel->stats.rx_packets)) || atomic_long_read(&tunnel->stats.tx_errors),
nla_put_u64(skb, L2TP_ATTR_RX_BYTES, L2TP_ATTR_STATS_PAD) ||
atomic_long_read(&tunnel->stats.rx_bytes)) || nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, atomic_long_read(&tunnel->stats.rx_packets),
atomic_long_read(&tunnel->stats.rx_seq_discards)) || L2TP_ATTR_STATS_PAD) ||
nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
atomic_long_read(&tunnel->stats.rx_oos_packets)) || atomic_long_read(&tunnel->stats.rx_bytes),
nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, L2TP_ATTR_STATS_PAD) ||
atomic_long_read(&tunnel->stats.rx_errors))) nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&tunnel->stats.rx_seq_discards),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&tunnel->stats.rx_oos_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
atomic_long_read(&tunnel->stats.rx_errors),
L2TP_ATTR_STATS_PAD))
goto nla_put_failure; goto nla_put_failure;
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
...@@ -754,22 +762,30 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl ...@@ -754,22 +762,30 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
if (nest == NULL) if (nest == NULL)
goto nla_put_failure; goto nla_put_failure;
if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
atomic_long_read(&session->stats.tx_packets)) || atomic_long_read(&session->stats.tx_packets),
nla_put_u64(skb, L2TP_ATTR_TX_BYTES, L2TP_ATTR_STATS_PAD) ||
atomic_long_read(&session->stats.tx_bytes)) || nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, atomic_long_read(&session->stats.tx_bytes),
atomic_long_read(&session->stats.tx_errors)) || L2TP_ATTR_STATS_PAD) ||
nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
atomic_long_read(&session->stats.rx_packets)) || atomic_long_read(&session->stats.tx_errors),
nla_put_u64(skb, L2TP_ATTR_RX_BYTES, L2TP_ATTR_STATS_PAD) ||
atomic_long_read(&session->stats.rx_bytes)) || nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, atomic_long_read(&session->stats.rx_packets),
atomic_long_read(&session->stats.rx_seq_discards)) || L2TP_ATTR_STATS_PAD) ||
nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
atomic_long_read(&session->stats.rx_oos_packets)) || atomic_long_read(&session->stats.rx_bytes),
nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, L2TP_ATTR_STATS_PAD) ||
atomic_long_read(&session->stats.rx_errors))) nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&session->stats.rx_seq_discards),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&session->stats.rx_oos_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
atomic_long_read(&session->stats.rx_errors),
L2TP_ATTR_STATS_PAD))
goto nla_put_failure; goto nla_put_failure;
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
......
...@@ -2875,8 +2875,10 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, ...@@ -2875,8 +2875,10 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) || if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) || nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) || IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) || nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) || nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) || nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
...@@ -2900,16 +2902,26 @@ static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type, ...@@ -2900,16 +2902,26 @@ static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
if (!nl_stats) if (!nl_stats)
return -EMSGSIZE; return -EMSGSIZE;
if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) || if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns,
nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) || IPVS_STATS_ATTR_PAD) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts,
nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) || IPVS_STATS_ATTR_PAD) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts,
nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) || IPVS_STATS_ATTR_PAD) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) || IPVS_STATS_ATTR_PAD) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) || nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps)) IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps,
IPVS_STATS_ATTR_PAD))
goto nla_put_failure; goto nla_put_failure;
nla_nest_end(skb, nl_stats); nla_nest_end(skb, nl_stats);
......
...@@ -754,7 +754,8 @@ static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, ...@@ -754,7 +754,8 @@ static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
if (used && if (used &&
nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
OVS_FLOW_ATTR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
if (stats.n_packets && if (stats.n_packets &&
......
...@@ -1122,10 +1122,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, ...@@ -1122,10 +1122,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
goto nla_put_failure; goto nla_put_failure;
if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps)) nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
TCA_HTB_PAD))
goto nla_put_failure; goto nla_put_failure;
if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps)) nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
TCA_HTB_PAD))
goto nla_put_failure; goto nla_put_failure;
return nla_nest_end(skb, nest); return nla_nest_end(skb, nest);
......
...@@ -994,7 +994,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -994,7 +994,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure; goto nla_put_failure;
if (q->rate >= (1ULL << 32)) { if (q->rate >= (1ULL << 32)) {
if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate)) if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
TCA_NETEM_PAD))
goto nla_put_failure; goto nla_put_failure;
rate.rate = ~0U; rate.rate = ~0U;
} else { } else {
......
...@@ -472,11 +472,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -472,11 +472,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
goto nla_put_failure; goto nla_put_failure;
if (q->rate.rate_bytes_ps >= (1ULL << 32) && if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps)) nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
TCA_TBF_PAD))
goto nla_put_failure; goto nla_put_failure;
if (tbf_peak_present(q) && if (tbf_peak_present(q) &&
q->peak.rate_bytes_ps >= (1ULL << 32) && q->peak.rate_bytes_ps >= (1ULL << 32) &&
nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps)) nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
TCA_TBF_PAD))
goto nla_put_failure; goto nla_put_failure;
return nla_nest_end(skb, nest); return nla_nest_end(skb, nest);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment