Commit 84e00d9b authored by Jakub Kicinski

net: convert some netlink netdev iterators to depend on the xarray

Reap the benefits of easier iteration thanks to the xarray.
Convert just the genetlink ones, those are easier to test.
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Link: https://lore.kernel.org/r/20230726185530.2247698-3-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 759ab1ed
...@@ -3016,6 +3016,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */ ...@@ -3016,6 +3016,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */
if (netdev_master_upper_dev_get_rcu(slave) == (bond)) if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
#define for_each_netdev_dump(net, d, ifindex) \
xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
static inline struct net_device *next_net_device(struct net_device *dev) static inline struct net_device *next_net_device(struct net_device *dev)
{ {
struct list_head *lh; struct list_head *lh;
......
...@@ -101,43 +101,22 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -101,43 +101,22 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{ {
struct net *net = sock_net(skb->sk); struct net *net = sock_net(skb->sk);
struct net_device *netdev; struct net_device *netdev;
int idx = 0, s_idx; int err = 0;
int h, s_h;
int err;
s_h = cb->args[0];
s_idx = cb->args[1];
rtnl_lock(); rtnl_lock();
for_each_netdev_dump(net, netdev, cb->args[0]) {
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { err = netdev_nl_dev_fill(netdev, skb,
struct hlist_head *head; NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, 0,
idx = 0; NETDEV_CMD_DEV_GET);
head = &net->dev_index_head[h]; if (err < 0)
hlist_for_each_entry(netdev, head, index_hlist) { break;
if (idx < s_idx)
goto cont;
err = netdev_nl_dev_fill(netdev, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, 0,
NETDEV_CMD_DEV_GET);
if (err < 0)
break;
cont:
idx++;
}
} }
rtnl_unlock(); rtnl_unlock();
if (err != -EMSGSIZE) if (err != -EMSGSIZE)
return err; return err;
cb->args[1] = idx;
cb->args[0] = h;
cb->seq = net->dev_base_seq;
return skb->len; return skb->len;
} }
......
...@@ -252,8 +252,7 @@ int ethnl_multicast(struct sk_buff *skb, struct net_device *dev) ...@@ -252,8 +252,7 @@ int ethnl_multicast(struct sk_buff *skb, struct net_device *dev)
* @ops: request ops of currently processed message type * @ops: request ops of currently processed message type
* @req_info: parsed request header of processed request * @req_info: parsed request header of processed request
* @reply_data: data needed to compose the reply * @reply_data: data needed to compose the reply
* @pos_hash: saved iteration position - hashbucket * @pos_ifindex: saved iteration position - ifindex
* @pos_idx: saved iteration position - index
* *
* These parameters are kept in struct netlink_callback as context preserved * These parameters are kept in struct netlink_callback as context preserved
* between iterations. They are initialized by ethnl_default_start() and used * between iterations. They are initialized by ethnl_default_start() and used
...@@ -263,8 +262,7 @@ struct ethnl_dump_ctx { ...@@ -263,8 +262,7 @@ struct ethnl_dump_ctx {
const struct ethnl_request_ops *ops; const struct ethnl_request_ops *ops;
struct ethnl_req_info *req_info; struct ethnl_req_info *req_info;
struct ethnl_reply_data *reply_data; struct ethnl_reply_data *reply_data;
int pos_hash; unsigned long pos_ifindex;
int pos_idx;
}; };
static const struct ethnl_request_ops * static const struct ethnl_request_ops *
...@@ -490,55 +488,27 @@ static int ethnl_default_dumpit(struct sk_buff *skb, ...@@ -490,55 +488,27 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
{ {
struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb);
struct net *net = sock_net(skb->sk); struct net *net = sock_net(skb->sk);
int s_idx = ctx->pos_idx; struct net_device *dev;
int h, idx = 0;
int ret = 0; int ret = 0;
rtnl_lock(); rtnl_lock();
for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
struct hlist_head *head; dev_hold(dev);
struct net_device *dev; rtnl_unlock();
unsigned int seq;
ret = ethnl_default_dump_one(skb, dev, ctx, cb);
head = &net->dev_index_head[h];
rtnl_lock();
restart_chain: dev_put(dev);
seq = net->dev_base_seq;
cb->seq = seq;
idx = 0;
hlist_for_each_entry(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
dev_hold(dev);
rtnl_unlock();
ret = ethnl_default_dump_one(skb, dev, ctx, cb);
dev_put(dev);
if (ret < 0) {
if (ret == -EOPNOTSUPP)
goto lock_and_cont;
if (likely(skb->len))
ret = skb->len;
goto out;
}
lock_and_cont:
rtnl_lock();
if (net->dev_base_seq != seq) {
s_idx = idx + 1;
goto restart_chain;
}
cont:
idx++;
}
if (ret < 0 && ret != -EOPNOTSUPP) {
if (likely(skb->len))
ret = skb->len;
break;
}
} }
rtnl_unlock(); rtnl_unlock();
out:
ctx->pos_hash = h;
ctx->pos_idx = idx;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
return ret; return ret;
} }
...@@ -584,8 +554,7 @@ static int ethnl_default_start(struct netlink_callback *cb) ...@@ -584,8 +554,7 @@ static int ethnl_default_start(struct netlink_callback *cb)
ctx->ops = ops; ctx->ops = ops;
ctx->req_info = req_info; ctx->req_info = req_info;
ctx->reply_data = reply_data; ctx->reply_data = reply_data;
ctx->pos_hash = 0; ctx->pos_ifindex = 0;
ctx->pos_idx = 0;
return 0; return 0;
......
...@@ -212,8 +212,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) ...@@ -212,8 +212,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
struct ethnl_tunnel_info_dump_ctx { struct ethnl_tunnel_info_dump_ctx {
struct ethnl_req_info req_info; struct ethnl_req_info req_info;
int pos_hash; unsigned long ifindex;
int pos_idx;
}; };
int ethnl_tunnel_info_start(struct netlink_callback *cb) int ethnl_tunnel_info_start(struct netlink_callback *cb)
...@@ -243,57 +242,39 @@ int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -243,57 +242,39 @@ int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{ {
struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
struct net *net = sock_net(skb->sk); struct net *net = sock_net(skb->sk);
int s_idx = ctx->pos_idx; struct net_device *dev;
int h, idx = 0;
int ret = 0; int ret = 0;
void *ehdr; void *ehdr;
rtnl_lock(); rtnl_lock();
cb->seq = net->dev_base_seq; for_each_netdev_dump(net, dev, ctx->ifindex) {
for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { ehdr = ethnl_dump_put(skb, cb,
struct hlist_head *head; ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
struct net_device *dev; if (!ehdr) {
ret = -EMSGSIZE;
head = &net->dev_index_head[h]; break;
idx = 0;
hlist_for_each_entry(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
ehdr = ethnl_dump_put(skb, cb,
ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
if (!ehdr) {
ret = -EMSGSIZE;
goto out;
}
ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
if (ret < 0) {
genlmsg_cancel(skb, ehdr);
goto out;
}
ctx->req_info.dev = dev;
ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
ctx->req_info.dev = NULL;
if (ret < 0) {
genlmsg_cancel(skb, ehdr);
if (ret == -EOPNOTSUPP)
goto cont;
goto out;
}
genlmsg_end(skb, ehdr);
cont:
idx++;
} }
ret = ethnl_fill_reply_header(skb, dev,
ETHTOOL_A_TUNNEL_INFO_HEADER);
if (ret < 0) {
genlmsg_cancel(skb, ehdr);
break;
}
ctx->req_info.dev = dev;
ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
ctx->req_info.dev = NULL;
if (ret < 0) {
genlmsg_cancel(skb, ehdr);
if (ret == -EOPNOTSUPP)
continue;
break;
}
genlmsg_end(skb, ehdr);
} }
out:
rtnl_unlock(); rtnl_unlock();
ctx->pos_hash = h;
ctx->pos_idx = idx;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
if (ret == -EMSGSIZE && skb->len) if (ret == -EMSGSIZE && skb->len)
return skb->len; return skb->len;
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment