vdpa.c 33 KB
Newer Older
Jason Wang's avatar
Jason Wang committed
1 2 3 4 5 6 7 8 9 10 11 12 13
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
14 15 16
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
17
#include <linux/virtio_ids.h>
Jason Wang's avatar
Jason Wang committed
18

19
/* List of all registered vdpa management devices. */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
/* Allocator for unique device indices, used for the default "vdpa%u" names. */
static DEFINE_IDA(vdpa_index_ida);

24 25
/* Set the device status, serialized against config-space accessors via cf_lock. */
void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);

32 33
static struct genl_family vdpa_nl_family;

Jason Wang's avatar
Jason Wang committed
34 35 36 37
/*
 * Bus probe callback: validate DMA capability and virtqueue size limits
 * before handing the device to the matched vDPA bus driver.
 */
static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	/* vDPA devices are expected to be fully 64-bit DMA capable. */
	d->dma_mask = &d->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(d, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Reject devices whose advertised vq size range is empty. */
	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

59
/* Bus remove callback: forward to the bound driver's remove, if any. */
static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84
static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);

	/* Check override first, and if set, only use the named driver */
	if (vdev->driver_override)
		return strcmp(vdev->driver_override, drv->name) == 0;

	/* Currently devices must be supported by all vDPA bus drivers */
	return 1;
}

/* sysfs store for driver_override; delegates parsing/locking to the driver core. */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

/* sysfs show for driver_override; device_lock guards against concurrent store. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

/* Default sysfs attributes exposed by every device on the vDPA bus. */
static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs  = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

118
/* The vDPA bus type; every vdpa_device and vdpa_driver hangs off this bus. */
static const struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

/*
 * Device release callback, invoked when the last reference is dropped:
 * let the parent free its private data, then reclaim index and memory.
 */
static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_free(&vdpa_index_ida, vdev->index);
	kfree(vdev->driver_override);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows driver to do some preparation after device is
 * initialized but before registered.
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Driver should use vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
 *	   ida.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	/* dma_map and dma_unmap must be provided as a pair. */
	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for the device that use on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err; /* ida_alloc() returned the new index */
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;
	vdev->ngroups = ngroups;
	vdev->nas = nas;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	init_rwsem(&vdev->cf_lock);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_free(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);

216 217 218 219 220 221 222
/* bus_find_device() predicate: match a vDPA device by its device name. */
static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return !strcmp(dev_name(&vdev->dev), data);
}

223
/*
 * Add the device to the vDPA bus, rejecting duplicate names.
 * Caller must hold vdpa_dev_lock.
 */
static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		/* Drop the reference taken by bus_find_device(). */
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have a succeed call of vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add device to vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	/* This path is reserved for devices created via a management device. */
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

Jason Wang's avatar
Jason Wang committed
257 258
/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a succeed call of vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when fail to add to vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	/* Locked variant of _vdpa_register_device() for non-mgmtdev callers. */
	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);

276 277 278 279 280 281 282 283
/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregisted from vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	/* Only management-device-created devices may take this path. */
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

Jason Wang's avatar
Jason Wang committed
290 291 292 293 294 295
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregisted from vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	/* Locked variant of _vdpa_unregister_device() for non-mgmtdev callers. */
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an err when fail to do the registration
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);

328 329 330 331 332 333
/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() register a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success or failure when required callback ops are not
 *         initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	/* dev_add/dev_del are mandatory for user-driven device lifecycle. */
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);

350 351 352 353 354 355 356 357 358 359
/*
 * bus_for_each_dev() callback: delete a device if it belongs to the
 * management device passed in @data.
 */
static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

360 361
/* Unregister a management device and tear down all devices it created. */
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries belong to this management device and delete it. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

373 374 375 376 377 378 379 380 381 382 383
/* Read device config without taking cf_lock; caller must hold it (or own vdev). */
static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}

388 389 390 391 392 393 394 395 396 397
/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);

/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);

420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467
static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* Bus name is optional for simulated management device, so ignore the
	 * device with bus if bus attribute is provided.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

/*
 * Look up a registered management device from netlink attributes.
 * Caller must hold vdpa_dev_lock so the mdev list cannot change underneath.
 */
static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

/* Emit the mgmtdev handle (optional bus name + device name) into a netlink msg. */
static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485
static u64 vdpa_mgmtdev_get_classes(const struct vdpa_mgmt_dev *mdev,
				    unsigned int *nclasses)
{
	u64 supported_classes = 0;
	unsigned int n = 0;

	for (int i = 0; mdev->id_table[i].device; i++) {
		if (mdev->id_table[i].device > 63)
			continue;
		supported_classes |= BIT_ULL(mdev->id_table[i].device);
		n++;
	}
	if (nclasses)
		*nclasses = n;

	return supported_classes;
}

486 487 488 489 490 491 492 493 494 495 496 497 498 499
/* Fill one VDPA_CMD_MGMTDEV_NEW message describing a management device. */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      vdpa_mgmtdev_get_classes(mdev, NULL),
			      VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

/* Netlink doit: reply with the attributes of one named management device. */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	/* genlmsg_reply() consumes msg on success and failure alike. */
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

/* Netlink dumpit: iterate all management devices, resuming from cb->args[0]. */
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	/* Remember where to resume on the next dump invocation. */
	cb->args[0] = idx;
	return msg->len;
}

581 582 583
/* Bitmask of all user-configurable net-class attributes. */
#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))

/*
 * Bitmask for all per-device features: feature bits VIRTIO_TRANSPORT_F_START
 * through VIRTIO_TRANSPORT_F_END are unset, i.e. 0xfffffc000fffffff for
 * all 64bit features. If the features are extended beyond 64 bits, or new
 * "holes" are reserved for other type of features than per-device, this
 * macro would have to be updated.
 */
#define VIRTIO_DEVICE_F_MASK (~0ULL << (VIRTIO_TRANSPORT_F_END + 1) | \
			      ((1ULL << VIRTIO_TRANSPORT_F_START) - 1))

595 596
/*
 * Netlink doit: create a new vdpa device on a management device.
 * Parses optional net-class config (MAC/MTU/max VQ pairs) and provisioned
 * device features, validates them against the mgmtdev's capabilities, then
 * delegates creation to mdev->ops->dev_add() under vdpa_dev_lock.
 */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	unsigned int ncls = 0;
	const u8 *macaddr;
	const char *name;
	u64 classes;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}
	if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
		u64 missing = 0x0ULL;

		config.device_features =
			nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
		/* Each provided net attribute needs its matching feature bit. */
		if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR] &&
		    !(config.device_features & BIT_ULL(VIRTIO_NET_F_MAC)))
			missing |= BIT_ULL(VIRTIO_NET_F_MAC);
		if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU] &&
		    !(config.device_features & BIT_ULL(VIRTIO_NET_F_MTU)))
			missing |= BIT_ULL(VIRTIO_NET_F_MTU);
		if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] &&
		    config.net.max_vq_pairs > 1 &&
		    !(config.device_features & BIT_ULL(VIRTIO_NET_F_MQ)))
			missing |= BIT_ULL(VIRTIO_NET_F_MQ);
		if (missing) {
			NL_SET_ERR_MSG_FMT_MOD(info->extack,
					       "Missing features 0x%llx for provided attributes",
					       missing);
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
	}

	/* Skip checking capability if user didn't prefer to configure any
	 * device networking attributes. It is likely that user might have used
	 * a device specific method to configure such attributes or using device
	 * default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_FMT_MOD(info->extack,
				       "Some provided attributes are not supported: 0x%llx",
				       config.mask & ~mdev->config_attr_mask);
		err = -EOPNOTSUPP;
		goto err;
	}

	classes = vdpa_mgmtdev_get_classes(mdev, &ncls);
	if (config.mask & VDPA_DEV_NET_ATTRS_MASK &&
	    !(classes & BIT_ULL(VIRTIO_ID_NET))) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "Network class attributes provided on unsupported management device");
		err = -EINVAL;
		goto err;
	}
	/*
	 * With a multi-class mgmtdev and no net attribute to disambiguate,
	 * bare device features can't be mapped to a single class.
	 */
	if (!(config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    config.mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES) &&
	    classes & BIT_ULL(VIRTIO_ID_NET) && ncls > 1 &&
	    config.device_features & VIRTIO_DEVICE_F_MASK) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "Management device supports multi-class while device features specified are ambiguous");
		err = -EINVAL;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}

/* Netlink doit: delete a user-created vdpa device via its management device. */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	/* Drop the reference taken by bus_find_device(). */
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}

738 739 740 741 742
/* Fill one VDPA_CMD_DEV_NEW message describing a vdpa device. */
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

/* Netlink doit: reply with the attributes of one named vdpa device. */
static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	/* Only devices created through a management device are reported. */
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}

/* Cursor state threaded through bus_for_each_dev() during a netlink dump. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;	/* resume point from a previous dump pass */
	int idx;	/* current position in the bus iteration */
};

/* bus_for_each_dev() callback: emit one device record per mgmtdev-owned device. */
static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

/* Netlink dumpit: walk every device on the vDPA bus, resuming from cb->args[0]. */
static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}

873
/* Report max_virtqueue_pairs, but only when MQ or RSS is negotiated. */
static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
	    (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}

887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909
/* Report the device MTU, but only when VIRTIO_NET_F_MTU is offered. */
static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->mtu);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
}

/* Report the MAC address, but only when VIRTIO_NET_F_MAC is offered. */
static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
		return 0;

	return  nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
			sizeof(config->mac), config->mac);
}

910 911 912 913 914 915 916 917 918 919 920 921
/* Report link status, but only when VIRTIO_NET_F_STATUS is offered. */
static int vdpa_dev_net_status_config_fill(struct sk_buff *msg, u64 features,
					   const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_STATUS)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->status);
	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16);
}

922 923 924
/* Fill all net-class config attributes gated on the device feature bits. */
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features_device;

	vdev->config->get_config(vdev, 0, &config, sizeof(config));

	features_device = vdev->config->get_device_features(vdev);

	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_status_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}

/*
 * Fill one VDPA_CMD_DEV_CONFIG_GET message; holds cf_lock for a consistent
 * snapshot of the device config space.
 */
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u64 features_driver;
	u8 status = 0;
	u32 device_id;
	void *hdr;
	int err;

	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* only read driver features after the feature negotiation is done */
	status = vdev->config->get_status(vdev);
	if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
		features_driver = vdev->config->get_driver_features(vdev);
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
				      VDPA_ATTR_PAD)) {
			err = -EMSGSIZE;
			goto msg_err;
		}
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}

1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u8 status;
	int err;

	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}
	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

1029 1030 1031 1032
	err = vdpa_dev_net_mq_config_fill(msg, features, &config);
	if (err)
		return err;

1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
	if (err)
		return err;

	return 0;
}

/* Fill vendor virtqueue statistics for @vdev under cf_lock.
 *
 * Returns -EOPNOTSUPP when the parent driver does not implement the
 * optional get_vendor_vq_stats op; otherwise forwards the result of
 * vdpa_fill_stats_rec().
 */
static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
			     struct genl_info *info, u32 index)
{
	int err;

	down_read(&vdev->cf_lock);
	if (!vdev->config->get_vendor_vq_stats) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
	up_read(&vdev->cf_lock);
	return err;
}

/* Build a VDPA_CMD_DEV_VSTATS_GET reply for @vdev, queue @index.
 *
 * Writes the common attributes (name, device id) and then dispatches on
 * the virtio device class; only virtio-net is supported and its queue
 * index is range-checked against the spec maximum.  The genl header is
 * always closed: genlmsg_end() on the (possibly error-carrying) success
 * path, genlmsg_cancel() when even the common attributes did not fit.
 */
static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
				      struct sk_buff *msg,
				      struct genl_info *info, u32 index)
{
	u32 device_id;
	void *hdr;
	int err;
	u32 portid = info->snd_portid;
	u32 seq = info->snd_seq;
	u32 flags = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
			err = -ERANGE;
			break;
		}

		err = vendor_stats_fill(vdev, msg, info, index);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	genlmsg_end(msg, hdr);

	return err;

undo_msg:
	genlmsg_cancel(msg, hdr);
	return err;
}

1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

1125
	down_read(&vdpa_dev_lock);
1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
				   0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);

mdev_err:
	put_device(dev);
dev_err:
1146
	up_read(&vdpa_dev_lock);
1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183
	if (err)
		nlmsg_free(msg);
	return err;
}

/* bus_for_each_dev() callback used by the CONFIG_GET dump: append one
 * device's config record to the dump message, honoring the resume
 * index stored in the dump state.
 */
static int vdpa_dev_config_dump(struct device *dev, void *data)
{
	struct vdpa_dev_dump_info *info = data;
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	int ret;

	/* Only management-tool created devices are reported. */
	if (!vdev->mdev)
		return 0;

	/* Skip records already delivered in a previous dump pass. */
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}

	ret = vdpa_dev_config_fill(vdev, info->msg,
				   NETLINK_CB(info->cb->skb).portid,
				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   info->cb->extack);
	if (ret)
		return ret;

	info->idx++;
	return 0;
}

static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

1184
	down_read(&vdpa_dev_lock);
1185
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
1186
	up_read(&vdpa_dev_lock);
1187 1188 1189 1190
	cb->args[0] = info.idx;
	return msg->len;
}

1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212
static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
					  struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	u32 index;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
1213
	down_read(&vdpa_dev_lock);
1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);

	put_device(dev);
1233
	up_read(&vdpa_dev_lock);
1234 1235 1236 1237 1238 1239 1240

	return err;

mdev_err:
	put_device(dev);
dev_err:
	nlmsg_free(msg);
1241
	up_read(&vdpa_dev_lock);
1242 1243 1244
	return err;
}

1245 1246 1247
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
1248
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
1249
	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
1250
	[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
1251 1252
	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
1253
	[VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
1254
	[VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
1255 1256 1257 1258 1259 1260 1261 1262
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
1263 1264 1265 1266 1267 1268 1269 1270 1271 1272
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
1273 1274 1275 1276 1277
	{
		.cmd = VDPA_CMD_DEV_GET,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
1278 1279 1280 1281 1282
	{
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
1283 1284 1285 1286 1287
	{
		.cmd = VDPA_CMD_DEV_VSTATS_GET,
		.doit = vdpa_nl_cmd_dev_stats_get_doit,
		.flags = GENL_ADMIN_PERM,
	},
1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298
};

/* The "vdpa" generic netlink family definition. */
static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
	.resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};

Jason Wang's avatar
Jason Wang committed
1302 1303
static int vdpa_init(void)
{
1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
Jason Wang's avatar
Jason Wang committed
1317 1318 1319 1320
}

/* Module exit: tear down in reverse registration order, then release
 * any leftover device-index IDA state.
 */
static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
/* Register early (core_initcall) so vdpa parent drivers built-in later
 * in the link order find the bus already present.
 */
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");