Commit ed3c5a0b authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "virtio, vhost: new device, fixes, speedups

  This includes the new virtio crypto device, and fixes all over the
  place. In particular enabling endian-ness checks for sparse builds
  found some bugs which this fixes. And it appears that everyone is in
  agreement that disabling endian-ness sparse checks shouldn't be
  necessary any longer.

  So this enables them for everyone, and drops the __CHECK_ENDIAN__ and
  __bitwise__ APIs.

  IRQ handling in virtio has been refactored somewhat, the larger switch
  to IRQ_SHARED will have to wait as it proved too aggressive"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (34 commits)
  Makefile: drop -D__CHECK_ENDIAN__ from cflags
  fs/logfs: drop __CHECK_ENDIAN__
  Documentation/sparse: drop __CHECK_ENDIAN__
  linux: drop __bitwise__ everywhere
  checkpatch: replace __bitwise__ with __bitwise
  Documentation/sparse: drop __bitwise__
  tools: enable endian checks for all sparse builds
  linux/types.h: enable endian checks for all sparse builds
  virtio_mmio: Set dev.release() to avoid warning
  vhost: remove unused feature bit
  virtio_ring: fix description of virtqueue_get_buf
  vhost/scsi: Remove unused but set variable
  tools/virtio: use {READ,WRITE}_ONCE() in uaccess.h
  vringh: kill off ACCESS_ONCE()
  tools/virtio: fix READ_ONCE()
  crypto: add virtio-crypto driver
  vhost: cache used event for better performance
  vsock: lookup and setup guest_cid inside vhost_vsock_lock
  virtio_pci: split vp_try_to_find_vqs into INTx and MSI-X variants
  virtio_pci: merge vp_free_vectors into vp_del_vqs
  ...
parents 66d46672 6bdf1e0e
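Since the headline change in this merge is sparse's bitwise/endian checking, here is a minimal sketch (not part of the patch; demo_flags_t and the demo_* helpers are made-up names) of what a __bitwise annotation does: sparse treats the typedef as a distinct base type, so every conversion to or from a plain integer needs an explicit __force cast, which is how the __le32/__be32 endian checks now fire on every sparse build.

/*
 * Minimal sketch, not from this series: how a sparse __bitwise type
 * behaves.  demo_flags_t and demo_* are hypothetical; __le32/__be32
 * in linux/types.h are declared the same way.
 */
#include <linux/types.h>

typedef u32 __bitwise demo_flags_t;

static inline demo_flags_t demo_pack(u32 raw)
{
	/* Explicit, intentional conversion: no sparse warning. */
	return (__force demo_flags_t)raw;
}

static inline u32 demo_unpack(demo_flags_t f)
{
	return (__force u32)f;
}

/*
 * An unannotated mix such as "demo_flags_t f = 5;" makes sparse
 * (make C=2) warn about different base types -- and with this series
 * the same warnings for __le32/__be32 misuse are on unconditionally.
 */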
@@ -51,13 +51,6 @@ sure that bitwise types don't get mixed up (little-endian vs big-endian
 vs cpu-endian vs whatever), and there the constant "0" really _is_
 special.
 
-__bitwise__ - to be used for relatively compact stuff (gfp_t, etc.) that
-	is mostly warning-free and is supposed to stay that way.  Warnings will
-	be generated without __CHECK_ENDIAN__.
-
-__bitwise - noisy stuff; in particular, __le*/__be* are that.  We really
-	don't want to drown in noise unless we'd explicitly asked for it.
-
 Using sparse for lock checking
 ------------------------------
@@ -109,9 +102,4 @@ be recompiled or not.  The latter is a fast way to check the whole tree if you
 have already built it.
 
 The optional make variable CF can be used to pass arguments to sparse.  The
-build system passes -Wbitwise to sparse automatically.  To perform endianness
-checks, you may define __CHECK_ENDIAN__::
-
-	make C=2 CF="-D__CHECK_ENDIAN__"
-
-These checks are disabled by default as they generate a host of warnings.
+build system passes -Wbitwise to sparse automatically.
@@ -92,9 +92,4 @@ DaveJ puts hourly auto-generated tarballs of the git source tree at the following address:
 If you have already built the kernel, the latter is a fast way to check the whole source tree.
 
 The optional make variable CHECKFLAGS can be used to pass arguments to the sparse tool.
-The build system automatically passes -Wbitwise to sparse.  You may define
-__CHECK_ENDIAN__ to perform endianness (byte-order) checks:
-
-	make C=2 CHECKFLAGS="-D__CHECK_ENDIAN__"
-
-These checks are disabled by default as they generate a host of warnings.
+The build system automatically passes -Wbitwise to sparse.
@@ -13013,6 +13013,7 @@ F:	drivers/net/virtio_net.c
 F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio_*.h
 F:	include/uapi/linux/virtio_*.h
+F:	drivers/crypto/virtio/
 
 VIRTIO DRIVERS FOR S390
 M:	Christian Borntraeger <borntraeger@de.ibm.com>
@@ -13049,6 +13050,14 @@ S:	Maintained
 F:	drivers/virtio/virtio_input.c
 F:	include/uapi/linux/virtio_input.h
 
+VIRTIO CRYPTO DRIVER
+M:	Gonglei <arei.gonglei@huawei.com>
+L:	virtualization@lists.linux-foundation.org
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/virtio/
+F:	include/uapi/linux/virtio_crypto.h
+
 VIA RHINE NETWORK DRIVER
 S:	Orphan
 F:	drivers/net/ethernet/via/via-rhine.c
@@ -26,7 +26,7 @@
 #include <linux/types.h>
 
-typedef unsigned int __bitwise__ samsung_gpio_pull_t;
+typedef unsigned int __bitwise samsung_gpio_pull_t;
 
 /* forward declaration if gpio-core.h hasn't been included */
 struct samsung_gpio_chip;
@@ -40,5 +40,3 @@ hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
 hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
 hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
 hci_uart-objs := $(hci_uart-y)
-
-ccflags-y += -D__CHECK_ENDIAN__
@@ -152,8 +152,8 @@ struct ports_device {
 	spinlock_t c_ivq_lock;
 	spinlock_t c_ovq_lock;
 
-	/* The current config space is stored here */
-	struct virtio_console_config config;
+	/* max. number of ports this device can hold */
+	u32 max_nr_ports;
 
 	/* The virtio device we're associated with */
 	struct virtio_device *vdev;
@@ -1649,11 +1649,11 @@ static void handle_control_message(struct virtio_device *vdev,
 			break;
 		}
 		if (virtio32_to_cpu(vdev, cpkt->id) >=
-		    portdev->config.max_nr_ports) {
+		    portdev->max_nr_ports) {
 			dev_warn(&portdev->vdev->dev,
 				 "Request for adding port with "
 				 "out-of-bound id %u, max. supported id: %u\n",
-				 cpkt->id, portdev->config.max_nr_ports - 1);
+				 cpkt->id, portdev->max_nr_ports - 1);
 			break;
 		}
 		add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
@@ -1894,7 +1894,7 @@ static int init_vqs(struct ports_device *portdev)
 	u32 i, j, nr_ports, nr_queues;
 	int err;
 
-	nr_ports = portdev->config.max_nr_ports;
+	nr_ports = portdev->max_nr_ports;
 	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
 
 	vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
@@ -2047,13 +2047,13 @@ static int virtcons_probe(struct virtio_device *vdev)
 	}
 
 	multiport = false;
-	portdev->config.max_nr_ports = 1;
+	portdev->max_nr_ports = 1;
 
 	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
 	if (!is_rproc_serial(vdev) &&
 	    virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
				 struct virtio_console_config, max_nr_ports,
-				 &portdev->config.max_nr_ports) == 0) {
+				 &portdev->max_nr_ports) == 0) {
 		multiport = true;
 	}
@@ -555,4 +555,6 @@ config CRYPTO_DEV_ROCKCHIP
 
 source "drivers/crypto/chelsio/Kconfig"
 
+source "drivers/crypto/virtio/Kconfig"
+
 endif # CRYPTO_HW
@@ -32,3 +32,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
config CRYPTO_DEV_VIRTIO
	tristate "VirtIO crypto driver"
	depends on VIRTIO
	select CRYPTO_AEAD
	select CRYPTO_AUTHENC
	select CRYPTO_BLKCIPHER
	default m
	help
	  This driver provides support for the virtio crypto device. If you
	  choose 'M' here, this module will be called virtio_crypto.

obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
virtio_crypto-objs := \
	virtio_crypto_algs.o \
	virtio_crypto_mgr.o \
	virtio_crypto_core.o
/* Algorithms supported by virtio crypto device
*
* Authors: Gonglei <arei.gonglei@huawei.com>
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
/*
 * The algs_lock protects the global virtio_crypto_active_devs below
 * and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
u64 total = 0;
for (total = 0; sg; sg = sg_next(sg))
total += sg->length;
return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
switch (key_len) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_192:
case AES_KEYSIZE_256:
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
break;
default:
pr_err("virtio_crypto: Unsupported key length: %d\n",
key_len);
return -EINVAL;
}
return 0;
}
static int virtio_crypto_alg_ablkcipher_init_session(
struct virtio_crypto_ablkcipher_ctx *ctx,
uint32_t alg, const uint8_t *key,
unsigned int keylen,
int encrypt)
{
struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
unsigned int tmp;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
int err;
unsigned int num_out = 0, num_in = 0;
	/*
	 * Avoid doing DMA from the stack; use a dynamically-allocated
	 * buffer for the key instead.
	 */
uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
if (!cipher_key)
return -ENOMEM;
memcpy(cipher_key, key, keylen);
spin_lock(&vcrypto->ctrl_lock);
/* Pad ctrl header */
vcrypto->ctrl.header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
vcrypto->ctrl.header.algo = cpu_to_le32(alg);
/* Set the default dataqueue id to 0 */
vcrypto->ctrl.header.queue_id = 0;
vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
/* Pad cipher's parameters */
vcrypto->ctrl.u.sym_create_session.op_type =
cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
vcrypto->ctrl.header.algo;
vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
cpu_to_le32(keylen);
vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
cpu_to_le32(op);
sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
sgs[num_out++] = &outhdr;
/* Set key */
sg_init_one(&key_sg, cipher_key, keylen);
sgs[num_out++] = &key_sg;
/* Return status and session id back */
sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
sgs[num_out + num_in++] = &inhdr;
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
num_in, vcrypto, GFP_ATOMIC);
if (err < 0) {
spin_unlock(&vcrypto->ctrl_lock);
kzfree(cipher_key);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
/*
* Trapping into the hypervisor, so the request should be
* handled immediately.
*/
while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
!virtqueue_is_broken(vcrypto->ctrl_vq))
cpu_relax();
if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
spin_unlock(&vcrypto->ctrl_lock);
pr_err("virtio_crypto: Create session failed status: %u\n",
le32_to_cpu(vcrypto->input.status));
kzfree(cipher_key);
return -EINVAL;
}
if (encrypt)
ctx->enc_sess_info.session_id =
le64_to_cpu(vcrypto->input.session_id);
else
ctx->dec_sess_info.session_id =
le64_to_cpu(vcrypto->input.session_id);
spin_unlock(&vcrypto->ctrl_lock);
kzfree(cipher_key);
return 0;
}
static int virtio_crypto_alg_ablkcipher_close_session(
struct virtio_crypto_ablkcipher_ctx *ctx,
int encrypt)
{
struct scatterlist outhdr, status_sg, *sgs[2];
unsigned int tmp;
struct virtio_crypto_destroy_session_req *destroy_session;
struct virtio_crypto *vcrypto = ctx->vcrypto;
int err;
unsigned int num_out = 0, num_in = 0;
spin_lock(&vcrypto->ctrl_lock);
vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
/* Pad ctrl header */
vcrypto->ctrl.header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
/* Set the default virtqueue id to 0 */
vcrypto->ctrl.header.queue_id = 0;
destroy_session = &vcrypto->ctrl.u.destroy_session;
if (encrypt)
destroy_session->session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
else
destroy_session->session_id =
cpu_to_le64(ctx->dec_sess_info.session_id);
sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
sgs[num_out++] = &outhdr;
/* Return status and session id back */
sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
sizeof(vcrypto->ctrl_status.status));
sgs[num_out + num_in++] = &status_sg;
err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
num_in, vcrypto, GFP_ATOMIC);
if (err < 0) {
spin_unlock(&vcrypto->ctrl_lock);
return err;
}
virtqueue_kick(vcrypto->ctrl_vq);
while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
!virtqueue_is_broken(vcrypto->ctrl_vq))
cpu_relax();
if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
spin_unlock(&vcrypto->ctrl_lock);
pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
vcrypto->ctrl_status.status,
destroy_session->session_id);
return -EINVAL;
}
spin_unlock(&vcrypto->ctrl_lock);
return 0;
}
static int virtio_crypto_alg_ablkcipher_init_sessions(
struct virtio_crypto_ablkcipher_ctx *ctx,
const uint8_t *key, unsigned int keylen)
{
uint32_t alg;
int ret;
struct virtio_crypto *vcrypto = ctx->vcrypto;
if (keylen > vcrypto->max_cipher_key_len) {
pr_err("virtio_crypto: the key is too long\n");
goto bad_key;
}
if (virtio_crypto_alg_validate_key(keylen, &alg))
goto bad_key;
/* Create encryption session */
ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
alg, key, keylen, 1);
if (ret)
return ret;
/* Create decryption session */
ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
alg, key, keylen, 0);
if (ret) {
virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
return ret;
}
return 0;
bad_key:
crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/* Note: kernel crypto API realization */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const uint8_t *key,
unsigned int keylen)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
int ret;
if (!ctx->vcrypto) {
/* New key */
int node = virtio_crypto_get_current_node();
struct virtio_crypto *vcrypto =
virtcrypto_get_dev_node(node);
if (!vcrypto) {
pr_err("virtio_crypto: Could not find a virtio device in the system");
return -ENODEV;
}
ctx->vcrypto = vcrypto;
} else {
		/* Rekeying: close the previously created sessions first */
virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
}
ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
if (ret) {
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
return ret;
}
return 0;
}
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
struct ablkcipher_request *req,
struct data_queue *data_vq,
__u8 op)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
int i;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
src_nents = sg_nents_for_len(req->src, req->nbytes);
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
src_nents, dst_nents);
/* Why 3? outhdr + iv + inhdr */
sg_total = src_nents + dst_nents + 3;
sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
dev_to_node(&vcrypto->vdev->dev));
if (!sgs)
return -ENOMEM;
req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
dev_to_node(&vcrypto->vdev->dev));
if (!req_data) {
kfree(sgs);
return -ENOMEM;
}
vc_req->req_data = req_data;
vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
/* Head of operation */
if (op) {
req_data->header.session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
req_data->header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
} else {
req_data->header.session_id =
cpu_to_le64(ctx->dec_sess_info.session_id);
req_data->header.opcode =
cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
}
req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
req_data->u.sym_req.u.cipher.para.src_data_len =
cpu_to_le32(req->nbytes);
dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
if (unlikely(dst_len > U32_MAX)) {
pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
err = -EINVAL;
goto free;
}
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->nbytes, dst_len);
if (unlikely(req->nbytes + dst_len + ivsize +
sizeof(vc_req->status) > vcrypto->max_size)) {
pr_err("virtio_crypto: The length is too big\n");
err = -EINVAL;
goto free;
}
req_data->u.sym_req.u.cipher.para.dst_data_len =
cpu_to_le32((uint32_t)dst_len);
/* Outhdr */
sg_init_one(&outhdr, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr;
/* IV */
	/*
	 * Avoid doing DMA from the stack; use a dynamically-allocated
	 * buffer for the IV instead.
	 */
iv = kzalloc_node(ivsize, GFP_ATOMIC,
dev_to_node(&vcrypto->vdev->dev));
if (!iv) {
err = -ENOMEM;
goto free;
}
memcpy(iv, req->info, ivsize);
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
vc_req->iv = iv;
/* Source data */
for (i = 0; i < src_nents; i++)
sgs[num_out++] = &req->src[i];
/* Destination data */
for (i = 0; i < dst_nents; i++)
sgs[num_out + num_in++] = &req->dst[i];
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
sgs[num_out + num_in++] = &status_sg;
vc_req->sgs = sgs;
spin_lock_irqsave(&data_vq->lock, flags);
err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
num_in, vc_req, GFP_ATOMIC);
virtqueue_kick(data_vq->vq);
spin_unlock_irqrestore(&data_vq->lock, flags);
if (unlikely(err < 0))
goto free_iv;
return 0;
free_iv:
kzfree(iv);
free:
kzfree(req_data);
kfree(sgs);
return err;
}
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
struct virtio_crypto *vcrypto = ctx->vcrypto;
int ret;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
vc_req->ablkcipher_ctx = ctx;
vc_req->ablkcipher_req = req;
ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
if (ret < 0) {
pr_err("virtio_crypto: Encryption failed!\n");
return ret;
}
return -EINPROGRESS;
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
struct virtio_crypto *vcrypto = ctx->vcrypto;
int ret;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
vc_req->ablkcipher_ctx = ctx;
vc_req->ablkcipher_req = req;
ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 0);
if (ret < 0) {
pr_err("virtio_crypto: Decryption failed!\n");
return ret;
}
return -EINPROGRESS;
}
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
ctx->tfm = tfm;
return 0;
}
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
if (!ctx->vcrypto)
return;
virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
virtcrypto_dev_put(ctx->vcrypto);
ctx->vcrypto = NULL;
}
static struct crypto_alg virtio_crypto_algs[] = { {
.cra_name = "cbc(aes)",
.cra_driver_name = "virtio_crypto_aes_cbc",
.cra_priority = 501,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_type = &crypto_ablkcipher_type,
.cra_init = virtio_crypto_ablkcipher_init,
.cra_exit = virtio_crypto_ablkcipher_exit,
.cra_u = {
.ablkcipher = {
.setkey = virtio_crypto_ablkcipher_setkey,
.decrypt = virtio_crypto_ablkcipher_decrypt,
.encrypt = virtio_crypto_ablkcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
},
} };
int virtio_crypto_algs_register(void)
{
int ret = 0;
mutex_lock(&algs_lock);
if (++virtio_crypto_active_devs != 1)
goto unlock;
ret = crypto_register_algs(virtio_crypto_algs,
ARRAY_SIZE(virtio_crypto_algs));
if (ret)
virtio_crypto_active_devs--;
unlock:
mutex_unlock(&algs_lock);
return ret;
}
void virtio_crypto_algs_unregister(void)
{
mutex_lock(&algs_lock);
if (--virtio_crypto_active_devs != 0)
goto unlock;
crypto_unregister_algs(virtio_crypto_algs,
ARRAY_SIZE(virtio_crypto_algs));
unlock:
mutex_unlock(&algs_lock);
}
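For context, a hedged sketch (not part of the patch) of how a kernel user would exercise the "cbc(aes)" algorithm this file registers: everything below is the standard skcipher interface of this kernel generation, while the demo_* names, the all-zero key, and the buffer contents are made up for illustration. Note the data buffer is static rather than on the stack, since the driver maps it into a scatterlist for DMA.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct demo_result {
	struct completion completion;
	int err;
};

static void demo_done(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int demo_cbc_aes_encrypt_one_block(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	struct demo_result res;
	static u8 buf[AES_BLOCK_SIZE];	/* not on the stack: it gets DMA'd */
	u8 key[AES_MIN_KEY_SIZE] = {};	/* all-zero AES-128 demo key */
	u8 iv[AES_BLOCK_SIZE] = {};
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	init_completion(&res.completion);
	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      demo_done, &res);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* The virtio driver completes asynchronously, so wait for it. */
	ret = crypto_skcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}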
/* Common header for Virtio crypto device.
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H
#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
struct virtqueue *vq;
/* To protect the vq operations for the dataq */
spinlock_t lock;
/* Name of the tx queue: dataq.$index */
char name[32];
};
struct virtio_crypto {
struct virtio_device *vdev;
struct virtqueue *ctrl_vq;
struct data_queue *data_vq;
/* To protect the vq operations for the controlq */
spinlock_t ctrl_lock;
	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;
	/* Number of queues currently used by the driver */
	u32 curr_queue;
	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size per request */
	u64 max_size;
/* Control VQ buffers: protected by the ctrl_lock */
struct virtio_crypto_op_ctrl_req ctrl;
struct virtio_crypto_session_input input;
struct virtio_crypto_inhdr ctrl_status;
unsigned long status;
atomic_t ref_count;
struct list_head list;
struct module *owner;
uint8_t dev_id;
	/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
};
struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
__u64 session_id;
};
struct virtio_crypto_ablkcipher_ctx {
struct virtio_crypto *vcrypto;
struct crypto_tfm *tfm;
struct virtio_crypto_sym_session_info enc_sess_info;
struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_request {
/* Cipher or aead */
uint32_t type;
uint8_t status;
struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
struct ablkcipher_request *ablkcipher_req;
struct virtio_crypto_op_data_req *req_data;
struct scatterlist **sgs;
uint8_t *iv;
};
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_get_dev_node(int node);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
static inline int virtio_crypto_get_current_node(void)
{
int cpu, node;
cpu = get_cpu();
node = topology_physical_package_id(cpu);
put_cpu();
return node;
}
int virtio_crypto_algs_register(void);
void virtio_crypto_algs_unregister(void);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
/* Driver for Virtio crypto device.
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
static void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
if (vc_req) {
kzfree(vc_req->iv);
kzfree(vc_req->req_data);
kfree(vc_req->sgs);
}
}
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct virtio_crypto_request *vc_req;
unsigned long flags;
unsigned int len;
struct ablkcipher_request *ablk_req;
int error;
unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
switch (vc_req->status) {
case VIRTIO_CRYPTO_OK:
error = 0;
break;
case VIRTIO_CRYPTO_INVSESS:
case VIRTIO_CRYPTO_ERR:
error = -EINVAL;
break;
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
default:
error = -EIO;
break;
}
ablk_req = vc_req->ablkcipher_req;
virtcrypto_clear_request(vc_req);
spin_unlock_irqrestore(
&vcrypto->data_vq[qid].lock, flags);
/* Finish the encrypt or decrypt process */
ablk_req->base.complete(&ablk_req->base, error);
spin_lock_irqsave(
&vcrypto->data_vq[qid].lock, flags);
}
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
vq_callback_t **callbacks;
struct virtqueue **vqs;
int ret = -ENOMEM;
int i, total_vqs;
const char **names;
/*
* We expect 1 data virtqueue, followed by
* possible N-1 data queues used in multiqueue mode,
* followed by control vq.
*/
total_vqs = vi->max_data_queues + 1;
/* Allocate space for find_vqs parameters */
vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_vq;
callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
if (!callbacks)
goto err_callback;
names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
if (!names)
goto err_names;
/* Parameters for control virtqueue */
callbacks[total_vqs - 1] = NULL;
names[total_vqs - 1] = "controlq";
/* Allocate/initialize parameters for data virtqueues */
for (i = 0; i < vi->max_data_queues; i++) {
callbacks[i] = virtcrypto_dataq_callback;
snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
"dataq.%d", i);
names[i] = vi->data_vq[i].name;
}
ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
names);
if (ret)
goto err_find;
vi->ctrl_vq = vqs[total_vqs - 1];
for (i = 0; i < vi->max_data_queues; i++) {
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
}
kfree(names);
kfree(callbacks);
kfree(vqs);
return 0;
err_find:
kfree(names);
err_names:
kfree(callbacks);
err_callback:
kfree(vqs);
err_vq:
return ret;
}
static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
GFP_KERNEL);
if (!vi->data_vq)
return -ENOMEM;
return 0;
}
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
int i;
if (vi->affinity_hint_set) {
for (i = 0; i < vi->max_data_queues; i++)
virtqueue_set_affinity(vi->data_vq[i].vq, -1);
vi->affinity_hint_set = false;
}
}
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
int i = 0;
int cpu;
/*
* In single queue mode, we don't set the cpu affinity.
*/
if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
virtcrypto_clean_affinity(vcrypto, -1);
return;
}
	/*
	 * In multiqueue mode, we let each queue be private to one cpu
	 * by setting the affinity hint to eliminate contention.
	 *
	 * TODO: add cpu hotplug support by registering a cpu notifier.
	 */
for_each_online_cpu(cpu) {
virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
if (++i >= vcrypto->max_data_queues)
break;
}
vcrypto->affinity_hint_set = true;
}
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
kfree(vi->data_vq);
}
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
int ret;
/* Allocate send & receive queues */
ret = virtcrypto_alloc_queues(vi);
if (ret)
goto err;
ret = virtcrypto_find_vqs(vi);
if (ret)
goto err_free;
get_online_cpus();
virtcrypto_set_affinity(vi);
put_online_cpus();
return 0;
err_free:
virtcrypto_free_queues(vi);
err:
return ret;
}
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
u32 status;
int err;
virtio_cread(vcrypto->vdev,
struct virtio_crypto_config, status, &status);
/*
* Unknown status bits would be a host error and the driver
* should consider the device to be broken.
*/
if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
dev_warn(&vcrypto->vdev->dev,
"Unknown status bits: 0x%x\n", status);
virtio_break_device(vcrypto->vdev);
return -EPERM;
}
if (vcrypto->status == status)
return 0;
vcrypto->status = status;
if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
err = virtcrypto_dev_start(vcrypto);
if (err) {
dev_err(&vcrypto->vdev->dev,
"Failed to start virtio crypto device.\n");
return -EPERM;
}
dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
} else {
virtcrypto_dev_stop(vcrypto);
dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
}
return 0;
}
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
struct virtio_device *vdev = vcrypto->vdev;
virtcrypto_clean_affinity(vcrypto, -1);
vdev->config->del_vqs(vdev);
virtcrypto_free_queues(vcrypto);
}
static int virtcrypto_probe(struct virtio_device *vdev)
{
int err = -EFAULT;
struct virtio_crypto *vcrypto;
u32 max_data_queues = 0, max_cipher_key_len = 0;
u32 max_auth_key_len = 0;
u64 max_size = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
/*
* If the accelerator is connected to a node with no memory
* there is no point in using the accelerator since the remote
* memory transaction will be very slow.
*/
dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
dev_to_node(&vdev->dev));
if (!vcrypto)
return -ENOMEM;
virtio_cread(vdev, struct virtio_crypto_config,
max_dataqueues, &max_data_queues);
if (max_data_queues < 1)
max_data_queues = 1;
virtio_cread(vdev, struct virtio_crypto_config,
max_cipher_key_len, &max_cipher_key_len);
virtio_cread(vdev, struct virtio_crypto_config,
max_auth_key_len, &max_auth_key_len);
virtio_cread(vdev, struct virtio_crypto_config,
max_size, &max_size);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
if (err) {
dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
goto free;
}
vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
vcrypto->vdev = vdev;
spin_lock_init(&vcrypto->ctrl_lock);
/* Use single data queue as default */
vcrypto->curr_queue = 1;
vcrypto->max_data_queues = max_data_queues;
vcrypto->max_cipher_key_len = max_cipher_key_len;
vcrypto->max_auth_key_len = max_auth_key_len;
vcrypto->max_size = max_size;
dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
vcrypto->max_data_queues,
vcrypto->max_cipher_key_len,
vcrypto->max_auth_key_len,
vcrypto->max_size);
err = virtcrypto_init_vqs(vcrypto);
if (err) {
dev_err(&vdev->dev, "Failed to initialize vqs.\n");
goto free_dev;
}
virtio_device_ready(vdev);
err = virtcrypto_update_status(vcrypto);
if (err)
goto free_vqs;
return 0;
free_vqs:
vcrypto->vdev->config->reset(vdev);
virtcrypto_del_vqs(vcrypto);
free_dev:
virtcrypto_devmgr_rm_dev(vcrypto);
free:
kfree(vcrypto);
return err;
}
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
struct virtio_crypto_request *vc_req;
int i;
struct virtqueue *vq;
for (i = 0; i < vcrypto->max_data_queues; i++) {
vq = vcrypto->data_vq[i].vq;
while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
kfree(vc_req->req_data);
kfree(vc_req->sgs);
}
}
}
static void virtcrypto_remove(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
vdev->config->reset(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_del_vqs(vcrypto);
virtcrypto_devmgr_rm_dev(vcrypto);
kfree(vcrypto);
}
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
virtcrypto_update_status(vcrypto);
}
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
vdev->config->reset(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
virtcrypto_del_vqs(vcrypto);
return 0;
}
static int virtcrypto_restore(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
int err;
err = virtcrypto_init_vqs(vcrypto);
if (err)
return err;
virtio_device_ready(vdev);
err = virtcrypto_dev_start(vcrypto);
if (err) {
dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
return -EFAULT;
}
return 0;
}
#endif
static unsigned int features[] = {
/* none */
};
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static struct virtio_driver virtio_crypto_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
.probe = virtcrypto_probe,
.remove = virtcrypto_remove,
.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
.freeze = virtcrypto_freeze,
.restore = virtcrypto_restore,
#endif
};
module_virtio_driver(virtio_crypto_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
*
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/module.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
static LIST_HEAD(virtio_crypto_table);
static uint32_t num_devices;
/* The table_lock protects the above global list and num_devices */
static DEFINE_MUTEX(table_lock);
#define VIRTIO_CRYPTO_MAX_DEVICES 32
/*
* virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
* framework.
* @vcrypto_dev: Pointer to virtio crypto device.
*
* Function adds virtio crypto device to the global list.
* To be used by virtio crypto device specific drivers.
*
 * Return: 0 on success, error code otherwise.
*/
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
{
struct list_head *itr;
mutex_lock(&table_lock);
if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
pr_info("virtio_crypto: only support up to %d devices\n",
VIRTIO_CRYPTO_MAX_DEVICES);
mutex_unlock(&table_lock);
return -EFAULT;
}
list_for_each(itr, &virtio_crypto_table) {
struct virtio_crypto *ptr =
list_entry(itr, struct virtio_crypto, list);
if (ptr == vcrypto_dev) {
mutex_unlock(&table_lock);
return -EEXIST;
}
}
atomic_set(&vcrypto_dev->ref_count, 0);
list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
vcrypto_dev->dev_id = num_devices++;
mutex_unlock(&table_lock);
return 0;
}
struct list_head *virtcrypto_devmgr_get_head(void)
{
return &virtio_crypto_table;
}
/*
* virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
* framework.
* @vcrypto_dev: Pointer to virtio crypto device.
*
* Function removes virtio crypto device from the acceleration framework.
* To be used by virtio crypto device specific drivers.
*
* Return: void
*/
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
{
mutex_lock(&table_lock);
list_del(&vcrypto_dev->list);
num_devices--;
mutex_unlock(&table_lock);
}
/*
* virtcrypto_devmgr_get_first()
*
* Function returns the first virtio crypto device from the acceleration
* framework.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
struct virtio_crypto *virtcrypto_devmgr_get_first(void)
{
struct virtio_crypto *dev = NULL;
mutex_lock(&table_lock);
if (!list_empty(&virtio_crypto_table))
dev = list_first_entry(&virtio_crypto_table,
struct virtio_crypto,
list);
mutex_unlock(&table_lock);
return dev;
}
/*
* virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
* @vcrypto_dev: Pointer to virtio crypto device.
*
* To be used by virtio crypto device specific drivers.
*
* Return: 1 when device is in use, 0 otherwise.
*/
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
{
return atomic_read(&vcrypto_dev->ref_count) != 0;
}
/*
* virtcrypto_dev_get() - Increment vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
* Increment the vcrypto_dev refcount and if this is the first time
* incrementing it during this period the vcrypto_dev is in use,
* increment the module refcount too.
* To be used by virtio crypto device specific drivers.
*
 * Return: 0 when successful, -EFAULT when it fails to bump the module refcount
*/
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
{
if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
if (!try_module_get(vcrypto_dev->owner))
return -EFAULT;
return 0;
}
/*
* virtcrypto_dev_put() - Decrement vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
* Decrement the vcrypto_dev refcount and if this is the last time
* decrementing it during this period the vcrypto_dev is in use,
* decrement the module refcount too.
* To be used by virtio crypto device specific drivers.
*
* Return: void
*/
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
{
if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
module_put(vcrypto_dev->owner);
}
/*
* virtcrypto_dev_started() - Check whether device has started
* @vcrypto_dev: Pointer to virtio crypto device.
*
* To be used by virtio crypto device specific drivers.
*
* Return: 1 when the device has started, 0 otherwise
*/
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
{
return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
}
/*
* virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
* @node: Node id the driver works.
*
 * Function returns the virtio crypto device with the fewest users on the node.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
struct virtio_crypto *virtcrypto_get_dev_node(int node)
{
struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
unsigned long best = ~0;
unsigned long ctr;
mutex_lock(&table_lock);
list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
dev_to_node(&tmp_dev->vdev->dev) < 0) &&
virtcrypto_dev_started(tmp_dev)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
vcrypto_dev = tmp_dev;
best = ctr;
}
}
}
if (!vcrypto_dev) {
pr_info("virtio_crypto: Could not find a device on node %d\n",
node);
/* Get any started device */
list_for_each_entry(tmp_dev,
virtcrypto_devmgr_get_head(), list) {
if (virtcrypto_dev_started(tmp_dev)) {
vcrypto_dev = tmp_dev;
break;
}
}
}
mutex_unlock(&table_lock);
if (!vcrypto_dev)
return NULL;
virtcrypto_dev_get(vcrypto_dev);
return vcrypto_dev;
}
/*
* virtcrypto_dev_start() - Start virtio crypto device
* @vcrypto: Pointer to virtio crypto device.
*
* Function notifies all the registered services that the virtio crypto device
* is ready to be used.
* To be used by virtio crypto device specific drivers.
*
* Return: 0 on success, EFAULT when fail to register algorithms
*/
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
if (virtio_crypto_algs_register()) {
pr_err("virtio_crypto: Failed to register crypto algs\n");
return -EFAULT;
}
return 0;
}
/*
* virtcrypto_dev_stop() - Stop virtio crypto device
* @vcrypto: Pointer to virtio crypto device.
*
 * Function notifies all the registered services that the virtio crypto device
 * is about to be stopped and must no longer be used.
* To be used by virtio crypto device specific drivers.
*
* Return: void
*/
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
virtio_crypto_algs_unregister();
}
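A hedged sketch of the intended calling pattern for the manager API above; it mirrors what virtio_crypto_ablkcipher_setkey() and the tfm exit path already do, and demo_use_device() itself is a made-up name:

/* demo_use_device() is hypothetical; the get/put pairing is the real API. */
static int demo_use_device(void)
{
	int node = virtio_crypto_get_current_node();
	struct virtio_crypto *vcrypto = virtcrypto_get_dev_node(node);

	if (!vcrypto)
		return -ENODEV;	/* no started device in the system */

	/* ... create sessions / submit requests on vcrypto ... */

	/* Drop the reference virtcrypto_get_dev_node() took for us. */
	virtcrypto_dev_put(vcrypto);
	return 0;
}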
@@ -88,8 +88,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 			(vgdev, handle, 0,
 			 cpu_to_le32(plane->state->src_w >> 16),
 			 cpu_to_le32(plane->state->src_h >> 16),
-			 plane->state->src_x >> 16,
-			 plane->state->src_y >> 16, NULL);
+			 cpu_to_le32(plane->state->src_x >> 16),
+			 cpu_to_le32(plane->state->src_y >> 16), NULL);
 		}
 	} else {
 		handle = 0;
@@ -109,8 +109,10 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 	spin_lock(&vgdev->free_vbufs_lock);
 	for (i = 0; i < count; i++) {
-		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
+		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
+			spin_unlock(&vgdev->free_vbufs_lock);
 			return;
+		}
 		vbuf = list_first_entry(&vgdev->free_vbufs,
					 struct virtio_gpu_vbuffer, list);
 		list_del(&vbuf->list);
@@ -295,6 +297,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 
 static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					        struct virtio_gpu_vbuffer *vbuf)
+	__releases(&vgdev->ctrlq.qlock)
+	__acquires(&vgdev->ctrlq.qlock)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	struct scatterlist *sgs[3], vcmd, vout, vresp;
@@ -17,9 +17,9 @@
  * discard bitset.
  */
 
-typedef dm_block_t __bitwise__ dm_oblock_t;
-typedef uint32_t __bitwise__ dm_cblock_t;
-typedef dm_block_t __bitwise__ dm_dblock_t;
+typedef dm_block_t __bitwise dm_oblock_t;
+typedef uint32_t __bitwise dm_cblock_t;
+typedef dm_block_t __bitwise dm_dblock_t;
 
 static inline dm_oblock_t to_oblock(dm_block_t b)
 {
@@ -31,5 +31,4 @@ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
 obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
 obj-$(CONFIG_PCH_CAN) += pch_can.o
 
-subdir-ccflags-y += -D__CHECK_ENDIAN__
 subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
@@ -5,4 +5,3 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
		    altera_msgdma.o altera_sgdma.o altera_utils.o
-ccflags-y += -D__CHECK_ENDIAN__

 obj-$(CONFIG_ALX) += alx.o
 alx-objs := main.o ethtool.o hw.o
-ccflags-y += -D__CHECK_ENDIAN__
@@ -4,8 +4,6 @@
 obj-$(CONFIG_FEC) += fec.o
 fec-objs :=fec_main.o fec_ptp.o
-CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
-CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
 
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
@@ -302,7 +302,7 @@
  * Always write the address first before setting the ownership
  * bits to avoid races with the hardware scanning the ring.
  */
-typedef u32 __bitwise__ hme32;
+typedef u32 __bitwise hme32;
 
 struct happy_meal_rxd {
 	hme32 rx_flags;
@@ -19,6 +19,4 @@ ath-objs := main.o \
 ath-$(CONFIG_ATH_DEBUG) += debug.o
 ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
 
-ccflags-y += -D__CHECK_ENDIAN__
-
 CFLAGS_trace.o := -I$(src)
@@ -22,5 +22,3 @@ wil6210-y += p2p.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
-
-subdir-ccflags-y += -D__CHECK_ENDIAN__
@@ -19,8 +19,6 @@ ccflags-y += \
	-Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \
	-Idrivers/net/wireless/broadcom/brcm80211/include
 
-ccflags-y += -D__CHECK_ENDIAN__
-
 obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
 brcmfmac-objs += \
	cfg80211.o \
@@ -16,7 +16,6 @@
 # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 ccflags-y := \
-	-D__CHECK_ENDIAN__ \
	-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
	-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
	-Idrivers/net/wireless/broadcom/brcm80211/include
@@ -13,5 +13,3 @@ iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
 obj-$(CONFIG_IWL3945) += iwl3945.o
 iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
 iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
-
-ccflags-y += -D__CHECK_ENDIAN__
@@ -15,7 +15,7 @@ iwlwifi-objs += $(iwlwifi-m)
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_IWLDVM) += dvm/
 obj-$(CONFIG_IWLMVM) += mvm/
@@ -10,4 +10,4 @@ iwldvm-objs += rxon.o devices.o
 iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
 iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
@@ -228,7 +228,7 @@ enum iwl_ucode_tlv_flag {
	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
 };
 
-typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
 
 /**
  * enum iwl_ucode_tlv_api - ucode api
@@ -258,7 +258,7 @@ enum iwl_ucode_tlv_api {
 #endif
 };
 
-typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 
 /**
  * enum iwl_ucode_tlv_capa - ucode capabilities
@@ -9,4 +9,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
 iwlmvm-y += tof.o fw-dbg.o
 iwlmvm-$(CONFIG_PM) += d3.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
@@ -12,6 +12,3 @@ obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
 obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
 obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
 obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
-
-# Orinoco should be endian clean.
-ccflags-y += -D__CHECK_ENDIAN__

-ccflags-y += -D__CHECK_ENDIAN__
 obj-$(CONFIG_MT7601U) += mt7601u.o
 mt7601u-objs = \
...@@ -30,5 +30,3 @@ obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/ ...@@ -30,5 +30,3 @@ obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/
obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/ obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/
obj-$(CONFIG_RTL8821AE) += rtl8821ae/ obj-$(CONFIG_RTL8821AE) += rtl8821ae/
obj-$(CONFIG_RTL8192EE) += rtl8192ee/ obj-$(CONFIG_RTL8192EE) += rtl8192ee/
ccflags-y += -D__CHECK_ENDIAN__
...@@ -3,5 +3,3 @@ btcoexist-objs := halbtc8723b2ant.o \ ...@@ -3,5 +3,3 @@ btcoexist-objs := halbtc8723b2ant.o \
rtl_btc.o rtl_btc.o
obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -11,5 +11,3 @@ rtl8188ee-objs := \ ...@@ -11,5 +11,3 @@ rtl8188ee-objs := \
trx.o trx.o
obj-$(CONFIG_RTL8188EE) += rtl8188ee.o obj-$(CONFIG_RTL8188EE) += rtl8188ee.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -5,5 +5,3 @@ rtl8192c-common-objs := \ ...@@ -5,5 +5,3 @@ rtl8192c-common-objs := \
phy_common.o phy_common.o
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -9,5 +9,3 @@ rtl8192ce-objs := \ ...@@ -9,5 +9,3 @@ rtl8192ce-objs := \
trx.o trx.o
obj-$(CONFIG_RTL8192CE) += rtl8192ce.o obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -10,5 +10,3 @@ rtl8192cu-objs := \ ...@@ -10,5 +10,3 @@ rtl8192cu-objs := \
trx.o trx.o
obj-$(CONFIG_RTL8192CU) += rtl8192cu.o obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -10,5 +10,3 @@ rtl8192de-objs := \ ...@@ -10,5 +10,3 @@ rtl8192de-objs := \
trx.o trx.o
obj-$(CONFIG_RTL8192DE) += rtl8192de.o obj-$(CONFIG_RTL8192DE) += rtl8192de.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -12,5 +12,3 @@ rtl8192ee-objs := \ ...@@ -12,5 +12,3 @@ rtl8192ee-objs := \
obj-$(CONFIG_RTL8192EE) += rtl8192ee.o obj-$(CONFIG_RTL8192EE) += rtl8192ee.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -11,5 +11,3 @@ rtl8192se-objs := \ ...@@ -11,5 +11,3 @@ rtl8192se-objs := \
obj-$(CONFIG_RTL8192SE) += rtl8192se.o obj-$(CONFIG_RTL8192SE) += rtl8192se.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -14,5 +14,3 @@ rtl8723ae-objs := \ ...@@ -14,5 +14,3 @@ rtl8723ae-objs := \
obj-$(CONFIG_RTL8723AE) += rtl8723ae.o obj-$(CONFIG_RTL8723AE) += rtl8723ae.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -12,5 +12,3 @@ rtl8723be-objs := \ ...@@ -12,5 +12,3 @@ rtl8723be-objs := \
obj-$(CONFIG_RTL8723BE) += rtl8723be.o obj-$(CONFIG_RTL8723BE) += rtl8723be.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -5,5 +5,3 @@ rtl8723-common-objs := \ ...@@ -5,5 +5,3 @@ rtl8723-common-objs := \
phy_common.o phy_common.o
obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -12,5 +12,3 @@ rtl8821ae-objs := \ ...@@ -12,5 +12,3 @@ rtl8821ae-objs := \
obj-$(CONFIG_RTL8821AE) += rtl8821ae.o obj-$(CONFIG_RTL8821AE) += rtl8821ae.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -6,5 +6,3 @@ wl1251_sdio-objs += sdio.o ...@@ -6,5 +6,3 @@ wl1251_sdio-objs += sdio.o
obj-$(CONFIG_WL1251) += wl1251.o obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -8,5 +8,3 @@ wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o ...@@ -8,5 +8,3 @@ wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WLCORE) += wlcore.o obj-$(CONFIG_WLCORE) += wlcore.o
obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
ccflags-y += -D__CHECK_ENDIAN__
...@@ -53,4 +53,4 @@ r8188eu-y := \ ...@@ -53,4 +53,4 @@ r8188eu-y := \
obj-$(CONFIG_R8188EU) := r8188eu.o obj-$(CONFIG_R8188EU) := r8188eu.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include ccflags-y += -I$(srctree)/$(src)/include
...@@ -17,5 +17,3 @@ obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o ...@@ -17,5 +17,3 @@ obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o
obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
obj-$(CONFIG_RTL8192E) += rtl8192e/ obj-$(CONFIG_RTL8192E) += rtl8192e/
ccflags-y += -D__CHECK_ENDIAN__
...@@ -16,5 +16,3 @@ r8192e_pci-objs := \ ...@@ -16,5 +16,3 @@ r8192e_pci-objs := \
rtl_wx.o \ rtl_wx.o \
obj-$(CONFIG_RTL8192E) += r8192e_pci.o obj-$(CONFIG_RTL8192E) += r8192e_pci.o
ccflags-y += -D__CHECK_ENDIAN__
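With sparse's endianness checks now enabled unconditionally, the per-driver "ccflags-y += -D__CHECK_ENDIAN__" lines above are dead weight, which is why the series deletes them wholesale. For illustration, a minimal sketch (not from this series) of the class of bug that "make C=2" now flags in every driver; the struct and helper names are invented:

/* Minimal sketch (not from this series): the kind of mixed-endian
 * bug that sparse's -Wbitwise catches now that endian checking is
 * always on. Build with "make C=2"; the build system already
 * passes -Wbitwise.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_hdr {
	__le16 len;			/* little-endian on the wire */
};

static inline u16 demo_get_len(const struct demo_hdr *hdr)
{
	/* "return hdr->len + 1;" would now warn: restricted __le16
	 * degrades to integer. Convert explicitly instead:
	 */
	return le16_to_cpu(hdr->len) + 1;
}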
...@@ -1749,7 +1749,6 @@ static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg, ...@@ -1749,7 +1749,6 @@ static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name) const char *name)
{ {
struct se_portal_group *se_tpg;
struct vhost_scsi_nexus *tv_nexus; struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
...@@ -1758,7 +1757,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, ...@@ -1758,7 +1757,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
pr_debug("tpg->tpg_nexus already exists\n"); pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST; return -EEXIST;
} }
se_tpg = &tpg->se_tpg;
tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL); tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
if (!tv_nexus) { if (!tv_nexus) {
......
...@@ -49,7 +49,7 @@ enum { ...@@ -49,7 +49,7 @@ enum {
INTERVAL_TREE_DEFINE(struct vhost_umem_node, INTERVAL_TREE_DEFINE(struct vhost_umem_node,
rb, __u64, __subtree_last, rb, __u64, __subtree_last,
START, LAST, , vhost_umem_interval_tree); START, LAST, static inline, vhost_umem_interval_tree);
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
...@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, ...@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL; vq->avail = NULL;
vq->used = NULL; vq->used = NULL;
vq->last_avail_idx = 0; vq->last_avail_idx = 0;
vq->last_used_event = 0;
vq->avail_idx = 0; vq->avail_idx = 0;
vq->last_used_idx = 0; vq->last_used_idx = 0;
vq->signalled_used = 0; vq->signalled_used = 0;
...@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, ...@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access); struct iovec iov[], int iov_size, int access);
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to, static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size) const void *from, unsigned size)
{ {
int ret; int ret;
...@@ -749,7 +750,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to, ...@@ -749,7 +750,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
} }
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
void *from, unsigned size) void __user *from, unsigned size)
{ {
int ret; int ret;
...@@ -783,7 +784,7 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, ...@@ -783,7 +784,7 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
} }
static void __user *__vhost_get_user(struct vhost_virtqueue *vq, static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
void *addr, unsigned size) void __user *addr, unsigned size)
{ {
int ret; int ret;
...@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access) ...@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access)
return 0; return 0;
} }
int vhost_process_iotlb_msg(struct vhost_dev *dev, static int vhost_process_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg) struct vhost_iotlb_msg *msg)
{ {
int ret = 0; int ret = 0;
...@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) ...@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EINVAL; r = -EINVAL;
break; break;
} }
vq->last_avail_idx = s.num; vq->last_avail_idx = vq->last_used_event = s.num;
/* Forget the cached index value. */ /* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx; vq->avail_idx = vq->last_avail_idx;
break; break;
...@@ -2159,10 +2160,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) ...@@ -2159,10 +2160,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__u16 old, new; __u16 old, new;
__virtio16 event; __virtio16 event;
bool v; bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx)) unlikely(vq->avail_idx == vq->last_avail_idx))
...@@ -2170,6 +2167,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) ...@@ -2170,6 +2167,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags; __virtio16 flags;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
if (vhost_get_user(vq, flags, &vq->avail->flags)) { if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags"); vq_err(vq, "Failed to get flags");
return true; return true;
...@@ -2184,11 +2185,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) ...@@ -2184,11 +2185,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v)) if (unlikely(!v))
return true; return true;
/* We're sure if the following conditions are met, there's no
* need to notify guest:
* 1) cached used event is ahead of new
* 2) old to new updating does not cross cached used event. */
if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
!vring_need_event(vq->last_used_event, new, old))
return false;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
if (vhost_get_user(vq, event, vhost_used_event(vq))) { if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx"); vq_err(vq, "Failed to get used event idx");
return true; return true;
} }
return vring_need_event(vhost16_to_cpu(vq, event), new, old); vq->last_used_event = vhost16_to_cpu(vq, event);
return vring_need_event(vq->last_used_event, new, old);
} }
/* This actually signals the guest, using eventfd. */ /* This actually signals the guest, using eventfd. */
......
...@@ -107,6 +107,9 @@ struct vhost_virtqueue { ...@@ -107,6 +107,9 @@ struct vhost_virtqueue {
/* Last index we used. */ /* Last index we used. */
u16 last_used_idx; u16 last_used_idx;
/* Last used event we've seen */
u16 last_used_event;
/* Used flags */ /* Used flags */
u16 used_flags; u16 used_flags;
......
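The new last_used_event field caches the guest's used-event index so vhost_notify() can usually decide "no interrupt needed" without re-reading guest memory; only when that cached check is inconclusive does it issue the smp_mb() and fetch the index again. A minimal sketch of the fast path, with vring_need_event() as defined in include/uapi/linux/virtio_ring.h and the remaining names illustrative:

/* Sketch of the cached-event fast path above. */
static inline int vring_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	/* True iff event_idx lies in the half-open window (old, new_idx]. */
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

static bool demo_should_notify(u16 cached_event, u16 old, u16 new,
			       u16 ring_size)
{
	/* Cheap check on the cached copy: if the cached used event is
	 * ahead of 'new' (within one ring of slack) and the old->new
	 * update does not cross it, the guest cannot want an interrupt.
	 */
	if (vring_need_event(cached_event, new + ring_size, new) &&
	    !vring_need_event(cached_event, new, old))
		return false;

	/* Otherwise: smp_mb(), re-read the event index from guest
	 * memory, refresh the cache, and run the real test.
	 */
	return true;
}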
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
* *
* Since these may be in userspace, we use (inline) accessors. * Since these may be in userspace, we use (inline) accessors.
*/ */
#include <linux/compiler.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/vringh.h> #include <linux/vringh.h>
#include <linux/virtio_ring.h> #include <linux/virtio_ring.h>
...@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user); ...@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user);
static inline int getu16_kern(const struct vringh *vrh, static inline int getu16_kern(const struct vringh *vrh,
u16 *val, const __virtio16 *p) u16 *val, const __virtio16 *p)
{ {
*val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p)); *val = vringh16_to_cpu(vrh, READ_ONCE(*p));
return 0; return 0;
} }
static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{ {
ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val); WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
return 0; return 0;
} }
......
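ACCESS_ONCE() casts an lvalue to volatile, which stopped working reliably for non-scalar types on newer GCC, so the tree-wide direction is READ_ONCE()/WRITE_ONCE(); the vringh conversion above follows it. A minimal sketch of what the pair guarantees, namely a single untorn access the compiler cannot cache:

#include <linux/compiler.h>

static int flag;

/* Writer: exactly one store of 'flag' that a concurrent reader
 * may observe.
 */
static void demo_publish(void)
{
	WRITE_ONCE(flag, 1);
}

/* Reader: a plain "while (!flag)" may be optimized into a single
 * load; READ_ONCE() forces a fresh load on each iteration.
 */
static void demo_wait(void)
{
	while (!READ_ONCE(flag))
		;	/* spin */
}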
...@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void) ...@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID; return VHOST_VSOCK_DEFAULT_HOST_CID;
} }
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{ {
struct vhost_vsock *vsock; struct vhost_vsock *vsock;
spin_lock_bh(&vhost_vsock_lock);
list_for_each_entry(vsock, &vhost_vsock_list, list) { list_for_each_entry(vsock, &vhost_vsock_list, list) {
u32 other_cid = vsock->guest_cid; u32 other_cid = vsock->guest_cid;
...@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) ...@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
continue; continue;
if (other_cid == guest_cid) { if (other_cid == guest_cid) {
spin_unlock_bh(&vhost_vsock_lock);
return vsock; return vsock;
} }
} }
spin_unlock_bh(&vhost_vsock_lock);
return NULL; return NULL;
} }
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
spin_lock_bh(&vhost_vsock_lock);
vsock = __vhost_vsock_get(guest_cid);
spin_unlock_bh(&vhost_vsock_lock);
return vsock;
}
static void static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock, vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq) struct vhost_virtqueue *vq)
...@@ -559,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) ...@@ -559,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
return -EINVAL; return -EINVAL;
/* Refuse if CID is already in use */ /* Refuse if CID is already in use */
other = vhost_vsock_get(guest_cid);
if (other && other != vsock)
return -EADDRINUSE;
spin_lock_bh(&vhost_vsock_lock); spin_lock_bh(&vhost_vsock_lock);
other = __vhost_vsock_get(guest_cid);
if (other && other != vsock) {
spin_unlock_bh(&vhost_vsock_lock);
return -EADDRINUSE;
}
vsock->guest_cid = guest_cid; vsock->guest_cid = guest_cid;
spin_unlock_bh(&vhost_vsock_lock); spin_unlock_bh(&vhost_vsock_lock);
......
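The vsock change closes a TOCTOU race: the old code looked up the CID without holding the lock across the check, so two callers could both see a CID as free and both claim it. The fix factors the lockless walk into __vhost_vsock_get() and keeps lookup and assignment inside one vhost_vsock_lock critical section. A simplified sketch of the pattern, all names illustrative:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

struct demo_entry {
	struct list_head list;	/* entry is added to demo_list at open time */
	u32 id;
};

/* Caller must hold demo_lock. */
static struct demo_entry *__demo_get(u32 id)
{
	struct demo_entry *e;

	list_for_each_entry(e, &demo_list, list)
		if (e->id == id)
			return e;
	return NULL;
}

static int demo_claim(struct demo_entry *me, u32 id)
{
	struct demo_entry *other;
	int ret = 0;

	spin_lock_bh(&demo_lock);
	other = __demo_get(id);
	if (other && other != me)
		ret = -EADDRINUSE;	/* someone else got here first */
	else
		me->id = id;		/* claim while still holding the lock */
	spin_unlock_bh(&demo_lock);
	return ret;
}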
...@@ -489,6 +489,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = { ...@@ -489,6 +489,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
}; };
static void virtio_mmio_release_dev_empty(struct device *_d) {}
/* Platform device */ /* Platform device */
...@@ -511,6 +512,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) ...@@ -511,6 +512,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
vm_dev->vdev.dev.parent = &pdev->dev; vm_dev->vdev.dev.parent = &pdev->dev;
vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
vm_dev->vdev.config = &virtio_mmio_config_ops; vm_dev->vdev.config = &virtio_mmio_config_ops;
vm_dev->pdev = pdev; vm_dev->pdev = pdev;
INIT_LIST_HEAD(&vm_dev->virtqueues); INIT_LIST_HEAD(&vm_dev->virtqueues);
......
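The driver core complains ("Device ... does not have a release() function") whenever a device is unregistered without one. virtio_mmio's vm_dev is freed through the platform device, so an intentionally empty release is the correct fix here and documents that lifetime decision. A minimal sketch:

/* Sketch: silencing the driver-core warning when the enclosing
 * object's lifetime is managed elsewhere (here, by the platform
 * device). The empty body is deliberate.
 */
static void demo_release_dev_empty(struct device *_d)
{
	/* Intentionally empty: memory is freed with the parent. */
}

/* at probe time:
 *	vm_dev->vdev.dev.release = demo_release_dev_empty;
 */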
...@@ -37,7 +37,7 @@ void vp_synchronize_vectors(struct virtio_device *vdev) ...@@ -37,7 +37,7 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
synchronize_irq(vp_dev->pci_dev->irq); synchronize_irq(vp_dev->pci_dev->irq);
for (i = 0; i < vp_dev->msix_vectors; ++i) for (i = 0; i < vp_dev->msix_vectors; ++i)
synchronize_irq(vp_dev->msix_entries[i].vector); synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
} }
/* the notify function used when creating a virt queue */ /* the notify function used when creating a virt queue */
...@@ -102,41 +102,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) ...@@ -102,41 +102,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return vp_vring_interrupt(irq, opaque); return vp_vring_interrupt(irq, opaque);
} }
static void vp_free_vectors(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
if (vp_dev->intx_enabled) {
free_irq(vp_dev->pci_dev->irq, vp_dev);
vp_dev->intx_enabled = 0;
}
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(vp_dev->msix_entries[i].vector, vp_dev);
for (i = 0; i < vp_dev->msix_vectors; i++)
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
pci_disable_msix(vp_dev->pci_dev);
vp_dev->msix_enabled = 0;
}
vp_dev->msix_vectors = 0;
vp_dev->msix_used_vectors = 0;
kfree(vp_dev->msix_names);
vp_dev->msix_names = NULL;
kfree(vp_dev->msix_entries);
vp_dev->msix_entries = NULL;
kfree(vp_dev->msix_affinity_masks);
vp_dev->msix_affinity_masks = NULL;
}
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
bool per_vq_vectors) bool per_vq_vectors)
{ {
...@@ -147,10 +112,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, ...@@ -147,10 +112,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
vp_dev->msix_vectors = nvectors; vp_dev->msix_vectors = nvectors;
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
if (!vp_dev->msix_entries)
goto error;
vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
GFP_KERNEL); GFP_KERNEL);
if (!vp_dev->msix_names) if (!vp_dev->msix_names)
...@@ -165,12 +126,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, ...@@ -165,12 +126,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL)) GFP_KERNEL))
goto error; goto error;
for (i = 0; i < nvectors; ++i) err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
vp_dev->msix_entries[i].entry = i; PCI_IRQ_MSIX);
if (err < 0)
err = pci_enable_msix_exact(vp_dev->pci_dev,
vp_dev->msix_entries, nvectors);
if (err)
goto error; goto error;
vp_dev->msix_enabled = 1; vp_dev->msix_enabled = 1;
...@@ -178,7 +136,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, ...@@ -178,7 +136,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
v = vp_dev->msix_used_vectors; v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name); "%s-config", name);
err = request_irq(vp_dev->msix_entries[v].vector, err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_config_changed, 0, vp_dev->msix_names[v], vp_config_changed, 0, vp_dev->msix_names[v],
vp_dev); vp_dev);
if (err) if (err)
...@@ -197,7 +155,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, ...@@ -197,7 +155,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
v = vp_dev->msix_used_vectors; v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name); "%s-virtqueues", name);
err = request_irq(vp_dev->msix_entries[v].vector, err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_dev); vp_dev);
if (err) if (err)
...@@ -206,19 +164,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, ...@@ -206,19 +164,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
} }
return 0; return 0;
error: error:
vp_free_vectors(vdev);
return err;
}
static int vp_request_intx(struct virtio_device *vdev)
{
int err;
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
if (!err)
vp_dev->intx_enabled = 1;
return err; return err;
} }
...@@ -276,67 +221,88 @@ void vp_del_vqs(struct virtio_device *vdev) ...@@ -276,67 +221,88 @@ void vp_del_vqs(struct virtio_device *vdev)
{ {
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq, *n; struct virtqueue *vq, *n;
struct virtio_pci_vq_info *info; int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) { list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
info = vp_dev->vqs[vq->index]; if (vp_dev->per_vq_vectors) {
if (vp_dev->per_vq_vectors && int v = vp_dev->vqs[vq->index]->msix_vector;
info->msix_vector != VIRTIO_MSI_NO_VECTOR)
free_irq(vp_dev->msix_entries[info->msix_vector].vector, if (v != VIRTIO_MSI_NO_VECTOR)
vq); free_irq(pci_irq_vector(vp_dev->pci_dev, v),
vq);
}
vp_del_vq(vq); vp_del_vq(vq);
} }
vp_dev->per_vq_vectors = false; vp_dev->per_vq_vectors = false;
vp_free_vectors(vdev); if (vp_dev->intx_enabled) {
free_irq(vp_dev->pci_dev->irq, vp_dev);
vp_dev->intx_enabled = 0;
}
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
for (i = 0; i < vp_dev->msix_vectors; i++)
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
pci_free_irq_vectors(vp_dev->pci_dev);
vp_dev->msix_enabled = 0;
}
vp_dev->msix_vectors = 0;
vp_dev->msix_used_vectors = 0;
kfree(vp_dev->msix_names);
vp_dev->msix_names = NULL;
kfree(vp_dev->msix_affinity_masks);
vp_dev->msix_affinity_masks = NULL;
kfree(vp_dev->vqs); kfree(vp_dev->vqs);
vp_dev->vqs = NULL; vp_dev->vqs = NULL;
} }
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], struct virtqueue *vqs[],
vq_callback_t *callbacks[], vq_callback_t *callbacks[],
const char * const names[], const char * const names[],
bool use_msix,
bool per_vq_vectors) bool per_vq_vectors)
{ {
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u16 msix_vec; u16 msix_vec;
int i, err, nvectors, allocated_vectors; int i, err, nvectors, allocated_vectors;
vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs) if (!vp_dev->vqs)
return -ENOMEM; return -ENOMEM;
if (!use_msix) { if (per_vq_vectors) {
/* Old style: one normal interrupt for change and all vqs. */ /* Best option: one for change interrupt, one per vq. */
err = vp_request_intx(vdev); nvectors = 1;
if (err) for (i = 0; i < nvqs; ++i)
goto error_find; if (callbacks[i])
++nvectors;
} else { } else {
if (per_vq_vectors) { /* Second best: one for change, shared for all vqs. */
/* Best option: one for change interrupt, one per vq. */ nvectors = 2;
nvectors = 1;
for (i = 0; i < nvqs; ++i)
if (callbacks[i])
++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2;
}
err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
if (err)
goto error_find;
} }
err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
if (err)
goto error_find;
vp_dev->per_vq_vectors = per_vq_vectors; vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors; allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) { for (i = 0; i < nvqs; ++i) {
if (!names[i]) { if (!names[i]) {
vqs[i] = NULL; vqs[i] = NULL;
continue; continue;
} else if (!callbacks[i] || !vp_dev->msix_enabled) }
if (!callbacks[i])
msix_vec = VIRTIO_MSI_NO_VECTOR; msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vp_dev->per_vq_vectors) else if (vp_dev->per_vq_vectors)
msix_vec = allocated_vectors++; msix_vec = allocated_vectors++;
...@@ -356,14 +322,12 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, ...@@ -356,14 +322,12 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
sizeof *vp_dev->msix_names, sizeof *vp_dev->msix_names,
"%s-%s", "%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]); dev_name(&vp_dev->vdev.dev), names[i]);
err = request_irq(vp_dev->msix_entries[msix_vec].vector, err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, 0, vring_interrupt, 0,
vp_dev->msix_names[msix_vec], vp_dev->msix_names[msix_vec],
vqs[i]); vqs[i]);
if (err) { if (err)
vp_del_vq(vqs[i]);
goto error_find; goto error_find;
}
} }
return 0; return 0;
...@@ -372,6 +336,43 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, ...@@ -372,6 +336,43 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return err; return err;
} }
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char * const names[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i, err;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
goto out_del_vqs;
vp_dev->intx_enabled = 1;
vp_dev->per_vq_vectors = false;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
continue;
}
vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
VIRTIO_MSI_NO_VECTOR);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto out_del_vqs;
}
}
return 0;
out_del_vqs:
vp_del_vqs(vdev);
return err;
}
/* the config->find_vqs() implementation */ /* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], struct virtqueue *vqs[],
...@@ -381,17 +382,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, ...@@ -381,17 +382,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int err; int err;
/* Try MSI-X with one vector per queue. */ /* Try MSI-X with one vector per queue. */
err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true);
if (!err) if (!err)
return 0; return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */ /* Fallback: MSI-X with one vector for config, one shared for queues. */
err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false);
true, false);
if (!err) if (!err)
return 0; return 0;
/* Finally fall back to regular interrupts. */ /* Finally fall back to regular interrupts. */
return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
false, false);
} }
const char *vp_bus_name(struct virtio_device *vdev) const char *vp_bus_name(struct virtio_device *vdev)
...@@ -419,7 +418,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) ...@@ -419,7 +418,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (vp_dev->msix_enabled) { if (vp_dev->msix_enabled) {
mask = vp_dev->msix_affinity_masks[info->msix_vector]; mask = vp_dev->msix_affinity_masks[info->msix_vector];
irq = vp_dev->msix_entries[info->msix_vector].vector; irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
if (cpu == -1) if (cpu == -1)
irq_set_affinity_hint(irq, NULL); irq_set_affinity_hint(irq, NULL);
else { else {
......
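The virtio_pci rework retires the driver-managed struct msix_entry array and pci_enable_msix_exact() in favor of pci_alloc_irq_vectors(), letting the PCI core own the vector bookkeeping; pci_irq_vector() then maps a vector index to a Linux IRQ number wherever the old code dereferenced msix_entries[i].vector. A condensed sketch of the pattern, with error handling trimmed and names illustrative:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int demo_setup_irqs(struct pci_dev *pdev, int nvec,
			   irq_handler_t handler, void *data)
{
	int i, err;

	/* Ask for exactly nvec MSI-X vectors; the core allocates and
	 * tracks them, replacing the old msix_entry array.
	 */
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0)
		return err;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "demo", data);
		if (err)
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return err;
}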
...@@ -85,7 +85,6 @@ struct virtio_pci_device { ...@@ -85,7 +85,6 @@ struct virtio_pci_device {
/* MSI-X support */ /* MSI-X support */
int msix_enabled; int msix_enabled;
int intx_enabled; int intx_enabled;
struct msix_entry *msix_entries;
cpumask_var_t *msix_affinity_masks; cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough, /* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */ * and I'm too lazy to allocate each name separately. */
......
...@@ -33,12 +33,12 @@ static inline u8 vp_ioread8(u8 __iomem *addr) ...@@ -33,12 +33,12 @@ static inline u8 vp_ioread8(u8 __iomem *addr)
{ {
return ioread8(addr); return ioread8(addr);
} }
static inline u16 vp_ioread16 (u16 __iomem *addr) static inline u16 vp_ioread16 (__le16 __iomem *addr)
{ {
return ioread16(addr); return ioread16(addr);
} }
static inline u32 vp_ioread32(u32 __iomem *addr) static inline u32 vp_ioread32(__le32 __iomem *addr)
{ {
return ioread32(addr); return ioread32(addr);
} }
...@@ -48,12 +48,12 @@ static inline void vp_iowrite8(u8 value, u8 __iomem *addr) ...@@ -48,12 +48,12 @@ static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
iowrite8(value, addr); iowrite8(value, addr);
} }
static inline void vp_iowrite16(u16 value, u16 __iomem *addr) static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{ {
iowrite16(value, addr); iowrite16(value, addr);
} }
static inline void vp_iowrite32(u32 value, u32 __iomem *addr) static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{ {
iowrite32(value, addr); iowrite32(value, addr);
} }
......
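The modern virtio-pci config space is defined little-endian, and ioread16()/ioread32() already convert to CPU endianness on the way in; annotating the register pointers as __le16/__le32 simply teaches sparse which side of the conversion each pointer lives on. A small sketch of the idea with an invented register layout:

/* Sketch: typed accessors for little-endian MMIO registers.
 * ioread16() returns CPU-endian data from an LE register, so the
 * pointer, not the return value, carries the __le16 annotation.
 */
#include <linux/io.h>
#include <linux/types.h>

struct demo_cfg {
	__le16 queue_size;	/* device-defined LE layout */
	__le32 features;
};

static inline u16 demo_read_queue_size(struct demo_cfg __iomem *cfg)
{
	return ioread16(&cfg->queue_size);	/* LE in, CPU-endian out */
}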
...@@ -420,7 +420,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, ...@@ -420,7 +420,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
if (i == err_idx) if (i == err_idx)
break; break;
vring_unmap_one(vq, &desc[i]); vring_unmap_one(vq, &desc[i]);
i = vq->vring.desc[i].next; i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
} }
vq->vq.num_free += total_sg; vq->vq.num_free += total_sg;
...@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick); ...@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head) static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{ {
unsigned int i, j; unsigned int i, j;
u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */ /* Clear data ptr. */
vq->desc_state[head].data = NULL; vq->desc_state[head].data = NULL;
...@@ -649,7 +649,7 @@ static inline bool more_used(const struct vring_virtqueue *vq) ...@@ -649,7 +649,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
* @vq: the struct virtqueue we're talking about. * @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer * @len: the length written into the buffer
* *
* If the driver wrote data into the buffer, @len will be set to the * If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer * amount written. This means you don't need to clear the buffer
* beforehand to ensure there's no data leakage in the case of short * beforehand to ensure there's no data leakage in the case of short
* writes. * writes.
......
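The virtqueue_add() fix matters on big-endian hosts: vring.desc[i].next is stored guest-endian (__virtio16), so using it directly as an array index only happens to work on little-endian machines. A minimal sketch of an endian-correct chain walk, mirroring the detach_buf() pattern above:

#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>

/* Sketch: walk a descriptor chain, converting the guest-endian
 * 'next' field before using it as an index.
 */
static void demo_walk_chain(struct virtqueue *_vq,
			    struct vring_desc *desc, unsigned int head)
{
	__virtio16 nextflag = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
	unsigned int i = head;

	while (desc[i].flags & nextflag)
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}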
...@@ -10,9 +10,7 @@ ...@@ -10,9 +10,7 @@
#ifndef FS_LOGFS_LOGFS_H #ifndef FS_LOGFS_LOGFS_H
#define FS_LOGFS_LOGFS_H #define FS_LOGFS_LOGFS_H
#undef __CHECK_ENDIAN__ #include <linux/types.h>
#define __CHECK_ENDIAN__
#include <linux/btree.h> #include <linux/btree.h>
#include <linux/crc32.h> #include <linux/crc32.h>
#include <linux/fs.h> #include <linux/fs.h>
......
...@@ -246,7 +246,7 @@ struct lruvec { ...@@ -246,7 +246,7 @@ struct lruvec {
#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
/* LRU Isolation modes. */ /* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t; typedef unsigned __bitwise isolate_mode_t;
enum zone_watermarks { enum zone_watermarks {
WMARK_MIN, WMARK_MIN,
......
...@@ -111,8 +111,8 @@ struct uart_icount { ...@@ -111,8 +111,8 @@ struct uart_icount {
__u32 buf_overrun; __u32 buf_overrun;
}; };
typedef unsigned int __bitwise__ upf_t; typedef unsigned int __bitwise upf_t;
typedef unsigned int __bitwise__ upstat_t; typedef unsigned int __bitwise upstat_t;
struct uart_port { struct uart_port {
spinlock_t lock; /* port lock */ spinlock_t lock; /* port lock */
......
...@@ -154,8 +154,8 @@ typedef u64 dma_addr_t; ...@@ -154,8 +154,8 @@ typedef u64 dma_addr_t;
typedef u32 dma_addr_t; typedef u32 dma_addr_t;
#endif #endif
typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise gfp_t;
typedef unsigned __bitwise__ fmode_t; typedef unsigned __bitwise fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT #ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t; typedef u64 phys_addr_t;
......
...@@ -74,7 +74,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2) ...@@ -74,7 +74,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2)
#define zero_data(p) {p[0]=0;p[1]=0;p[2]=0;} #define zero_data(p) {p[0]=0;p[1]=0;p[2]=0;}
/* initiator tags; opaque for target */ /* initiator tags; opaque for target */
typedef uint32_t __bitwise__ itt_t; typedef uint32_t __bitwise itt_t;
/* below makes sense only for initiator that created this tag */ /* below makes sense only for initiator that created this tag */
#define build_itt(itt, age) ((__force itt_t)\ #define build_itt(itt, age) ((__force itt_t)\
((itt) | ((age) << ISCSI_AGE_SHIFT))) ((itt) | ((age) << ISCSI_AGE_SHIFT)))
......
...@@ -149,7 +149,7 @@ enum se_cmd_flags_table { ...@@ -149,7 +149,7 @@ enum se_cmd_flags_table {
* Used by transport_send_check_condition_and_sense() * Used by transport_send_check_condition_and_sense()
* to signal which ASC/ASCQ sense payload should be built. * to signal which ASC/ASCQ sense payload should be built.
*/ */
typedef unsigned __bitwise__ sense_reason_t; typedef unsigned __bitwise sense_reason_t;
enum tcm_sense_reason_table { enum tcm_sense_reason_table {
#define R(x) (__force sense_reason_t )(x) #define R(x) (__force sense_reason_t )(x)
......
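With __bitwise__ retired, new opaque integer types are declared with __bitwise directly, exactly like the gfp_t, upf_t, and sense_reason_t conversions above; values enter and leave only through explicit __force casts, so sparse warns on any accidental mixing with plain integers. A minimal sketch of declaring such a type, names invented:

/* Sketch: a new sparse-checked opaque type in the style of gfp_t.
 * Mixing it with plain integers now warns in every sparse build,
 * not only in __CHECK_ENDIAN__ ones.
 */
typedef unsigned int __bitwise demo_mode_t;

#define DEMO_MODE_A	((__force demo_mode_t)0x1)
#define DEMO_MODE_B	((__force demo_mode_t)0x2)

static inline int demo_mode_has(demo_mode_t mode, demo_mode_t bit)
{
	return (mode & bit) != 0;	/* ok: same bitwise type, 0 is special */
	/* "return mode == 1;" would draw a sparse warning here */
}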
...@@ -462,6 +462,7 @@ header-y += virtio_rng.h ...@@ -462,6 +462,7 @@ header-y += virtio_rng.h
header-y += virtio_scsi.h header-y += virtio_scsi.h
header-y += virtio_types.h header-y += virtio_types.h
header-y += virtio_vsock.h header-y += virtio_vsock.h
header-y += virtio_crypto.h
header-y += vm_sockets.h header-y += vm_sockets.h
header-y += vt.h header-y += vt.h
header-y += vtpm_proxy.h header-y += vtpm_proxy.h
......
...@@ -23,11 +23,7 @@ ...@@ -23,11 +23,7 @@
#else #else
#define __bitwise__ #define __bitwise__
#endif #endif
#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__ #define __bitwise __bitwise__
#else
#define __bitwise
#endif
typedef __u16 __bitwise __le16; typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16; typedef __u16 __bitwise __be16;
......
...@@ -172,8 +172,6 @@ struct vhost_memory { ...@@ -172,8 +172,6 @@ struct vhost_memory {
#define VHOST_F_LOG_ALL 26 #define VHOST_F_LOG_ALL 26
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27 #define VHOST_NET_F_VIRTIO_NET_HDR 27
/* Vhost have device IOTLB */
#define VHOST_F_DEVICE_IOTLB 63
/* VHOST_SCSI specific definitions */ /* VHOST_SCSI specific definitions */
......
#ifndef _VIRTIO_CRYPTO_H
#define _VIRTIO_CRYPTO_H
/* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/types.h>
#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
#define VIRTIO_CRYPTO_SERVICE_HASH 1
#define VIRTIO_CRYPTO_SERVICE_MAC 2
#define VIRTIO_CRYPTO_SERVICE_AEAD 3
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
struct virtio_crypto_ctrl_header {
#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
__le32 opcode;
__le32 algo;
__le32 flag;
/* data virtqueue id */
__le32 queue_id;
};
struct virtio_crypto_cipher_session_para {
#define VIRTIO_CRYPTO_NO_CIPHER 0
#define VIRTIO_CRYPTO_CIPHER_ARC4 1
#define VIRTIO_CRYPTO_CIPHER_AES_ECB 2
#define VIRTIO_CRYPTO_CIPHER_AES_CBC 3
#define VIRTIO_CRYPTO_CIPHER_AES_CTR 4
#define VIRTIO_CRYPTO_CIPHER_DES_ECB 5
#define VIRTIO_CRYPTO_CIPHER_DES_CBC 6
#define VIRTIO_CRYPTO_CIPHER_3DES_ECB 7
#define VIRTIO_CRYPTO_CIPHER_3DES_CBC 8
#define VIRTIO_CRYPTO_CIPHER_3DES_CTR 9
#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8 10
#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2 11
#define VIRTIO_CRYPTO_CIPHER_AES_F8 12
#define VIRTIO_CRYPTO_CIPHER_AES_XTS 13
#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14
__le32 algo;
/* length of key */
__le32 keylen;
#define VIRTIO_CRYPTO_OP_ENCRYPT 1
#define VIRTIO_CRYPTO_OP_DECRYPT 2
/* encrypt or decrypt */
__le32 op;
__le32 padding;
};
struct virtio_crypto_session_input {
/* Device-writable part */
__le64 session_id;
__le32 status;
__le32 padding;
};
struct virtio_crypto_cipher_session_req {
struct virtio_crypto_cipher_session_para para;
__u8 padding[32];
};
struct virtio_crypto_hash_session_para {
#define VIRTIO_CRYPTO_NO_HASH 0
#define VIRTIO_CRYPTO_HASH_MD5 1
#define VIRTIO_CRYPTO_HASH_SHA1 2
#define VIRTIO_CRYPTO_HASH_SHA_224 3
#define VIRTIO_CRYPTO_HASH_SHA_256 4
#define VIRTIO_CRYPTO_HASH_SHA_384 5
#define VIRTIO_CRYPTO_HASH_SHA_512 6
#define VIRTIO_CRYPTO_HASH_SHA3_224 7
#define VIRTIO_CRYPTO_HASH_SHA3_256 8
#define VIRTIO_CRYPTO_HASH_SHA3_384 9
#define VIRTIO_CRYPTO_HASH_SHA3_512 10
#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11
#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12
__le32 algo;
/* hash result length */
__le32 hash_result_len;
__u8 padding[8];
};
struct virtio_crypto_hash_create_session_req {
struct virtio_crypto_hash_session_para para;
__u8 padding[40];
};
struct virtio_crypto_mac_session_para {
#define VIRTIO_CRYPTO_NO_MAC 0
#define VIRTIO_CRYPTO_MAC_HMAC_MD5 1
#define VIRTIO_CRYPTO_MAC_HMAC_SHA1 2
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224 3
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256 4
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384 5
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512 6
#define VIRTIO_CRYPTO_MAC_CMAC_3DES 25
#define VIRTIO_CRYPTO_MAC_CMAC_AES 26
#define VIRTIO_CRYPTO_MAC_KASUMI_F9 27
#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2 28
#define VIRTIO_CRYPTO_MAC_GMAC_AES 41
#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH 42
#define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49
#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50
#define VIRTIO_CRYPTO_MAC_XCBC_AES 53
__le32 algo;
/* hash result length */
__le32 hash_result_len;
/* length of authenticated key */
__le32 auth_key_len;
__le32 padding;
};
struct virtio_crypto_mac_create_session_req {
struct virtio_crypto_mac_session_para para;
__u8 padding[40];
};
struct virtio_crypto_aead_session_para {
#define VIRTIO_CRYPTO_NO_AEAD 0
#define VIRTIO_CRYPTO_AEAD_GCM 1
#define VIRTIO_CRYPTO_AEAD_CCM 2
#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3
__le32 algo;
/* length of key */
__le32 key_len;
/* hash result length */
__le32 hash_result_len;
/* length of the additional authenticated data (AAD) in bytes */
__le32 aad_len;
/* encrypt or decrypt, see VIRTIO_CRYPTO_OP_* above */
__le32 op;
__le32 padding;
};
struct virtio_crypto_aead_create_session_req {
struct virtio_crypto_aead_session_para para;
__u8 padding[32];
};
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
__le32 alg_chain_order;
/* Plain hash */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1
/* Authenticated hash (mac) */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2
/* Nested hash */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3
__le32 hash_mode;
struct virtio_crypto_cipher_session_para cipher_param;
union {
struct virtio_crypto_hash_session_para hash_param;
struct virtio_crypto_mac_session_para mac_param;
__u8 padding[16];
} u;
/* length of the additional authenticated data (AAD) in bytes */
__le32 aad_len;
__le32 padding;
};
struct virtio_crypto_alg_chain_session_req {
struct virtio_crypto_alg_chain_session_para para;
};
struct virtio_crypto_sym_create_session_req {
union {
struct virtio_crypto_cipher_session_req cipher;
struct virtio_crypto_alg_chain_session_req chain;
__u8 padding[48];
} u;
/* Device-readable part */
/* No operation */
#define VIRTIO_CRYPTO_SYM_OP_NONE 0
/* Cipher only operation on the data */
#define VIRTIO_CRYPTO_SYM_OP_CIPHER 1
/*
* Chain any cipher with any hash or mac operation. The order
* depends on the value of alg_chain_order param
*/
#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2
__le32 op_type;
__le32 padding;
};
struct virtio_crypto_destroy_session_req {
/* Device-readable part */
__le64 session_id;
__u8 padding[48];
};
/* The request of the control virtqueue's packet */
struct virtio_crypto_op_ctrl_req {
struct virtio_crypto_ctrl_header header;
union {
struct virtio_crypto_sym_create_session_req
sym_create_session;
struct virtio_crypto_hash_create_session_req
hash_create_session;
struct virtio_crypto_mac_create_session_req
mac_create_session;
struct virtio_crypto_aead_create_session_req
aead_create_session;
struct virtio_crypto_destroy_session_req
destroy_session;
__u8 padding[56];
} u;
};
struct virtio_crypto_op_header {
#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
#define VIRTIO_CRYPTO_HASH \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
#define VIRTIO_CRYPTO_MAC \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
__le32 opcode;
/* algo is a service-specific algorithm */
__le32 algo;
/* session_id identifies a service-specific session */
__le64 session_id;
/* control flag to control the request */
__le32 flag;
__le32 padding;
};
struct virtio_crypto_cipher_para {
/*
* Byte Length of valid IV/Counter
*
* For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
* SNOW3G in UEA2 mode, this is the length of the IV (which
* must be the same as the block length of the cipher).
* For block ciphers in CTR mode, this is the length of the counter
* (which must be the same as the block length of the cipher).
* For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
*
* The IV/Counter will be updated after every partial cryptographic
* operation.
*/
__le32 iv_len;
/* length of source data */
__le32 src_data_len;
/* length of dst data */
__le32 dst_data_len;
__le32 padding;
};
struct virtio_crypto_hash_para {
/* length of source data */
__le32 src_data_len;
/* hash result length */
__le32 hash_result_len;
};
struct virtio_crypto_mac_para {
struct virtio_crypto_hash_para hash;
};
struct virtio_crypto_aead_para {
/*
* Byte Length of valid IV data pointed to by the below iv_addr
* parameter.
*
* For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
* case iv_addr points to J0.
* For CCM mode, this is the length of the nonce, which can be in the
* range 7 to 13 inclusive.
*/
__le32 iv_len;
/* length of additional auth data */
__le32 aad_len;
/* length of source data */
__le32 src_data_len;
/* length of dst data */
__le32 dst_data_len;
};
struct virtio_crypto_cipher_data_req {
/* Device-readable part */
struct virtio_crypto_cipher_para para;
__u8 padding[24];
};
struct virtio_crypto_hash_data_req {
/* Device-readable part */
struct virtio_crypto_hash_para para;
__u8 padding[40];
};
struct virtio_crypto_mac_data_req {
/* Device-readable part */
struct virtio_crypto_mac_para para;
__u8 padding[40];
};
struct virtio_crypto_alg_chain_data_para {
__le32 iv_len;
/* Length of source data */
__le32 src_data_len;
/* Length of destination data */
__le32 dst_data_len;
/* Starting point for cipher processing in source data */
__le32 cipher_start_src_offset;
/* Length of the source data that the cipher will be computed on */
__le32 len_to_cipher;
/* Starting point for hash processing in source data */
__le32 hash_start_src_offset;
/* Length of the source data that the hash will be computed on */
__le32 len_to_hash;
/* Length of the additional auth data */
__le32 aad_len;
/* Length of the hash result */
__le32 hash_result_len;
__le32 reserved;
};
struct virtio_crypto_alg_chain_data_req {
/* Device-readable part */
struct virtio_crypto_alg_chain_data_para para;
};
struct virtio_crypto_sym_data_req {
union {
struct virtio_crypto_cipher_data_req cipher;
struct virtio_crypto_alg_chain_data_req chain;
__u8 padding[40];
} u;
/* See above VIRTIO_CRYPTO_SYM_OP_* */
__le32 op_type;
__le32 padding;
};
struct virtio_crypto_aead_data_req {
/* Device-readable part */
struct virtio_crypto_aead_para para;
__u8 padding[32];
};
/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
struct virtio_crypto_op_header header;
union {
struct virtio_crypto_sym_data_req sym_req;
struct virtio_crypto_hash_data_req hash_req;
struct virtio_crypto_mac_data_req mac_req;
struct virtio_crypto_aead_data_req aead_req;
__u8 padding[48];
} u;
};
#define VIRTIO_CRYPTO_OK 0
#define VIRTIO_CRYPTO_ERR 1
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
struct virtio_crypto_config {
/* See VIRTIO_CRYPTO_S_* above */
__u32 status;
/*
* Maximum number of data queues
*/
__u32 max_dataqueues;
/*
* Specifies the services mask which the device supports,
* see VIRTIO_CRYPTO_SERVICE_* above
*/
__u32 crypto_services;
/* Detailed algorithms mask */
__u32 cipher_algo_l;
__u32 cipher_algo_h;
__u32 hash_algo;
__u32 mac_algo_l;
__u32 mac_algo_h;
__u32 aead_algo;
/* Maximum length of cipher key */
__u32 max_cipher_key_len;
/* Maximum length of authenticated key */
__u32 max_auth_key_len;
__u32 reserve;
/* Maximum size of each crypto request's content */
__u64 max_size;
};
struct virtio_crypto_inhdr {
/* See VIRTIO_CRYPTO_* above */
__u8 status;
};
#endif
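Every multi-byte field in the new virtio_crypto ABI is explicitly little-endian, so a driver populates requests with cpu_to_le32()/cpu_to_le64() rather than raw stores. A hedged sketch of filling a CIPHER create-session control request, built only from the definitions above; key material and virtqueue submission are omitted:

#include <linux/string.h>
#include <linux/virtio_crypto.h>

/* Sketch: prepare an AES-CBC create-session request for the
 * control virtqueue. Field paths follow the structs above.
 */
static void demo_fill_cipher_session(struct virtio_crypto_op_ctrl_req *req,
				     u32 keylen)
{
	memset(req, 0, sizeof(*req));
	req->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	req->header.algo = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_CBC);
	req->header.queue_id = cpu_to_le32(0);	/* data virtqueue 0 */

	req->u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req->u.sym_create_session.u.cipher.para.algo =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_CBC);
	req->u.sym_create_session.u.cipher.para.keylen = cpu_to_le32(keylen);
	req->u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(VIRTIO_CRYPTO_OP_ENCRYPT);
}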
...@@ -42,5 +42,6 @@ ...@@ -42,5 +42,6 @@
#define VIRTIO_ID_GPU 16 /* virtio GPU */ #define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */ #define VIRTIO_ID_INPUT 18 /* virtio input */
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#endif /* _LINUX_VIRTIO_IDS_H */ #endif /* _LINUX_VIRTIO_IDS_H */
...@@ -39,8 +39,8 @@ ...@@ -39,8 +39,8 @@
* - __le{16,32,64} for standard-compliant virtio devices * - __le{16,32,64} for standard-compliant virtio devices
*/ */
typedef __u16 __bitwise__ __virtio16; typedef __u16 __bitwise __virtio16;
typedef __u32 __bitwise__ __virtio32; typedef __u32 __bitwise __virtio32;
typedef __u64 __bitwise__ __virtio64; typedef __u64 __bitwise __virtio64;
#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */ #endif /* _UAPI_LINUX_VIRTIO_TYPES_H */
...@@ -20,5 +20,3 @@ bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o ...@@ -20,5 +20,3 @@ bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_LEDS) += leds.o bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
subdir-ccflags-y += -D__CHECK_ENDIAN__
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <net/inet_frag.h> #include <net/inet_frag.h>
#include <net/6lowpan.h> #include <net/6lowpan.h>
typedef unsigned __bitwise__ lowpan_rx_result; typedef unsigned __bitwise lowpan_rx_result;
#define RX_CONTINUE ((__force lowpan_rx_result) 0u) #define RX_CONTINUE ((__force lowpan_rx_result) 0u)
#define RX_DROP_UNUSABLE ((__force lowpan_rx_result) 1u) #define RX_DROP_UNUSABLE ((__force lowpan_rx_result) 1u)
#define RX_DROP ((__force lowpan_rx_result) 2u) #define RX_DROP ((__force lowpan_rx_result) 2u)
......
...@@ -7,5 +7,3 @@ ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \ ...@@ -7,5 +7,3 @@ ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
ieee802154_socket-y := socket.o ieee802154_socket-y := socket.o
CFLAGS_trace.o := -I$(src) CFLAGS_trace.o := -I$(src)
ccflags-y += -D__CHECK_ENDIAN__
...@@ -61,4 +61,4 @@ rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o ...@@ -61,4 +61,4 @@ rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y) mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
ccflags-y += -D__CHECK_ENDIAN__ -DDEBUG ccflags-y += -DDEBUG
...@@ -159,7 +159,7 @@ enum ieee80211_bss_valid_data_flags { ...@@ -159,7 +159,7 @@ enum ieee80211_bss_valid_data_flags {
IEEE80211_BSS_VALID_ERP = BIT(3) IEEE80211_BSS_VALID_ERP = BIT(3)
}; };
typedef unsigned __bitwise__ ieee80211_tx_result; typedef unsigned __bitwise ieee80211_tx_result;
#define TX_CONTINUE ((__force ieee80211_tx_result) 0u) #define TX_CONTINUE ((__force ieee80211_tx_result) 0u)
#define TX_DROP ((__force ieee80211_tx_result) 1u) #define TX_DROP ((__force ieee80211_tx_result) 1u)
#define TX_QUEUED ((__force ieee80211_tx_result) 2u) #define TX_QUEUED ((__force ieee80211_tx_result) 2u)
...@@ -180,7 +180,7 @@ struct ieee80211_tx_data { ...@@ -180,7 +180,7 @@ struct ieee80211_tx_data {
}; };
typedef unsigned __bitwise__ ieee80211_rx_result; typedef unsigned __bitwise ieee80211_rx_result;
#define RX_CONTINUE ((__force ieee80211_rx_result) 0u) #define RX_CONTINUE ((__force ieee80211_rx_result) 0u)
#define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u) #define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u)
#define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u) #define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u)
......
...@@ -3,5 +3,3 @@ mac802154-objs := main.o rx.o tx.o mac_cmd.o mib.o \ ...@@ -3,5 +3,3 @@ mac802154-objs := main.o rx.o tx.o mac_cmd.o mib.o \
iface.o llsec.o util.o cfg.o trace.o iface.o llsec.o util.o cfg.o trace.o
CFLAGS_trace.o := -I$(src) CFLAGS_trace.o := -I$(src)
ccflags-y += -D__CHECK_ENDIAN__
...@@ -379,7 +379,7 @@ static void virtio_vsock_reset_sock(struct sock *sk) ...@@ -379,7 +379,7 @@ static void virtio_vsock_reset_sock(struct sock *sk)
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock) static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{ {
struct virtio_device *vdev = vsock->vdev; struct virtio_device *vdev = vsock->vdev;
u64 guest_cid; __le64 guest_cid;
vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid), vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
&guest_cid, sizeof(guest_cid)); &guest_cid, sizeof(guest_cid));
......
...@@ -32,7 +32,7 @@ static const struct virtio_transport *virtio_transport_get_ops(void) ...@@ -32,7 +32,7 @@ static const struct virtio_transport *virtio_transport_get_ops(void)
return container_of(t, struct virtio_transport, transport); return container_of(t, struct virtio_transport, transport);
} }
struct virtio_vsock_pkt * static struct virtio_vsock_pkt *
virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
size_t len, size_t len,
u32 src_cid, u32 src_cid,
...@@ -82,7 +82,6 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, ...@@ -82,7 +82,6 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
kfree(pkt); kfree(pkt);
return NULL; return NULL;
} }
EXPORT_SYMBOL_GPL(virtio_transport_alloc_pkt);
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info *info) struct virtio_vsock_pkt_info *info)
...@@ -606,9 +605,9 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) ...@@ -606,9 +605,9 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
return 0; return 0;
pkt = virtio_transport_alloc_pkt(&info, 0, pkt = virtio_transport_alloc_pkt(&info, 0,
le32_to_cpu(pkt->hdr.dst_cid), le64_to_cpu(pkt->hdr.dst_cid),
le32_to_cpu(pkt->hdr.dst_port), le32_to_cpu(pkt->hdr.dst_port),
le32_to_cpu(pkt->hdr.src_cid), le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port)); le32_to_cpu(pkt->hdr.src_port));
if (!pkt) if (!pkt)
return -ENOMEM; return -ENOMEM;
...@@ -823,7 +822,7 @@ virtio_transport_send_response(struct vsock_sock *vsk, ...@@ -823,7 +822,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info info = { struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RESPONSE, .op = VIRTIO_VSOCK_OP_RESPONSE,
.type = VIRTIO_VSOCK_TYPE_STREAM, .type = VIRTIO_VSOCK_TYPE_STREAM,
.remote_cid = le32_to_cpu(pkt->hdr.src_cid), .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
.remote_port = le32_to_cpu(pkt->hdr.src_port), .remote_port = le32_to_cpu(pkt->hdr.src_port),
.reply = true, .reply = true,
}; };
...@@ -863,9 +862,9 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt) ...@@ -863,9 +862,9 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
child->sk_state = SS_CONNECTED; child->sk_state = SS_CONNECTED;
vchild = vsock_sk(child); vchild = vsock_sk(child);
vsock_addr_init(&vchild->local_addr, le32_to_cpu(pkt->hdr.dst_cid), vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
le32_to_cpu(pkt->hdr.dst_port)); le32_to_cpu(pkt->hdr.dst_port));
vsock_addr_init(&vchild->remote_addr, le32_to_cpu(pkt->hdr.src_cid), vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port)); le32_to_cpu(pkt->hdr.src_port));
vsock_insert_connected(vchild); vsock_insert_connected(vchild);
...@@ -904,9 +903,9 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) ...@@ -904,9 +903,9 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
struct sock *sk; struct sock *sk;
bool space_available; bool space_available;
vsock_addr_init(&src, le32_to_cpu(pkt->hdr.src_cid), vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port)); le32_to_cpu(pkt->hdr.src_port));
vsock_addr_init(&dst, le32_to_cpu(pkt->hdr.dst_cid), vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
le32_to_cpu(pkt->hdr.dst_port)); le32_to_cpu(pkt->hdr.dst_port));
trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port, trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
......
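vsock CIDs are 64-bit on the wire, so every hdr.src_cid/dst_cid access must go through le64_to_cpu(), and a config read of guest_cid needs an __le64 local; reading into a plain u64, as before, only happens to work on little-endian hosts. A small sketch of the corrected config read:

#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>

/* Sketch: endian-correct read of a 64-bit LE config field,
 * matching the guest_cid fix above.
 */
static u64 demo_get_guest_cid(struct virtio_device *vdev)
{
	__le64 cid;

	vdev->config->get(vdev,
			  offsetof(struct virtio_vsock_config, guest_cid),
			  &cid, sizeof(cid));
	return le64_to_cpu(cid);
}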
...@@ -17,8 +17,6 @@ cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o ...@@ -17,8 +17,6 @@ cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
CFLAGS_trace.o := -I$(src) CFLAGS_trace.o := -I$(src)
ccflags-y += -D__CHECK_ENDIAN__
$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk $(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
@$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@ @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
......
...@@ -335,7 +335,7 @@ our $Attribute = qr{ ...@@ -335,7 +335,7 @@ our $Attribute = qr{
__percpu| __percpu|
__nocast| __nocast|
__safe| __safe|
__bitwise__| __bitwise|
__packed__| __packed__|
__packed2__| __packed2__|
__naked| __naked|
...@@ -3681,7 +3681,7 @@ sub process { ...@@ -3681,7 +3681,7 @@ sub process {
$line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ && $line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ &&
$line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ && $line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ &&
$line !~ /\b$typeTypedefs\b/ && $line !~ /\b$typeTypedefs\b/ &&
$line !~ /\b__bitwise(?:__|)\b/) { $line !~ /\b__bitwise\b/) {
WARN("NEW_TYPEDEFS", WARN("NEW_TYPEDEFS",
"do not add new typedefs\n" . $herecurr); "do not add new typedefs\n" . $herecurr);
} }
......
...@@ -42,11 +42,7 @@ typedef __s8 s8; ...@@ -42,11 +42,7 @@ typedef __s8 s8;
#else #else
#define __bitwise__ #define __bitwise__
#endif #endif
#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__ #define __bitwise __bitwise__
#else
#define __bitwise
#endif
#define __force #define __force
#define __user #define __user
......
...@@ -4,6 +4,6 @@ ...@@ -4,6 +4,6 @@
#define WRITE_ONCE(var, val) \ #define WRITE_ONCE(var, val) \
(*((volatile typeof(val) *)(&(var))) = (val)) (*((volatile typeof(val) *)(&(var))) = (val))
#define READ_ONCE(var) (*((volatile typeof(val) *)(&(var)))) #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
#endif #endif
#ifndef UACCESS_H #ifndef UACCESS_H
#define UACCESS_H #define UACCESS_H
extern void *__user_addr_min, *__user_addr_max;
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) #include <linux/compiler.h>
extern void *__user_addr_min, *__user_addr_max;
static inline void __chk_user_ptr(const volatile void *p, size_t size) static inline void __chk_user_ptr(const volatile void *p, size_t size)
{ {
...@@ -13,7 +14,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size) ...@@ -13,7 +14,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
({ \ ({ \
typeof(ptr) __pu_ptr = (ptr); \ typeof(ptr) __pu_ptr = (ptr); \
__chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \ __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \
ACCESS_ONCE(*(__pu_ptr)) = x; \ WRITE_ONCE(*(__pu_ptr), x); \
0; \ 0; \
}) })
...@@ -21,7 +22,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size) ...@@ -21,7 +22,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
({ \ ({ \
typeof(ptr) __pu_ptr = (ptr); \ typeof(ptr) __pu_ptr = (ptr); \
__chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \ __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \
x = ACCESS_ONCE(*(__pu_ptr)); \ x = READ_ONCE(*(__pu_ptr)); \
0; \ 0; \
}) })
......
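The tools/virtio READ_ONCE() referenced typeof(val) even though the macro's parameter is named var, so it compiled only when some unrelated identifier 'val' happened to be in scope, and then used the wrong type. A tiny userspace demonstration of the corrected pair:

/* Userspace sketch of the corrected macros from tools/virtio.
 * typeof(var) must name the macro's own parameter; the old
 * typeof(val) silently picked up whatever 'val' was visible.
 */
#include <stdio.h>

#define WRITE_ONCE(var, val) \
	(*((volatile typeof(val) *)(&(var))) = (val))
#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))

int main(void)
{
	int shared = 0;

	WRITE_ONCE(shared, 42);
	printf("%d\n", READ_ONCE(shared));	/* one volatile load */
	return 0;
}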