Commit 0e24d34a
authored Mar 20, 2011 by David S. Miller

Merge branch 'vhost-net-next' of
git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

parents 1a0c8330 de4d768a
Showing 2 changed files with 64 additions and 150 deletions:

drivers/vhost/net.c    +26 -133
drivers/vhost/vhost.c  +38 -17

drivers/vhost/net.c
@@ -60,6 +60,7 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to,
 {
         int seg = 0;
         size_t size;
+
         while (len && seg < iov_count) {
                 size = min(from->iov_len, len);
                 to->iov_base = from->iov_base;
@@ -79,6 +80,7 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
 {
         int seg = 0;
         size_t size;
+
         while (len && seg < iovcount) {
                 size = min(from->iov_len, len);
                 to->iov_base = from->iov_base;
@@ -211,12 +213,13 @@ static int peek_head_len(struct sock *sk)
 {
         struct sk_buff *head;
         int len = 0;
+        unsigned long flags;
 
-        lock_sock(sk);
+        spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
         head = skb_peek(&sk->sk_receive_queue);
-        if (head)
+        if (likely(head))
                 len = head->len;
-        release_sock(sk);
+        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
 
         return len;
 }
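
Note on this hunk: peek_head_len() now narrows the critical section, holding only the receive queue's own spinlock (irqsave, since that lock is also taken from interrupt context) just long enough to read the head skb's length, instead of taking the whole socket lock. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for spin_lock_irqsave() and invented types in place of struct sk_buff:

/* Sketch only: models the narrowed critical section of peek_head_len()
 * with userspace primitives; the kernel uses spin_lock_irqsave() on
 * sk->sk_receive_queue.lock, not a pthread mutex. */
#include <pthread.h>
#include <stdio.h>

struct pkt { int len; struct pkt *next; };

struct rxq {
        pthread_mutex_t lock;   /* stands in for sk_receive_queue.lock */
        struct pkt *head;
};

/* Lock only the queue, only long enough to read head->len. */
static int peek_head_len(struct rxq *q)
{
        int len = 0;

        pthread_mutex_lock(&q->lock);
        if (q->head)
                len = q->head->len;
        pthread_mutex_unlock(&q->lock);
        return len;
}

int main(void)
{
        struct pkt p = { .len = 1500, .next = NULL };
        struct rxq q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &p };

        printf("head len = %d\n", peek_head_len(&q));
        return 0;
}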
@@ -227,6 +230,7 @@ static int peek_head_len(struct sock *sk)
  * @iovcount - returned count of io vectors we fill
  * @log - vhost log
  * @log_num - log offset
+ * @quota - headcount quota, 1 for big buffer
  * returns number of buffer heads allocated, negative on error
  */
 static int get_rx_bufs(struct vhost_virtqueue *vq,
@@ -234,7 +238,8 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
                        int datalen,
                        unsigned *iovcount,
                        struct vhost_log *log,
-                       unsigned *log_num)
+                       unsigned *log_num,
+                       unsigned int quota)
 {
         unsigned int out, in;
         int seg = 0;
@@ -242,7 +247,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
         unsigned d;
         int r, nlogs = 0;
 
-        while (datalen > 0) {
+        while (datalen > 0 && headcount < quota) {
                 if (unlikely(seg >= UIO_MAXIOV)) {
                         r = -ENOBUFS;
                         goto err;
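
Note on this hunk: with the new quota argument, get_rx_bufs() stops collecting descriptors once headcount reaches the cap, so a guest that cannot merge buffers is handed at most one head per packet. A hedged sketch of such a quota-bounded gather loop (standalone C with invented names, not the kernel's API):

/* Sketch: quota-bounded gathering, loosely modeled on get_rx_bufs().
 * All names and the fixed per-segment capacity are illustrative. */
#include <stdio.h>

#define MAX_SEGS 8

static int gather(int datalen, unsigned quota, int seg_cap)
{
        int headcount = 0;

        while (datalen > 0 && headcount < quota) {
                if (headcount >= MAX_SEGS)
                        return -1;      /* analogous to -ENOBUFS */
                datalen -= seg_cap;     /* pretend one head consumed */
                headcount++;
        }
        /* Simplified: signal failure if the quota ran out with data
         * left over; the kernel handles this case more gracefully. */
        return datalen > 0 ? -1 : headcount;
}

int main(void)
{
        printf("mergeable:  %d heads\n", gather(4000, MAX_SEGS, 1500));
        printf("big buffer: %d heads\n", gather(4000, 1, 4096));
        return 0;
}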
@@ -282,117 +287,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
-static void handle_rx_big(struct vhost_net *net)
-{
-        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
-        unsigned out, in, log, s;
-        int head;
-        struct vhost_log *vq_log;
-        struct msghdr msg = {
-                .msg_name = NULL,
-                .msg_namelen = 0,
-                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
-                .msg_controllen = 0,
-                .msg_iov = vq->iov,
-                .msg_flags = MSG_DONTWAIT,
-        };
-        struct virtio_net_hdr hdr = {
-                .flags = 0,
-                .gso_type = VIRTIO_NET_HDR_GSO_NONE
-        };
-        size_t len, total_len = 0;
-        int err;
-        size_t hdr_size;
-        /* TODO: check that we are running from vhost_worker? */
-        struct socket *sock = rcu_dereference_check(vq->private_data, 1);
-        if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
-                return;
-
-        mutex_lock(&vq->mutex);
-        vhost_disable_notify(vq);
-        hdr_size = vq->vhost_hlen;
-
-        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
-                vq->log : NULL;
-
-        for (;;) {
-                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
-                                         ARRAY_SIZE(vq->iov),
-                                         &out, &in,
-                                         vq_log, &log);
-                /* On error, stop handling until the next kick. */
-                if (unlikely(head < 0))
-                        break;
-                /* OK, now we need to know about added descriptors. */
-                if (head == vq->num) {
-                        if (unlikely(vhost_enable_notify(vq))) {
-                                /* They have slipped one in as we were
-                                 * doing that: check again. */
-                                vhost_disable_notify(vq);
-                                continue;
-                        }
-                        /* Nothing new?  Wait for eventfd to tell us
-                         * they refilled. */
-                        break;
-                }
-                /* We don't need to be notified again. */
-                if (out) {
-                        vq_err(vq, "Unexpected descriptor format for RX: "
-                               "out %d, int %d\n",
-                               out, in);
-                        break;
-                }
-                /* Skip header. TODO: support TSO/mergeable rx buffers. */
-                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
-                msg.msg_iovlen = in;
-                len = iov_length(vq->iov, in);
-                /* Sanity check */
-                if (!len) {
-                        vq_err(vq, "Unexpected header len for RX: "
-                               "%zd expected %zd\n",
-                               iov_length(vq->hdr, s), hdr_size);
-                        break;
-                }
-                err = sock->ops->recvmsg(NULL, sock, &msg,
-                                         len, MSG_DONTWAIT | MSG_TRUNC);
-                /* TODO: Check specific error and bomb out unless EAGAIN? */
-                if (err < 0) {
-                        vhost_discard_vq_desc(vq, 1);
-                        break;
-                }
-                /* TODO: Should check and handle checksum. */
-                if (err > len) {
-                        pr_debug("Discarded truncated rx packet: "
-                                 " len %d > %zd\n", err, len);
-                        vhost_discard_vq_desc(vq, 1);
-                        continue;
-                }
-                len = err;
-                err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
-                if (err) {
-                        vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
-                               vq->iov->iov_base, err);
-                        break;
-                }
-                len += hdr_size;
-                vhost_add_used_and_signal(&net->dev, vq, head, len);
-                if (unlikely(vq_log))
-                        vhost_log_write(vq, vq_log, log, len);
-                total_len += len;
-                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
-                        vhost_poll_queue(&vq->poll);
-                        break;
-                }
-        }
-
-        mutex_unlock(&vq->mutex);
-}
-
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_rx_mergeable(struct vhost_net *net)
+static void handle_rx(struct vhost_net *net)
 {
         struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
         unsigned uninitialized_var(in), log;
@@ -405,19 +300,18 @@ static void handle_rx_mergeable(struct vhost_net *net)
                 .msg_iov = vq->iov,
                 .msg_flags = MSG_DONTWAIT,
         };
         struct virtio_net_hdr_mrg_rxbuf hdr = {
                 .hdr.flags = 0,
                 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
         };
         size_t total_len = 0;
-        int err, headcount;
+        int err, headcount, mergeable;
         size_t vhost_hlen, sock_hlen;
         size_t vhost_len, sock_len;
         /* TODO: check that we are running from vhost_worker? */
         struct socket *sock = rcu_dereference_check(vq->private_data, 1);
-        if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
+
+        if (!sock)
                 return;
 
         mutex_lock(&vq->mutex);
@@ -427,12 +321,14 @@ static void handle_rx_mergeable(struct vhost_net *net)
         vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                 vq->log : NULL;
+        mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
 
         while ((sock_len = peek_head_len(sock->sk))) {
                 sock_len += sock_hlen;
                 vhost_len = sock_len + vhost_hlen;
                 headcount = get_rx_bufs(vq, vq->heads, vhost_len,
-                                        &in, vq_log, &log);
+                                        &in, vq_log, &log,
+                                        likely(mergeable) ? UIO_MAXIOV : 1);
                 /* On error, stop handling until the next kick. */
                 if (unlikely(headcount < 0))
                         break;
@@ -476,7 +372,7 @@ static void handle_rx_mergeable(struct vhost_net *net)
                         break;
                 }
                 /* TODO: Should check and handle checksum. */
-                if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) &&
+                if (likely(mergeable) &&
                     memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
                                       offsetof(typeof(hdr), num_buffers),
                                       sizeof hdr.num_buffers)) {
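
Note on this hunk: with mergeable buffers the guest learns how many heads a packet consumed from the num_buffers field of struct virtio_net_hdr_mrg_rxbuf, which is why the code writes headcount at offsetof(typeof(hdr), num_buffers). A sketch of that header fix-up over a flat buffer (plain memcpy instead of the kernel's memcpy_toiovecend(), and a simplified stand-in struct layout):

/* Sketch: patching num_buffers into a mergeable-rx header. The structs
 * mirror virtio's virtio_net_hdr_mrg_rxbuf only loosely; the kernel
 * writes through an iovec with memcpy_toiovecend(), not plain memcpy. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct net_hdr {                /* stand-in for virtio_net_hdr */
        uint8_t flags;
        uint8_t gso_type;
        /* remaining fields elided in this sketch */
};

struct net_hdr_mrg_rxbuf {      /* stand-in for virtio_net_hdr_mrg_rxbuf */
        struct net_hdr hdr;
        uint16_t num_buffers;   /* filled in by the host per packet */
};

int main(void)
{
        unsigned char guest_hdr[sizeof(struct net_hdr_mrg_rxbuf)] = {0};
        uint16_t headcount = 3; /* heads consumed by this packet */
        uint16_t out;

        /* Write headcount at the field's fixed offset in the header. */
        memcpy(guest_hdr + offsetof(struct net_hdr_mrg_rxbuf, num_buffers),
               &headcount, sizeof(headcount));

        memcpy(&out, guest_hdr + offsetof(struct net_hdr_mrg_rxbuf, num_buffers),
               sizeof(out));
        printf("num_buffers = %u\n", out);
        return 0;
}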
@@ -498,14 +394,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
         mutex_unlock(&vq->mutex);
 }
 
-static void handle_rx(struct vhost_net *net)
-{
-        if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
-                handle_rx_mergeable(net);
-        else
-                handle_rx_big(net);
-}
-
 static void handle_tx_kick(struct vhost_work *work)
 {
         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
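
Note on this hunk: the deleted dispatcher is the point of the series. Rather than keeping two near-identical receive loops and branching on the feature bit, the unified handle_rx() samples VIRTIO_NET_F_MRG_RXBUF once and expresses the difference purely as the buffer quota passed to get_rx_bufs(). A standalone sketch of that dispatch-by-parameter idea (illustrative names only, not the kernel's API):

/* Sketch: the feature bit becomes a parameter instead of a branch
 * between two code paths. Names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define UIO_MAXIOV 1024

/* One receive routine; behavior differs only in the head quota. */
static void handle_rx(bool mergeable)
{
        unsigned quota = mergeable ? UIO_MAXIOV : 1;

        printf("rx pass with per-packet quota %u\n", quota);
        /* ... gather up to 'quota' heads per packet, as get_rx_bufs()
         * does in the real code ... */
}

int main(void)
{
        handle_rx(true);        /* guest negotiated mergeable rx buffers */
        handle_rx(false);       /* legacy big-buffer guest */
        return 0;
}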
@@ -654,6 +542,7 @@ static struct socket *get_raw_socket(int fd)
         } uaddr;
         int uaddr_len = sizeof uaddr, r;
         struct socket *sock = sockfd_lookup(fd, &r);
+
         if (!sock)
                 return ERR_PTR(-ENOTSOCK);
@@ -682,6 +571,7 @@ static struct socket *get_tap_socket(int fd)
 {
         struct file *file = fget(fd);
         struct socket *sock;
+
         if (!file)
                 return ERR_PTR(-EBADF);
         sock = tun_get_socket(file);
@@ -696,6 +586,7 @@ static struct socket *get_tap_socket(int fd)
 static struct socket *get_socket(int fd)
 {
         struct socket *sock;
+
         /* special case to disable backend */
         if (fd == -1)
                 return NULL;
@@ -768,6 +659,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
         struct socket *tx_sock = NULL;
         struct socket *rx_sock = NULL;
         long err;
+
         mutex_lock(&n->dev.mutex);
         err = vhost_dev_check_owner(&n->dev);
         if (err)
@@ -829,6 +721,7 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
         struct vhost_vring_file backend;
         u64 features;
         int r;
+
         switch (ioctl) {
         case VHOST_NET_SET_BACKEND:
                 if (copy_from_user(&backend, argp, sizeof backend))

drivers/vhost/vhost.c
@@ -41,8 +41,8 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                             poll_table *pt)
 {
         struct vhost_poll *poll;
+
         poll = container_of(pt, struct vhost_poll, table);
-
         poll->wqh = wqh;
         add_wait_queue(wqh, &poll->wait);
 }
@@ -85,6 +85,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 void vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
         unsigned long mask;
+
         mask = file->f_op->poll(file, &poll->table);
         if (mask)
                 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
@@ -101,6 +102,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
                               unsigned seq)
 {
         int left;
+
         spin_lock_irq(&dev->work_lock);
         left = seq - work->done_seq;
         spin_unlock_irq(&dev->work_lock);
@@ -222,6 +224,7 @@ static int vhost_worker(void *data)
 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 {
         int i;
+
         for (i = 0; i < dev->nvqs; ++i) {
                 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
                                                UIO_MAXIOV, GFP_KERNEL);
@@ -235,6 +238,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
                         goto err_nomem;
         }
         return 0;
+
 err_nomem:
         for (; i >= 0; --i) {
                 kfree(dev->vqs[i].indirect);
@@ -247,6 +251,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 {
         int i;
+
         for (i = 0; i < dev->nvqs; ++i) {
                 kfree(dev->vqs[i].indirect);
                 dev->vqs[i].indirect = NULL;
@@ -304,6 +309,7 @@ struct vhost_attach_cgroups_struct {
...
@@ -304,6 +309,7 @@ struct vhost_attach_cgroups_struct {
static
void
vhost_attach_cgroups_work
(
struct
vhost_work
*
work
)
static
void
vhost_attach_cgroups_work
(
struct
vhost_work
*
work
)
{
{
struct
vhost_attach_cgroups_struct
*
s
;
struct
vhost_attach_cgroups_struct
*
s
;
s
=
container_of
(
work
,
struct
vhost_attach_cgroups_struct
,
work
);
s
=
container_of
(
work
,
struct
vhost_attach_cgroups_struct
,
work
);
s
->
ret
=
cgroup_attach_task_all
(
s
->
owner
,
current
);
s
->
ret
=
cgroup_attach_task_all
(
s
->
owner
,
current
);
}
}
@@ -311,6 +317,7 @@ static void vhost_attach_cgroups_work(struct vhost_work *work)
 static int vhost_attach_cgroups(struct vhost_dev *dev)
 {
         struct vhost_attach_cgroups_struct attach;
+
         attach.owner = current;
         vhost_work_init(&attach.work, vhost_attach_cgroups_work);
         vhost_work_queue(dev, &attach.work);
@@ -323,11 +330,13 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
         struct task_struct *worker;
         int err;
+
         /* Is there an owner already? */
         if (dev->mm) {
                 err = -EBUSY;
                 goto err_mm;
         }
+
         /* No owner, become one */
         dev->mm = get_task_mm(current);
         worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
@@ -380,6 +389,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
 void vhost_dev_cleanup(struct vhost_dev *dev)
 {
         int i;
+
         for (i = 0; i < dev->nvqs; ++i) {
                 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
                         vhost_poll_stop(&dev->vqs[i].poll);
@@ -421,6 +431,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
         u64 a = addr / VHOST_PAGE_SIZE / 8;
+
         /* Make sure 64 bit math will not overflow. */
         if (a > ULONG_MAX - (unsigned long)log_base ||
             a + (unsigned long)log_base > ULONG_MAX)
@@ -461,6 +472,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
                              int log_all)
 {
         int i;
+
         for (i = 0; i < d->nvqs; ++i) {
                 int ok;
                 mutex_lock(&d->vqs[i].mutex);
@@ -527,6 +539,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
         struct vhost_memory mem, *newmem, *oldmem;
         unsigned long size = offsetof(struct vhost_memory, regions);
+
         if (copy_from_user(&mem, m, size))
                 return -EFAULT;
         if (mem.padding)
@@ -544,7 +557,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                 return -EFAULT;
         }
+
         if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
                 kfree(newmem);
                 return -EFAULT;
         }
@@ -560,6 +574,7 @@ static int init_used(struct vhost_virtqueue *vq,
                      struct vring_used __user *used)
 {
         int r = put_user(vq->used_flags, &used->flags);
+
         if (r)
                 return r;
         return get_user(vq->last_used_idx, &used->idx);
@@ -849,6 +864,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 {
         struct vhost_memory_region *reg;
         int i;
+
         /* linear search is not brilliant, but we really have on the order of 6
          * regions in practice */
         for (i = 0; i < mem->nregions; ++i) {
@@ -871,6 +887,7 @@ static int set_bit_to_user(int nr, void __user *addr)
         void *base;
         int bit = nr + (log % PAGE_SIZE) * 8;
         int r;
+
         r = get_user_pages_fast(log, 1, 1, &page);
         if (r < 0)
                 return r;
@@ -888,6 +905,7 @@ static int log_write(void __user *log_base,
 {
         u64 write_page = write_address / VHOST_PAGE_SIZE;
         int r;
+
         if (!write_length)
                 return 0;
         write_length += write_address % VHOST_PAGE_SIZE;
@@ -1037,8 +1055,8 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                                i, count);
                 return -EINVAL;
         }
-        if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
-                                      sizeof desc))) {
+        if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
+                                      vq->indirect, sizeof desc))) {
                 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                        i, (size_t)indirect->addr + i * sizeof desc);
                 return -EINVAL;
@@ -1153,7 +1171,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                            i, vq->num, head);
                 return -EINVAL;
         }
-        ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
+        ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
         if (unlikely(ret)) {
                 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
                        i, vq->desc + i);
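
Note on this hunk: switching to __copy_from_user() drops a redundant access check on the hot path. The descriptor ring's user addresses are validated when the ring is configured, so each per-descriptor copy only needs the raw transfer. A userspace analogy of validate-once-then-copy-raw (invented names; access_ok() and __copy_from_user() are the kernel primitives being modeled):

/* Sketch: validate a ring's bounds once at setup, then use an
 * unchecked fast-path read per descriptor. The bounds check stands in
 * for access_ok(); raw_read() for __copy_from_user(). */
#include <stdio.h>
#include <string.h>

struct desc { unsigned long addr; unsigned len; };

static struct desc ring[16];

static int setup_ring(unsigned num)
{
        /* "access_ok" analog: prove the whole ring is in range once. */
        return num <= sizeof(ring) / sizeof(ring[0]) ? 0 : -1;
}

static void raw_read(struct desc *out, unsigned i)
{
        /* Unchecked copy; safe only because setup_ring() succeeded. */
        memcpy(out, &ring[i], sizeof(*out));
}

int main(void)
{
        struct desc d;

        if (setup_ring(16))
                return 1;
        ring[5] = (struct desc){ .addr = 0x1000, .len = 64 };
        raw_read(&d, 5);
        printf("desc 5: addr=%#lx len=%u\n", d.addr, d.len);
        return 0;
}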
@@ -1317,6 +1335,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
         __u16 flags;
+
         /* Flush out used index updates. This is paired
          * with the barrier that the Guest executes when enabling
          * interrupts. */
@@ -1361,6 +1380,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 {
         u16 avail_idx;
         int r;
+
         if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
                 return false;
         vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -1387,6 +1407,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 void vhost_disable_notify(struct vhost_virtqueue *vq)
 {
         int r;
+
         if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
                 return;
         vq->used_flags |= VRING_USED_F_NO_NOTIFY;