Commit 9f51c05d authored by Wen Yang, committed by Boris Ostrovsky

pvcalls-front: Avoid get_free_pages(GFP_KERNEL) under spinlock

The problem is that __get_free_pages(GFP_KERNEL) is called while a spin lock is held.
The call tree is:
pvcalls_front_accept() holds bedata->socket_lock.
    -> create_active()
        -> __get_free_pages() uses GFP_KERNEL

The create_active() function is called from pvcalls_front_accept() (and
likewise from pvcalls_front_connect()) with bedata->socket_lock held.
The allocation is therefore not allowed to sleep, and GFP_KERNEL cannot
be used in that context.

This issue was detected with the Coccinelle software.
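For context, a minimal sketch of the pattern the fix follows (illustrative only:
the example_dev/example_setup names below are made up and this is not the
pvcalls code): perform the sleeping GFP_KERNEL allocation before taking the
spinlock, and only publish the pre-allocated memory while the lock is held.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/spinlock.h>

    struct example_dev {
            spinlock_t lock;
            void *ring;     /* published under the lock */
    };

    static int example_setup(struct example_dev *dev)
    {
            /* May sleep: no spinlock is held at this point. */
            void *ring = (void *)get_zeroed_page(GFP_KERNEL);

            if (!ring)
                    return -ENOMEM;

            spin_lock(&dev->lock);
            /* Only non-sleeping work happens with the lock held. */
            dev->ring = ring;
            spin_unlock(&dev->lock);

            return 0;
    }

Switching the allocation to GFP_ATOMIC would also avoid sleeping under the
lock, but pre-allocating with GFP_KERNEL outside the lock is generally
preferred because atomic allocations are far more likely to fail under
memory pressure.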

v2: Add a function that does the allocations, call it outside
    the lock, and pass the allocated data to create_active().

v3: Use the matching deallocators, i.e. free_page()
    and free_pages(), respectively.

v4: It would be better to pre-populate map (struct sock_mapping),
    rather than introducing one more new struct.

v5: Since the data is now allocated outside of create_active(), it should
    also be freed outside of it when create_active() fails.
    Move kzalloc(sizeof(*map2), GFP_ATOMIC) outside the spinlock and
    use GFP_KERNEL instead.

v6: Drop the superfluous calls.
Suggested-by: Juergen Gross <jgross@suse.com>
Suggested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Suggested-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
CC: Julia Lawall <julia.lawall@lip6.fr>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Juergen Gross <jgross@suse.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: xen-devel@lists.xenproject.org
CC: linux-kernel@vger.kernel.org
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent 1f8ce09b
drivers/xen/pvcalls-front.c

@@ -341,6 +341,39 @@ int pvcalls_front_socket(struct socket *sock)
         return ret;
 }
 
+static void free_active_ring(struct sock_mapping *map)
+{
+        free_pages((unsigned long)map->active.data.in,
+                        map->active.ring->ring_order);
+        free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+        void *bytes;
+
+        map->active.ring = (struct pvcalls_data_intf *)
+                get_zeroed_page(GFP_KERNEL);
+        if (!map->active.ring)
+                goto out;
+
+        map->active.ring->ring_order = PVCALLS_RING_ORDER;
+        bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                         PVCALLS_RING_ORDER);
+        if (!bytes)
+                goto out;
+
+        map->active.data.in = bytes;
+        map->active.data.out = bytes +
+                XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+        return 0;
+
+out:
+        free_active_ring(map);
+        return -ENOMEM;
+}
+
 static int create_active(struct sock_mapping *map, int *evtchn)
 {
         void *bytes;
@@ -349,15 +382,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
         *evtchn = -1;
         init_waitqueue_head(&map->active.inflight_conn_req);
 
-        map->active.ring = (struct pvcalls_data_intf *)
-                __get_free_page(GFP_KERNEL | __GFP_ZERO);
-        if (map->active.ring == NULL)
-                goto out_error;
-        map->active.ring->ring_order = PVCALLS_RING_ORDER;
-        bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                        PVCALLS_RING_ORDER);
-        if (bytes == NULL)
-                goto out_error;
+        bytes = map->active.data.in;
         for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
                 map->active.ring->ref[i] = gnttab_grant_foreign_access(
                         pvcalls_front_dev->otherend_id,
@@ -367,10 +392,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
                 pvcalls_front_dev->otherend_id,
                 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
 
-        map->active.data.in = bytes;
-        map->active.data.out = bytes +
-                XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
         ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
         if (ret)
                 goto out_error;
@@ -391,8 +412,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
         if (*evtchn >= 0)
                 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-        free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
-        free_page((unsigned long)map->active.ring);
         return ret;
 }
 
@@ -412,17 +431,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
                 return PTR_ERR(map);
 
         bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+        ret = alloc_active_ring(map);
+        if (ret < 0) {
+                pvcalls_exit_sock(sock);
+                return ret;
+        }
 
         spin_lock(&bedata->socket_lock);
         ret = get_request(bedata, &req_id);
         if (ret < 0) {
                 spin_unlock(&bedata->socket_lock);
+                free_active_ring(map);
                 pvcalls_exit_sock(sock);
                 return ret;
         }
         ret = create_active(map, &evtchn);
         if (ret < 0) {
                 spin_unlock(&bedata->socket_lock);
+                free_active_ring(map);
                 pvcalls_exit_sock(sock);
                 return ret;
         }
@@ -786,25 +812,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                 }
         }
 
-        spin_lock(&bedata->socket_lock);
-        ret = get_request(bedata, &req_id);
-        if (ret < 0) {
+        map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+        if (map2 == NULL) {
                 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                           (void *)&map->passive.flags);
-                spin_unlock(&bedata->socket_lock);
+                pvcalls_exit_sock(sock);
+                return -ENOMEM;
+        }
+        ret = alloc_active_ring(map2);
+        if (ret < 0) {
+                clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+                          (void *)&map->passive.flags);
+                kfree(map2);
                 pvcalls_exit_sock(sock);
                 return ret;
         }
-        map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
-        if (map2 == NULL) {
+        spin_lock(&bedata->socket_lock);
+        ret = get_request(bedata, &req_id);
+        if (ret < 0) {
                 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                           (void *)&map->passive.flags);
                 spin_unlock(&bedata->socket_lock);
+                free_active_ring(map2);
+                kfree(map2);
                 pvcalls_exit_sock(sock);
-                return -ENOMEM;
+                return ret;
         }
+
         ret = create_active(map2, &evtchn);
         if (ret < 0) {
+                free_active_ring(map2);
                 kfree(map2);
                 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                           (void *)&map->passive.flags);
...