Commit 35fab927 authored by Linus Torvalds

Merge tag 'for-linus-6.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - some cleanups in the Xen blkback driver

 - fix potential sleeps under lock in various Xen drivers

* tag 'for-linus-6.4-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/blkback: move blkif_get_x86_*_req() into blkback.c
  xen/blkback: simplify free_persistent_gnts() interface
  xen/blkback: remove stale prototype
  xen/blkback: fix white space code style issues
  xen/pvcalls: don't call bind_evtchn_to_irqhandler() under lock
  xen/scsiback: don't call scsiback_free_translation_entry() under lock
  xen/pciback: don't call pcistub_device_put() under lock
parents da46b58f cbfac770
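
Three of these fixes follow the same pattern: only the list manipulation happens while the spinlock is held, and the work that may sleep or take other locks (binding an irq handler, dropping the last reference, freeing an entry) is moved to after the lock is released. The snippet below is not taken from the series; it is a minimal userspace sketch of that pattern, with hypothetical entry/pending/release_all names and a pthread mutex standing in for the kernel spinlock.

/*
 * Minimal sketch (not kernel code): detach entries from the shared list
 * while holding the lock, then do the actual teardown after unlocking,
 * so nothing potentially sleeping runs under the lock.
 * "struct entry", "pending" and "release_all" are hypothetical names.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	struct entry *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending;	/* protected by list_lock */

static void add_entry(int id)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->id = id;
	pthread_mutex_lock(&list_lock);
	e->next = pending;
	pending = e;
	pthread_mutex_unlock(&list_lock);
}

static void release_all(void)
{
	struct entry *local;

	/* Under the lock: only unhook the whole list. */
	pthread_mutex_lock(&list_lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Lock dropped: now the per-entry cleanup can run safely. */
	while (local) {
		struct entry *next = local->next;

		printf("releasing entry %d\n", local->id);
		free(local);
		local = next;
	}
}

int main(void)
{
	add_entry(1);
	add_entry(2);
	release_all();
	return 0;
}

The scsiback change below does the same thing with list_cut_before() plus kref_put() after the unlock, and the pciback and pvcalls changes apply the same ordering to reference puts and to bind_evtchn_to_irqhandler().
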
@@ -239,9 +239,9 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring,
 	atomic_dec(&ring->persistent_gnt_in_use);
 }
 
-static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
-				 unsigned int num)
+static void free_persistent_gnts(struct xen_blkif_ring *ring)
 {
+	struct rb_root *root = &ring->persistent_gnts;
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct persistent_gnt *persistent_gnt;
@@ -249,6 +249,9 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
 	int segs_to_unmap = 0;
 	struct gntab_unmap_queue_data unmap_data;
 
+	if (RB_EMPTY_ROOT(root))
+		return;
+
 	unmap_data.pages = pages;
 	unmap_data.unmap_ops = unmap;
 	unmap_data.kunmap_ops = NULL;
@@ -277,9 +280,11 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
 		rb_erase(&persistent_gnt->node, root);
 		kfree(persistent_gnt);
-		num--;
+		ring->persistent_gnt_c--;
 	}
 
-	BUG_ON(num != 0);
+	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+	BUG_ON(ring->persistent_gnt_c != 0);
 }
 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
@@ -631,12 +636,7 @@ int xen_blkif_schedule(void *arg)
 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
 {
 	/* Free all persistent grant pages */
-	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
-		free_persistent_gnts(ring, &ring->persistent_gnts,
-				     ring->persistent_gnt_c);
-
-	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
-	ring->persistent_gnt_c = 0;
+	free_persistent_gnts(ring);
 
 	/* Since we are shutting down remove all pages from the buffer */
 	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
@@ -891,7 +891,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
 out:
 	for (i = last_map; i < num; i++) {
 		/* Don't zap current batch's valid persistent grants. */
-		if(i >= map_until)
+		if (i >= map_until)
 			pages[i]->persistent_gnt = NULL;
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 	}
@@ -1072,7 +1072,111 @@ static void end_block_io_op(struct bio *bio)
 	bio_put(bio);
 }
static void blkif_get_x86_32_req(struct blkif_request *dst,
const struct blkif_x86_32_request *src)
{
unsigned int i, n;
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
dst->u.rw.handle = src->u.rw.handle;
dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
dst->u.rw.nr_segments);
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments =
READ_ONCE(src->u.indirect.nr_segments);
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
n = min(MAX_INDIRECT_PAGES,
INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < n; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
static void blkif_get_x86_64_req(struct blkif_request *dst,
const struct blkif_x86_64_request *src)
{
unsigned int i, n;
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
dst->u.rw.nr_segments = READ_ONCE(src->u.rw.nr_segments);
dst->u.rw.handle = src->u.rw.handle;
dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
n = min_t(unsigned int, BLKIF_MAX_SEGMENTS_PER_REQUEST,
dst->u.rw.nr_segments);
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments =
READ_ONCE(src->u.indirect.nr_segments);
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
n = min(MAX_INDIRECT_PAGES,
INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < n; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
/*
 * Function to copy the from the ring buffer the 'struct blkif_request'
......
@@ -296,7 +296,7 @@ struct xen_blkif_ring {
 	struct work_struct	free_work;
 	/* Thread shutdown wait queue. */
 	wait_queue_head_t	shutdown_wq;
-	struct xen_blkif 	*blkif;
+	struct xen_blkif	*blkif;
 };
 
 struct xen_blkif {
@@ -315,7 +315,7 @@ struct xen_blkif {
 	atomic_t		drain;
 	struct work_struct	free_work;
 	unsigned int		nr_ring_pages;
-	bool 			multi_ref;
+	bool			multi_ref;
 	/* All rings for this device. */
 	struct xen_blkif_ring	*rings;
@@ -329,7 +329,7 @@ struct seg_buf {
 };
 
 struct grant_page {
-	struct page 		*page;
+	struct page		*page;
 	struct persistent_gnt	*persistent_gnt;
 	grant_handle_t		handle;
 	grant_ref_t		gref;
@@ -384,7 +384,6 @@ void xen_blkif_xenbus_fini(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
-int xen_blkif_purge_persistent(void *arg);
 void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
@@ -395,100 +394,4 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 
 void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
dst->u.rw.nr_segments = src->u.rw.nr_segments;
dst->u.rw.handle = src->u.rw.handle;
dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
barrier();
if (n > dst->u.rw.nr_segments)
n = dst->u.rw.nr_segments;
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
barrier();
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < j; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = READ_ONCE(src->operation);
switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
dst->u.rw.nr_segments = src->u.rw.nr_segments;
dst->u.rw.handle = src->u.rw.handle;
dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
barrier();
if (n > dst->u.rw.nr_segments)
n = dst->u.rw.nr_segments;
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
barrier();
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < j; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
@@ -227,22 +227,30 @@ static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
 static void free_active_ring(struct sock_mapping *map);
 
-static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
-				   struct sock_mapping *map)
+static void pvcalls_front_destroy_active(struct pvcalls_bedata *bedata,
+					 struct sock_mapping *map)
 {
 	int i;
 
 	unbind_from_irqhandler(map->active.irq, map);
 
-	spin_lock(&bedata->socket_lock);
-	if (!list_empty(&map->list))
-		list_del_init(&map->list);
-	spin_unlock(&bedata->socket_lock);
+	if (bedata) {
+		spin_lock(&bedata->socket_lock);
+		if (!list_empty(&map->list))
+			list_del_init(&map->list);
+		spin_unlock(&bedata->socket_lock);
+	}
 
 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
 		gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
 	gnttab_end_foreign_access(map->active.ref, NULL);
 	free_active_ring(map);
+}
+
+static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+				   struct sock_mapping *map)
+{
+	pvcalls_front_destroy_active(bedata, map);
 
 	kfree(map);
 }
@@ -433,19 +441,18 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-
-	spin_lock(&bedata->socket_lock);
-	ret = get_request(bedata, &req_id);
+	ret = create_active(map, &evtchn);
 	if (ret < 0) {
-		spin_unlock(&bedata->socket_lock);
 		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-	ret = create_active(map, &evtchn);
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
-		free_active_ring(map);
+		pvcalls_front_destroy_active(NULL, map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -821,28 +828,27 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
 
-	spin_lock(&bedata->socket_lock);
-	ret = get_request(bedata, &req_id);
+	ret = create_active(map2, &evtchn);
 	if (ret < 0) {
-		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
-			  (void *)&map->passive.flags);
-		spin_unlock(&bedata->socket_lock);
 		free_active_ring(map2);
 		kfree(map2);
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-	ret = create_active(map2, &evtchn);
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
-		free_active_ring(map2);
-		kfree(map2);
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
+		pvcalls_front_free_map(bedata, map2);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	list_add_tail(&map2->list, &bedata->socket_mappings);
 
 	req = RING_GET_REQUEST(&bedata->ring, req_id);
......
@@ -194,8 +194,6 @@ static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
 	struct pci_dev *pci_dev = NULL;
 	unsigned long flags;
 
-	pcistub_device_get(psdev);
-
 	spin_lock_irqsave(&psdev->lock, flags);
 	if (!psdev->pdev) {
 		psdev->pdev = pdev;
@@ -203,8 +201,8 @@ static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
 	}
 	spin_unlock_irqrestore(&psdev->lock, flags);
 
-	if (!pci_dev)
-		pcistub_device_put(psdev);
+	if (pci_dev)
+		pcistub_device_get(psdev);
 
 	return pci_dev;
 }
......
@@ -1010,12 +1010,6 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
 	return err;
 }
 
-static void __scsiback_del_translation_entry(struct v2p_entry *entry)
-{
-	list_del(&entry->l);
-	kref_put(&entry->kref, scsiback_free_translation_entry);
-}
-
 /*
   Delete the translation entry specified
 */
@@ -1024,18 +1018,20 @@ static int scsiback_del_translation_entry(struct vscsibk_info *info,
 {
 	struct v2p_entry *entry;
 	unsigned long flags;
-	int ret = 0;
 
 	spin_lock_irqsave(&info->v2p_lock, flags);
 	/* Find out the translation entry specified */
 	entry = scsiback_chk_translation_entry(info, v);
 	if (entry)
-		__scsiback_del_translation_entry(entry);
-	else
-		ret = -ENOENT;
+		list_del(&entry->l);
 
 	spin_unlock_irqrestore(&info->v2p_lock, flags);
-	return ret;
+
+	if (!entry)
+		return -ENOENT;
+
+	kref_put(&entry->kref, scsiback_free_translation_entry);
+	return 0;
 }
 
 static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
@@ -1239,14 +1235,19 @@ static void scsiback_release_translation_entry(struct vscsibk_info *info)
 {
 	struct v2p_entry *entry, *tmp;
 	struct list_head *head = &(info->v2p_entry_lists);
+	struct list_head tmp_list;
 	unsigned long flags;
 
 	spin_lock_irqsave(&info->v2p_lock, flags);
-	list_for_each_entry_safe(entry, tmp, head, l)
-		__scsiback_del_translation_entry(entry);
 
+	list_cut_before(&tmp_list, head, head);
+
 	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	list_for_each_entry_safe(entry, tmp, &tmp_list, l) {
+		list_del(&entry->l);
+		kref_put(&entry->kref, scsiback_free_translation_entry);
+	}
 }
 
 static void scsiback_remove(struct xenbus_device *dev)
......