Commit 492dfb48 authored by James Bottomley, committed by James Bottomley

[SCSI] block: add support for shared tag maps

The current block queue implementation already contains most of the
machinery for shared tag maps.  The only remaining piece is a way to
allocate and destroy a tag map independently of the queues, so that
the maps can be managed on the life cycle of the overseeing entity.
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent f19eaa7f
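
For context, before the diff below, here is a minimal sketch (not part of this commit) of how an overseeing entity such as a SCSI host driver might use the new API. The my_host structure and the my_* helper functions are hypothetical, illustrative names; only blk_init_tags(), blk_queue_init_tags() and blk_free_tags() come from this patch.

	/* Hypothetical driver-side usage of the shared tag map API.
	 * Assumes a 2.6.18-era kernel; my_host and the helpers below
	 * are illustrative, not part of this patch. */
	#include <linux/blkdev.h>

	struct my_host {
		struct blk_queue_tag *shared_tags;	/* one map for all LUNs */
	};

	static int my_host_attach(struct my_host *host, int depth)
	{
		/* Allocate a tag map that is not bound to any queue. */
		host->shared_tags = blk_init_tags(depth);
		return host->shared_tags ? 0 : -ENOMEM;
	}

	static int my_lun_attach(struct my_host *host, request_queue_t *q,
				 int depth)
	{
		/* Attach the shared map; the existing else-branch in
		 * blk_queue_init_tags() takes a reference on it. */
		return blk_queue_init_tags(q, depth, host->shared_tags);
	}

	static void my_host_detach(struct my_host *host)
	{
		/* Every queue that used the map must already be released,
		 * otherwise blk_free_tags() hits its BUG(). */
		blk_free_tags(host->shared_tags);
	}
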
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -848,21 +848,18 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt:	the tag map to free
  *
- *  Notes:
- *    blk_cleanup_queue() will take care of calling this function, if tagging
- *    has been used. So there's no need to call this directly.
- **/
-static void __blk_queue_free_tags(request_queue_t *q)
+ * Tries to free the specified @bqt@.  Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
 {
-	struct blk_queue_tag *bqt = q->queue_tags;
-
-	if (!bqt)
-		return;
+	int retval;
 
-	if (atomic_dec_and_test(&bqt->refcnt)) {
+	retval = atomic_dec_and_test(&bqt->refcnt);
+	if (retval) {
 		BUG_ON(bqt->busy);
 		BUG_ON(!list_empty(&bqt->busy_list));
 
@@ -873,12 +870,49 @@ static void __blk_queue_free_tags(request_queue_t *q)
 		bqt->tag_map = NULL;
 
 		kfree(bqt);
+
 	}
 
+	return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *    blk_cleanup_queue() will take care of calling this function, if tagging
+ *    has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+
+	if (!bqt)
+		return;
+
+	__blk_free_tags(bqt);
+
 	q->queue_tags = NULL;
 	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }
 
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt:	the tag map to free
+ *
+ * For externally managed @bqt@ frees the map.  Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+	if (unlikely(!__blk_free_tags(bqt)))
+		BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
@@ -901,7 +935,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	unsigned long *tag_map;
 	int nr_ulongs;
 
-	if (depth > q->nr_requests * 2) {
+	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
 		       __FUNCTION__, depth);
@@ -927,6 +961,38 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	return -ENOMEM;
 }
 
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+						   int depth)
+{
+	struct blk_queue_tag *tags;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	if (init_tag_map(q, tags, depth))
+		goto fail;
+
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;
+	atomic_set(&tags->refcnt, 1);
+
+	return tags;
+fail:
+	kfree(tags);
+	return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth:	the maximum queue depth supported
+ * @tags: the tag to use
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+	return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
 /**
  * blk_queue_init_tags - initialize the queue tag info
  * @q:  the request queue for the device
@@ -941,16 +1007,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth,
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-		if (!tags)
-			goto fail;
+		tags = __blk_queue_init_tags(q, depth);
 
-		if (init_tag_map(q, tags, depth))
+		if (!tags)
 			goto fail;
-
-		INIT_LIST_HEAD(&tags->busy_list);
-		tags->busy = 0;
-		atomic_set(&tags->refcnt, 1);
 	} else if (q->queue_tags) {
 		if ((rc = blk_queue_resize_tags(q, depth)))
 			return rc;
@@ -1001,6 +1061,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 		return 0;
 	}
 
+	/*
+	 * Currently cannot replace a shared tag map with a new
+	 * one, so error out if this is the case
+	 */
+	if (atomic_read(&bqt->refcnt) != 1)
+		return -EBUSY;
+
 	/*
 	 * save the old state info, so we can copy it back
 	 */
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -746,6 +746,8 @@ extern void blk_queue_free_tags(request_queue_t *);
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern long blk_congestion_wait(int rw, long timeout);
+extern struct blk_queue_tag *blk_init_tags(int);
+extern void blk_free_tags(struct blk_queue_tag *);
 
 extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
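
One behavioral consequence of the new refcount check is that blk_queue_resize_tags() now refuses to touch a map that is still referenced elsewhere. A short illustration (the queue variable q is hypothetical; an externally allocated map always has refcnt >= 2 while a queue is attached, since the owner holds the initial reference):

	/* Illustrative only: q is attached to an externally allocated
	 * shared map, so bqt->refcnt != 1 and the new check in
	 * blk_queue_resize_tags() returns -EBUSY rather than swapping
	 * the map out from under the other users. */
	if (blk_queue_resize_tags(q, 64) == -EBUSY)
		printk(KERN_INFO "cannot resize a shared tag map\n");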