Commit 6728cb0e authored by Jens Axboe

block: make core bits checkpatch compliant

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 22b13210
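
For context, the hunks below repeat a small set of checkpatch-driven patterns: printk() calls gain an explicit KERN_ log level, over-long lines are wrapped to stay under 80 columns, assignments are hoisted out of if conditions, and the blank line between a function's closing brace and its EXPORT_SYMBOL() is dropped. A minimal sketch of the before/after style follows; blk_example_set_limit and example_limit are invented names for illustration, not part of the kernel tree.

```c
/* Hypothetical example, not from this commit: the checkpatch-clean
 * style that the hunks below converge on. */
#include <linux/kernel.h>
#include <linux/module.h>

static unsigned int example_limit;

int blk_example_set_limit(unsigned int limit)
{
        if (!limit) {
                limit = 1;
                /* checkpatch: printk() wants a KERN_ level, and the call
                 * is wrapped so the line stays under 80 columns. */
                printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
                       limit);
        }

        example_limit = limit;
        return 0;
}
/* checkpatch: EXPORT_SYMBOL() should immediately follow its function,
 * with no intervening blank line. */
EXPORT_SYMBOL(blk_example_set_limit);
```

Running scripts/checkpatch.pl -f over a source file reports exactly these classes of warnings; the diff below is the corresponding cleanup applied across the block core.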
@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+				__FUNCTION__);
 		return -EINVAL;
 	}
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
[One file's diff is collapsed in the original view.]
@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	/* we don't allow misaligned data like bio_map_user() does. If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	 * size, something has gone terribly wrong
 	 */
 	if (rq->nr_sectors < rq->current_nr_sectors) {
-		printk("blk: request botched\n");
+		printk(KERN_ERR "blk: request botched\n");
 		rq->nr_sectors = rq->current_nr_sectors;
 	}
 }
@@ -235,7 +235,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */
@@ -10,8 +10,10 @@
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+							mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
/** /**
...@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask) ...@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
if (mask > q->dma_alignment) if (mask > q->dma_alignment)
q->dma_alignment = mask; q->dma_alignment = mask;
} }
EXPORT_SYMBOL(blk_queue_update_dma_alignment); EXPORT_SYMBOL(blk_queue_update_dma_alignment);
int __init blk_settings_init(void) int __init blk_settings_init(void)
@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
+
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);
@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
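
The blk_queue_init_tags() hunk above applies another checkpatch rule: no assignment inside an if condition. A hedged sketch of the transformation, where resize() is a hypothetical stand-in for blk_queue_resize_tags(), not a real kernel helper:

```c
/* Illustration only; resize() is an invented stand-in. */
extern int resize(int depth);

int init_old_style(int depth)
{
        int rc;

        /* flagged by checkpatch: assignment buried in the condition */
        if ((rc = resize(depth)))
                return rc;
        return 0;
}

int init_new_style(int depth)
{
        int rc;

        /* preferred: assign first, then test the result */
        rc = resize(depth);
        if (rc)
                return rc;
        return 0;
}
```

The behavior is identical; the rewrite just keeps the side effect (the assignment) out of the condition, where it is easy to misread as a comparison.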
@@ -203,7 +202,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-		printk(KERN_ERR 
+		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);