Commit 843e8ddb authored Jun 01, 2015 by Jens Axboe

Merge branch 'for-4.2/core' into for-4.2/drivers

parents 75619bfa f26cdc85

Showing 24 changed files with 157 additions and 302 deletions (+157 -302)
block/bio-integrity.c           +2   -2
block/bio.c                     +14  -21
block/blk-core.c                +11  -83
block/blk-merge.c               +2   -1
block/blk-mq-tag.c              +38  -0
block/blk-mq-tag.h              +1   -0
block/blk-mq.c                  +10  -2
drivers/md/bcache/io.c          +1   -1
drivers/md/dm-cache-target.c    +0   -6
drivers/md/dm-raid1.c           +0   -2
drivers/md/dm-snap.c            +0   -1
drivers/md/dm-table.c           +16  -9
drivers/md/dm-thin.c            +3   -6
drivers/md/dm-verity.c          +1   -1
drivers/md/dm.c                 +40  -131
drivers/md/dm.h                 +3   -2
fs/btrfs/disk-io.c              +1   -1
fs/btrfs/volumes.c              +5   -11
fs/btrfs/volumes.h              +0   -2
fs/buffer.c                     +1   -2
include/linux/bio.h             +0   -12
include/linux/blk-mq.h          +4   -0
include/linux/blk_types.h       +2   -0
include/linux/blkdev.h          +2   -6
block/bio-integrity.c

@@ -361,7 +361,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
-	bio_endio_nodec(bio, error);
+	bio_endio(bio, error);
 }
 
 /**
@@ -388,7 +388,7 @@ void bio_integrity_endio(struct bio *bio, int error)
 	 */
 	if (error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio_nodec(bio, error);
+		bio_endio(bio, error);
 		return;
 	}
block/bio.c

@@ -303,6 +303,17 @@ static void bio_chain_endio(struct bio *bio, int error)
 	bio_put(bio);
 }
 
+/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+	bio->bi_flags |= (1 << BIO_CHAIN);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_remaining);
+}
+
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
@@ -1756,8 +1767,10 @@ static inline bool bio_remaining_done(struct bio *bio)
 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
 
-	if (atomic_dec_and_test(&bio->__bi_remaining))
+	if (atomic_dec_and_test(&bio->__bi_remaining)) {
+		clear_bit(BIO_CHAIN, &bio->bi_flags);
 		return true;
+	}
 
 	return false;
 }
@@ -1808,26 +1821,6 @@ void bio_endio(struct bio *bio, int error)
 }
 EXPORT_SYMBOL(bio_endio);
 
-/**
- * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
- * @bio:	bio
- * @error:	error, if any
- *
- * For code that has saved and restored bi_end_io; thing hard before using this
- * function, probably you should've cloned the entire bio.
- **/
-void bio_endio_nodec(struct bio *bio, int error)
-{
-	/*
-	 * If it's not flagged as a chain, we are not going to dec the count
-	 */
-	if (bio_flagged(bio, BIO_CHAIN))
-		bio_inc_remaining(bio);
-
-	bio_endio(bio, error);
-}
-EXPORT_SYMBOL(bio_endio_nodec);
-
 /**
  * bio_split - split a bio
  * @bio:	bio to split
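Note on this change: bio_inc_remaining() moves out of the public header (see include/linux/bio.h below) and becomes private to bio.c, and bio_remaining_done() now clears BIO_CHAIN once the count drains. That is what lets a plain bio_endio() replace bio_endio_nodec() at every call site in this merge. A minimal sketch of the accounting this supports — bio_chain() and submit_bio() are real APIs of this era, but the helper and its setup are hypothetical:

/* Sketch: parent/child completion accounting via __bi_remaining.
 * Assumes @parent and @child already describe disjoint parts of the
 * I/O (splitting details elided; this helper is hypothetical).
 */
static void submit_halves(struct bio *parent, struct bio *child, int rw)
{
	bio_chain(child, parent);   /* flags parent BIO_CHAIN, bumps count */
	submit_bio(rw, child);      /* child's endio drops parent's count */
	submit_bio(rw, parent);     /* parent's ->bi_end_io runs only once
				     * both completions have come in */
}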
block/blk-core.c

@@ -117,7 +117,7 @@ EXPORT_SYMBOL(blk_rq_init);
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
 {
-	if (error)
+	if (error && !(rq->cmd_flags & REQ_CLONE))
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = -EIO;
@@ -128,7 +128,8 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ|REQ_CLONE)))
 		bio_endio(bio, error);
 }
@@ -2909,95 +2910,22 @@ int blk_lld_busy(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_lld_busy);
 
-/**
- * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
- * @rq: the clone request to be cleaned up
- *
- * Description:
- *     Free all bios in @rq for a cloned request.
- */
-void blk_rq_unprep_clone(struct request *rq)
-{
-	struct bio *bio;
-
-	while ((bio = rq->bio) != NULL) {
-		rq->bio = bio->bi_next;
-
-		bio_put(bio);
-	}
-}
-EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
-
-/*
- * Copy attributes of the original request to the clone request.
- * The actual data parts (e.g. ->cmd, ->sense) are not copied.
- */
-static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+void blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+	dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK);
+	dst->cmd_flags |= REQ_NOMERGE | REQ_CLONE;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
 	dst->nr_phys_segments = src->nr_phys_segments;
 	dst->ioprio = src->ioprio;
 	dst->extra_len = src->extra_len;
-}
-
-/**
- * blk_rq_prep_clone - Helper function to setup clone request
- * @rq: the request to be setup
- * @rq_src: original request to be cloned
- * @bs: bio_set that bios for clone are allocated from
- * @gfp_mask: memory allocation mask for bio
- * @bio_ctr: setup function to be called for each clone bio.
- *           Returns %0 for success, non %0 for failure.
- * @data: private data to be passed to @bio_ctr
- *
- * Description:
- *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
- *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
- *     are not copied, and copying such parts is the caller's responsibility.
- *     Also, pages which the original bios are pointing to are not copied
- *     and the cloned bios just point same pages.
- *     So cloned bios must be completed before original bios, which means
- *     the caller must complete @rq before @rq_src.
- */
-int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
-		      struct bio_set *bs, gfp_t gfp_mask,
-		      int (*bio_ctr)(struct bio *, struct bio *, void *),
-		      void *data)
-{
-	struct bio *bio, *bio_src;
-
-	if (!bs)
-		bs = fs_bio_set;
-
-	__rq_for_each_bio(bio_src, rq_src) {
-		bio = bio_clone_fast(bio_src, gfp_mask, bs);
-		if (!bio)
-			goto free_and_out;
-
-		if (bio_ctr && bio_ctr(bio, bio_src, data))
-			goto free_and_out;
-
-		if (rq->bio) {
-			rq->biotail->bi_next = bio;
-			rq->biotail = bio;
-		} else
-			rq->bio = rq->biotail = bio;
-	}
-
-	__blk_rq_prep_clone(rq, rq_src);
-
-	return 0;
-
-free_and_out:
-	if (bio)
-		bio_put(bio);
-	blk_rq_unprep_clone(rq);
-
-	return -ENOMEM;
+	dst->bio = src->bio;
+	dst->biotail = src->biotail;
+	dst->cmd = src->cmd;
+	dst->cmd_len = src->cmd_len;
+	dst->sense = src->sense;
 }
 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
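With this change a stacking driver no longer allocates clone bios at all: blk_rq_prep_clone() points the clone at the original's bio list and marks it REQ_CLONE, so req_bio_endio() (first two hunks) neither touches BIO_UPTODATE nor completes those bios from the clone's side. A hedged sketch of the new calling convention — @lower_q, @done and @priv are hypothetical stand-ins for a stacking driver's underlying queue and completion state, not names from this diff:

/* Sketch: dispatching a clone under the new API — no bio cloning, and
 * blk_rq_prep_clone() can no longer fail.
 */
static void dispatch_clone(struct request *rq, struct request *clone,
			   struct request_queue *lower_q,
			   rq_end_io_fn *done, void *priv)
{
	blk_rq_init(NULL, clone);
	blk_rq_prep_clone(clone, rq);	/* shares rq->bio, sets REQ_CLONE */
	clone->end_io = done;		/* completion stays per-request */
	clone->end_io_data = priv;
	blk_insert_cloned_request(lower_q, clone);	/* error handling elided */
}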
block/blk-merge.c

@@ -589,7 +589,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+	/* Only check gaps if the bio carries data */
+	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
 		struct bio_vec *bprev;
 
 		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
block/blk-mq-tag.c

@@ -438,6 +438,39 @@ static void bt_for_each(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
+static void bt_tags_for_each(struct blk_mq_tags *tags,
+		struct blk_mq_bitmap_tags *bt, unsigned int off,
+		busy_tag_iter_fn *fn, void *data, bool reserved)
+{
+	struct request *rq;
+	int bit, i;
+
+	if (!tags->rqs)
+		return;
+	for (i = 0; i < bt->map_nr; i++) {
+		struct blk_align_bitmap *bm = &bt->map[i];
+
+		for (bit = find_first_bit(&bm->word, bm->depth);
+		     bit < bm->depth;
+		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
+			rq = blk_mq_tag_to_rq(tags, off + bit);
+			fn(rq, data, reserved);
+		}
+
+		off += (1 << bt->bits_per_word);
+	}
+}
+
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+		void *priv)
+{
+	if (tags->nr_reserved_tags)
+		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
+	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags,
+			 fn, priv, false);
+}
+EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
+
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv)
 {
@@ -580,6 +613,11 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 	if (!tags)
 		return NULL;
 
+	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
+		kfree(tags);
+		return NULL;
+	}
+
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
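blk_mq_all_tag_busy_iter() is the tagset-wide counterpart of blk_mq_tag_busy_iter(): it walks every allocated tag in a blk_mq_tags map (reserved tags first) and invokes the callback with the request, an opaque pointer, and a reserved flag. A minimal, hypothetical caller — the kind of thing a driver sharing one tag map across queues might use to inspect everything in flight:

/* Sketch: counting in-flight requests across a shared tag map.
 * The callback matches the busy_tag_iter_fn typedef added in
 * include/linux/blk-mq.h below.
 */
static void count_busy(struct request *rq, void *data, bool reserved)
{
	unsigned int *busy = data;

	(*busy)++;		/* rq could equally be failed or requeued here */
}

static unsigned int count_all_busy(struct blk_mq_tags *tags)
{
	unsigned int busy = 0;

	blk_mq_all_tag_busy_iter(tags, count_busy, &busy);
	return busy;
}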
block/blk-mq-tag.h

@@ -44,6 +44,7 @@ struct blk_mq_tags {
 	struct list_head page_list;
 
 	int alloc_policy;
+	cpumask_var_t cpumask;
 };
block/blk-mq.c

@@ -1525,7 +1525,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 			i++;
 		}
 	}
-
 	return tags;
 
 fail:
@@ -1821,6 +1820,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		hctx = q->mq_ops->map_queue(q, i);
 		cpumask_set_cpu(i, hctx->cpumask);
+		cpumask_set_cpu(i, hctx->tags->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
 	}
@@ -2187,6 +2187,12 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
+struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
+{
+	return tags->cpumask;
+}
+EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -2248,8 +2254,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < set->nr_hw_queues; i++) {
-		if (set->tags[i])
+		if (set->tags[i]) {
 			blk_mq_free_rq_map(set, set->tags[i], i);
+			free_cpumask_var(set->tags[i]->cpumask);
+		}
 	}
 
 	kfree(set->tags);
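blk_mq_tags_cpumask() exposes, per tag map, the set of CPUs whose software queues feed it (populated in blk_mq_map_swqueue() above). One plausible use — assumed here, not shown anywhere in this diff — is steering interrupt affinity for the hardware queue that owns the tags:

/* Sketch: using the new per-tags cpumask as an IRQ affinity hint.
 * @irq is a hypothetical vector owned by the hw queue behind @tags;
 * irq_set_affinity_hint() is the stock <linux/interrupt.h> helper.
 */
static void set_queue_affinity(struct blk_mq_tags *tags, int irq)
{
	struct cpumask *mask = blk_mq_tags_cpumask(tags);

	irq_set_affinity_hint(irq, mask);
}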
drivers/md/bcache/io.c

@@ -55,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 	s->bio->bi_end_io = s->bi_end_io;
 	s->bio->bi_private = s->bi_private;
-	bio_endio_nodec(s->bio, 0);
+	bio_endio(s->bio, 0);
 
 	closure_debug_destroy(&s->cl);
 	mempool_free(s, s->p->bio_split_hook);
drivers/md/dm-cache-target.c

@@ -86,12 +86,6 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
 	bio->bi_end_io = h->bi_end_io;
 	bio->bi_private = h->bi_private;
-
-	/*
-	 * Must bump bi_remaining to allow bio to complete with
-	 * restored bi_end_io.
-	 */
-	bio_inc_remaining(bio);
 }
 
 /*----------------------------------------------------------------*/
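This hunk and the following dm, bcache, and btrfs hunks all drop the same idiom: code that saved a bio's completion handler, restored it, and then had to bump __bi_remaining (or call bio_endio_nodec()) so the restored handler would actually run. With bio_remaining_done() now clearing BIO_CHAIN when the count drains, plain restore-and-bio_endio() is safe. The surviving pattern, sketched with a hypothetical save area modeled on dm_hook_info:

struct hook {			/* hypothetical, modeled on dm_hook_info */
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void hook_bio(struct hook *h, struct bio *bio,
		     bio_end_io_t *end_io, void *private)
{
	h->bi_end_io = bio->bi_end_io;	/* save the caller's endio */
	h->bi_private = bio->bi_private;
	bio->bi_end_io = end_io;	/* intercept completion */
	bio->bi_private = private;
}

static void unhook_bio(struct hook *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;	/* no bio_inc_remaining() needed */
	bio->bi_private = h->bi_private;
}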
drivers/md/dm-raid1.c

@@ -1254,8 +1254,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 			dm_bio_restore(bd, bio);
 			bio_record->details.bi_bdev = NULL;
 
-			bio_inc_remaining(bio);
-
 			queue_bio(ms, bio, rw);
 			return DM_ENDIO_INCOMPLETE;
 		}
drivers/md/dm-snap.c

@@ -1478,7 +1478,6 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	if (full_bio) {
 		full_bio->bi_end_io = pe->full_bio_end_io;
 		full_bio->bi_private = pe->full_bio_private;
-		bio_inc_remaining(full_bio);
 	}
 	increment_pending_exceptions_done_count();
drivers/md/dm-table.c

@@ -940,21 +940,28 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 {
 	unsigned type = dm_table_get_type(t);
 	unsigned per_bio_data_size = 0;
-	struct dm_target *tgt;
 	unsigned i;
 
-	if (unlikely(type == DM_TYPE_NONE)) {
+	switch (type) {
+	case DM_TYPE_BIO_BASED:
+		for (i = 0; i < t->num_targets; i++) {
+			struct dm_target *tgt = t->targets + i;
+
+			per_bio_data_size = max(per_bio_data_size,
+						tgt->per_bio_data_size);
+		}
+		t->mempools = dm_alloc_bio_mempools(t->integrity_supported,
+						    per_bio_data_size);
+		break;
+	case DM_TYPE_REQUEST_BASED:
+	case DM_TYPE_MQ_REQUEST_BASED:
+		t->mempools = dm_alloc_rq_mempools(md, type);
+		break;
+	default:
 		DMWARN("no table type is set, can't allocate mempools");
 		return -EINVAL;
 	}
 
-	if (type == DM_TYPE_BIO_BASED)
-		for (i = 0; i < t->num_targets; i++) {
-			tgt = t->targets + i;
-			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
-		}
-
-	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
 	if (!t->mempools)
 		return -ENOMEM;
drivers/md/dm-thin.c

@@ -793,10 +793,9 @@ static void inc_remap_and_issue_cell(struct thin_c *tc,
 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
-	if (m->bio) {
+	if (m->bio)
 		m->bio->bi_end_io = m->saved_bi_end_io;
-		bio_inc_remaining(m->bio);
-	}
+
 	cell_error(m->tc->pool, m->cell);
 	list_del(&m->list);
 	mempool_free(m, m->tc->pool->mapping_pool);
@@ -810,10 +809,8 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	int r;
 
 	bio = m->bio;
-	if (bio) {
+	if (bio)
 		bio->bi_end_io = m->saved_bi_end_io;
-		bio_inc_remaining(bio);
-	}
 
 	if (m->err) {
 		cell_error(pool, m->cell);
drivers/md/dm-verity.c

@@ -459,7 +459,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
 	bio->bi_end_io = io->orig_bi_end_io;
 	bio->bi_private = io->orig_bi_private;
 
-	bio_endio_nodec(bio, error);
+	bio_endio(bio, error);
 }
 
 static void verity_work(struct work_struct *w)
drivers/md/dm.c

@@ -990,57 +990,6 @@ static void clone_endio(struct bio *bio, int error)
 	dec_pending(io, error);
 }
 
-/*
- * Partial completion handling for request-based dm
- */
-static void end_clone_bio(struct bio *clone, int error)
-{
-	struct dm_rq_clone_bio_info *info =
-		container_of(clone, struct dm_rq_clone_bio_info, clone);
-	struct dm_rq_target_io *tio = info->tio;
-	struct bio *bio = info->orig;
-	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-
-	bio_put(clone);
-
-	if (tio->error)
-		/*
-		 * An error has already been detected on the request.
-		 * Once error occurred, just let clone->end_io() handle
-		 * the remainder.
-		 */
-		return;
-	else if (error) {
-		/*
-		 * Don't notice the error to the upper layer yet.
-		 * The error handling decision is made by the target driver,
-		 * when the request is completed.
-		 */
-		tio->error = error;
-		return;
-	}
-
-	/*
-	 * I/O for the bio successfully completed.
-	 * Notice the data completion to the upper layer.
-	 */
-
-	/*
-	 * bios are processed from the head of the list.
-	 * So the completing bio should always be rq->bio.
-	 * If it's not, something wrong is happening.
-	 */
-	if (tio->orig->bio != bio)
-		DMERR("bio completion is going in the middle of the request");
-
-	/*
-	 * Update the original request.
-	 * Do not use blk_end_request() here, because it may complete
-	 * the original request before the clone, and break the ordering.
-	 */
-	blk_update_request(tio->orig, 0, nr_bytes);
-}
-
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
 {
 	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
@@ -1089,8 +1038,6 @@ static void free_rq_clone(struct request *clone, bool must_be_mapped)
 	WARN_ON_ONCE(must_be_mapped && !clone->q);
 
-	blk_rq_unprep_clone(clone);
-
 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
 		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
@@ -1821,39 +1768,13 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 	dm_complete_request(rq, r);
 }
 
-static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
-				 void *data)
+static void setup_clone(struct request *clone, struct request *rq,
+			struct dm_rq_target_io *tio)
 {
-	struct dm_rq_target_io *tio = data;
-	struct dm_rq_clone_bio_info *info =
-		container_of(bio, struct dm_rq_clone_bio_info, clone);
-
-	info->orig = bio_orig;
-	info->tio = tio;
-	bio->bi_end_io = end_clone_bio;
-
-	return 0;
-}
-
-static int setup_clone(struct request *clone, struct request *rq,
-		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
-	int r;
-
-	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
-			      dm_rq_bio_constructor, tio);
-	if (r)
-		return r;
-
-	clone->cmd = rq->cmd;
-	clone->cmd_len = rq->cmd_len;
-	clone->sense = rq->sense;
+	blk_rq_prep_clone(clone, rq);
 	clone->end_io = end_clone_request;
 	clone->end_io_data = tio;
-
 	tio->clone = clone;
-
-	return 0;
 }
 
 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
@@ -1874,12 +1795,7 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 	clone = tio->clone;
 
 	blk_rq_init(NULL, clone);
-	if (setup_clone(clone, rq, tio, gfp_mask)) {
-		/* -ENOMEM */
-		if (alloc_clone)
-			free_clone_request(md, clone);
-		return NULL;
-	}
+	setup_clone(clone, rq, tio);
 
 	return clone;
 }
@@ -1973,11 +1889,7 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		}
 		if (IS_ERR(clone))
 			return DM_MAPIO_REQUEUE;
-		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
-			/* -ENOMEM */
-			ti->type->release_clone_rq(clone);
-			return DM_MAPIO_REQUEUE;
-		}
+		setup_clone(clone, rq, tio);
 	}
 
 	switch (r) {
@@ -2431,8 +2343,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
-
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
 	md->rq_pool = p->rq_pool;
@@ -3536,48 +3446,23 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-					    unsigned integrity, unsigned per_bio_data_size)
+struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
+					     unsigned per_bio_data_size)
 {
-	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
-	struct kmem_cache *cachep = NULL;
-	unsigned int pool_size = 0;
+	struct dm_md_mempools *pools;
+	unsigned int pool_size = dm_get_reserved_bio_based_ios();
 	unsigned int front_pad;
 
+	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
 		return NULL;
 
-	type = filter_md_type(type, md);
+	front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
+		offsetof(struct dm_target_io, clone);
 
-	switch (type) {
-	case DM_TYPE_BIO_BASED:
-		cachep = _io_cache;
-		pool_size = dm_get_reserved_bio_based_ios();
-		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-		break;
-	case DM_TYPE_REQUEST_BASED:
-		cachep = _rq_tio_cache;
-		pool_size = dm_get_reserved_rq_based_ios();
-		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-		if (!pools->rq_pool)
-			goto out;
-		/* fall through to setup remaining rq-based pools */
-	case DM_TYPE_MQ_REQUEST_BASED:
-		if (!pool_size)
-			pool_size = dm_get_reserved_rq_based_ios();
-		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-		/* per_bio_data_size is not used. See __bind_mempools(). */
-		WARN_ON(per_bio_data_size != 0);
-		break;
-	default:
-		BUG();
-	}
-
-	if (cachep) {
-		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
-		if (!pools->io_pool)
-			goto out;
-	}
+	pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+	if (!pools->io_pool)
+		goto out;
 
 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
 	if (!pools->bs)
@@ -3587,10 +3472,34 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 		goto out;
 
 	return pools;
+out:
+	dm_free_md_mempools(pools);
+	return NULL;
+}
+
+struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
+					    unsigned type)
+{
+	unsigned int pool_size = dm_get_reserved_rq_based_ios();
+	struct dm_md_mempools *pools;
+
+	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+	if (!pools)
+		return NULL;
+
+	if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
+	}
+
+	pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
+	if (!pools->io_pool)
+		goto out;
+
+	return pools;
 out:
 	dm_free_md_mempools(pools);
 	return NULL;
 }
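Because the clone now shares the original's bios, all of end_clone_bio()'s partial-completion bookkeeping becomes dead code: nothing per-bio happens on the clone's queue (REQ_CLONE suppresses it, see block/blk-core.c), and the original request is completed in one step when the clone finishes. A hypothetical condensation of that flow — the real callback is end_clone_request(), which defers through dm's softirq completion and does more than this:

/* Sketch only: what clone completion amounts to after this merge.
 * clone_done() is hypothetical; dm_complete_request() and the tio
 * fields are dm.c internals referenced in this diff.
 */
static void clone_done(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->error = error;
	/* The shared bios are ended exactly once, when the original
	 * request completes below — not on the clone's queue. */
	dm_complete_request(tio->orig, error);
}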
drivers/md/dm.h

@@ -222,8 +222,9 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-					    unsigned integrity, unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
+					     unsigned per_bio_data_size);
+struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md, unsigned type);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
fs/btrfs/disk-io.c

@@ -1745,7 +1745,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
-	bio_endio_nodec(bio, error);
+	bio_endio(bio, error);
 }
 
 static int cleaner_kthread(void *arg)
fs/btrfs/volumes.c

@@ -5585,10 +5585,10 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
 {
-	if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
-		bio_endio_nodec(bio, err);
-	else
-		bio_endio(bio, err);
+	bio->bi_private = bbio->private;
+	bio->bi_end_io = bbio->end_io;
+	bio_endio(bio, err);
+
 	btrfs_put_bbio(bbio);
 }
@@ -5632,8 +5632,6 @@ static void btrfs_end_bio(struct bio *bio, int err)
 		bio = bbio->orig_bio;
 	}
 
-	bio->bi_private = bbio->private;
-	bio->bi_end_io = bbio->end_io;
 	btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
 	/* only send an error to the higher layers if it is
 	 * beyond the tolerance of the btrfs bio
@@ -5815,8 +5813,6 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 		/* Shoud be the original bio. */
 		WARN_ON(bio != bbio->orig_bio);
 
-		bio->bi_private = bbio->private;
-		bio->bi_end_io = bbio->end_io;
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
 		bio->bi_iter.bi_sector = logical >> 9;
@@ -5897,10 +5893,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 		if (dev_nr < total_devs - 1) {
 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
 			BUG_ON(!bio); /* -ENOMEM */
-		} else {
+		} else
 			bio = first_bio;
-			bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
-		}
 
 		submit_stripe_bio(root, bbio, bio,
 				  bbio->stripes[dev_nr].physical, dev_nr, rw,
fs/btrfs/volumes.h

@@ -292,8 +292,6 @@ struct btrfs_bio_stripe {
 struct btrfs_bio;
 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 
-#define BTRFS_BIO_ORIG_BIO_SUBMITTED	(1 << 0)
-
 struct btrfs_bio {
 	atomic_t refs;
 	atomic_t stripes_pending;
fs/buffer.c

@@ -2996,7 +2996,6 @@ void guard_bio_eod(int rw, struct bio *bio)
 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 {
 	struct bio *bio;
-	int ret = 0;
 
 	BUG_ON(!buffer_locked(bh));
 	BUG_ON(!buffer_mapped(bh));
@@ -3038,7 +3037,7 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 		rw |= REQ_PRIO;
 
 	submit_bio(rw, bio);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(_submit_bh);
include/linux/bio.h

@@ -427,7 +427,6 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 }
 
 extern void bio_endio(struct bio *, int);
-extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -658,17 +657,6 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 	return bio;
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio->bi_flags |= (1 << BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
include/linux/blk-mq.h

@@ -96,6 +96,7 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
 typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
+typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 
 struct blk_mq_ops {
 	/*
@@ -182,6 +183,7 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
 
 enum {
 	BLK_MQ_UNIQUE_TAG_BITS	= 16,
@@ -224,6 +226,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+		void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
include/linux/blk_types.h

@@ -192,6 +192,7 @@ enum rq_flag_bits {
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
 	__REQ_NO_TIMEOUT,	/* requests may never expire */
+	__REQ_CLONE,		/* cloned bios */
 	__REQ_NR_BITS,		/* stops here */
 };
@@ -246,5 +247,6 @@ enum rq_flag_bits {
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 #define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)
+#define REQ_CLONE		(1ULL << __REQ_CLONE)
 
 #endif /* __LINUX_BLK_TYPES_H */
include/linux/blkdev.h

@@ -775,11 +775,7 @@ extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
-			     struct bio_set *bs, gfp_t gfp_mask,
-			     int (*bio_ctr)(struct bio *, struct bio *, void *),
-			     void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
+extern void blk_rq_prep_clone(struct request *rq, struct request *rq_src);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
@@ -907,7 +903,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	if (!q->limits.chunk_sectors)
+	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
 		return blk_queue_get_max_sectors(q, rq->cmd_flags);
 
 	return min(blk_max_size_offset(q, blk_rq_pos(rq)),