Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
2f9a0b33
Commit
2f9a0b33
authored
Apr 12, 2016
by
Jens Axboe
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'for-4.7/core' into for-4.7/drivers
parents
e8f1e163
93e9d8e8
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
77 additions
and
0 deletions
+77
-0
Documentation/block/queue-sysfs.txt
Documentation/block/queue-sysfs.txt
+9
-0
block/blk-settings.c
block/blk-settings.c
+26
-0
block/blk-sysfs.c
block/blk-sysfs.c
+39
-0
include/linux/blkdev.h
include/linux/blkdev.h
+3
-0
No files found.
Documentation/block/queue-sysfs.txt
View file @
2f9a0b33
...
...
@@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
an IO scheduler name to this file will attempt to load that IO scheduler
module, if it isn't already present in the system.
write_cache (RW)
----------------
When read, this file will display whether the device has write back
caching enabled or not. It will return "write back" for the former
case, and "write through" for the latter. Writing to this file can
change the kernel's view of the device, but it doesn't alter the
device state. This means that it might not be safe to toggle the
setting from "write back" to "write through", since that will also
eliminate cache flushes issued by the kernel.
Jens Axboe <jens.axboe@oracle.com>, February 2009
block/blk-settings.c
View file @
2f9a0b33
...
...
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
}
EXPORT_SYMBOL_GPL
(
blk_queue_flush_queueable
);
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q: the request queue for the device
 * @wc: write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	spin_lock_irq(q->queue_lock);

	if (!wc) {
		/* no write back cache: leave flush_flags untouched */
		queue_flag_clear(QUEUE_FLAG_WC, q);
	} else {
		queue_flag_set(QUEUE_FLAG_WC, q);
		/* FUA only contributes to flush_flags when caching is on */
		q->flush_flags = fua ? (REQ_FLUSH | REQ_FUA) : REQ_FLUSH;
	}

	if (fua)
		queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		queue_flag_clear(QUEUE_FLAG_FUA, q);

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
static
int
__init
blk_settings_init
(
void
)
{
blk_max_low_pfn
=
max_low_pfn
-
1
;
...
...
block/blk-sysfs.c
View file @
2f9a0b33
...
...
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return
ret
;
}
/*
 * Report the write cache state of the queue: "write back" when
 * QUEUE_FLAG_WC is set, "write through" otherwise.
 */
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write through\n");

	return sprintf(page, "write back\n");
}
/*
 * Toggle the queue's write cache flag from sysfs. Accepts "write back"
 * to enable, and "write through" or "none" to disable; anything else
 * is rejected with -EINVAL. Only the kernel's view changes — the
 * device itself is not reconfigured.
 */
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	bool enable;

	if (!strncmp(page, "write back", 10)) {
		enable = true;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		enable = false;
	} else {
		return -EINVAL;
	}

	spin_lock_irq(q->queue_lock);
	if (enable)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}
static
struct
queue_sysfs_entry
queue_requests_entry
=
{
.
attr
=
{.
name
=
"nr_requests"
,
.
mode
=
S_IRUGO
|
S_IWUSR
},
.
show
=
queue_requests_show
,
...
...
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
.
store
=
queue_poll_store
,
};
/* sysfs attribute "write_cache" (RW), backed by queue_wc_show/store */
static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {
		.name = "write_cache",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = queue_wc_show,
	.store = queue_wc_store,
};
static
struct
attribute
*
default_attrs
[]
=
{
&
queue_requests_entry
.
attr
,
&
queue_ra_entry
.
attr
,
...
...
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
&
queue_iostats_entry
.
attr
,
&
queue_random_entry
.
attr
,
&
queue_poll_entry
.
attr
,
&
queue_wc_entry
.
attr
,
NULL
,
};
...
...
include/linux/blkdev.h
View file @
2f9a0b33
...
...
@@ -491,6 +491,8 @@ struct request_queue {
#define QUEUE_FLAG_INIT_DONE 20
/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21
/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL 22
/* IO polling enabled if set */
#define QUEUE_FLAG_WC 23
/* Write back caching */
#define QUEUE_FLAG_FUA 24
/* device supports FUA writes */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
...
...
@@ -1009,6 +1011,7 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern
void
blk_queue_rq_timeout
(
struct
request_queue
*
,
unsigned
int
);
extern
void
blk_queue_flush
(
struct
request_queue
*
q
,
unsigned
int
flush
);
extern
void
blk_queue_flush_queueable
(
struct
request_queue
*
q
,
bool
queueable
);
extern
void
blk_queue_write_cache
(
struct
request_queue
*
q
,
bool
enabled
,
bool
fua
);
extern
struct
backing_dev_info
*
blk_get_backing_dev_info
(
struct
block_device
*
bdev
);
extern
int
blk_rq_map_sg
(
struct
request_queue
*
,
struct
request
*
,
struct
scatterlist
*
);
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment