Commit 8e8ef297 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  docbook: add pipes, other fixes
  blktrace: use cpu_clock() instead of sched_clock()
  bsg: Fix build for CONFIG_BLOCK=n
  [patch] QUEUE_FLAG_READFULL QUEUE_FLAG_WRITEFULL comment fix
parents 933a6208 79685b8d
...@@ -704,14 +704,23 @@ X!Idrivers/video/console/fonts.c ...@@ -704,14 +704,23 @@ X!Idrivers/video/console/fonts.c
<chapter id="splice"> <chapter id="splice">
<title>splice API</title> <title>splice API</title>
<para>) <para>
splice is a method for moving blocks of data around inside the splice is a method for moving blocks of data around inside the
kernel, without continually transferring it between the kernel kernel, without continually transferring them between the kernel
and user space. and user space.
</para> </para>
!Iinclude/linux/splice.h !Iinclude/linux/splice.h
!Ffs/splice.c !Ffs/splice.c
</chapter> </chapter>
<chapter id="pipes">
<title>pipes API</title>
<para>
Pipe interfaces are all for in-kernel (builtin image) use.
They are not exported for use by modules.
</para>
!Iinclude/linux/pipe_fs_i.h
!Ffs/pipe.c
</chapter>
</book> </book>
...@@ -49,8 +49,6 @@ config LSF ...@@ -49,8 +49,6 @@ config LSF
If unsure, say Y. If unsure, say Y.
endif # BLOCK
config BLK_DEV_BSG config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)" bool "Block layer SG support v4 (EXPERIMENTAL)"
depends on EXPERIMENTAL depends on EXPERIMENTAL
...@@ -64,4 +62,6 @@ config BLK_DEV_BSG ...@@ -64,4 +62,6 @@ config BLK_DEV_BSG
protocols (e.g. Task Management Functions and SMP in Serial protocols (e.g. Task Management Functions and SMP in Serial
Attached SCSI). Attached SCSI).
endif # BLOCK
source block/Kconfig.iosched source block/Kconfig.iosched
...@@ -41,7 +41,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action, ...@@ -41,7 +41,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
const int cpu = smp_processor_id(); const int cpu = smp_processor_id();
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu); t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
t->device = bt->dev; t->device = bt->dev;
t->action = action; t->action = action;
t->pid = pid; t->pid = pid;
...@@ -159,7 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, ...@@ -159,7 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->sequence = ++(*sequence); t->sequence = ++(*sequence);
t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu); t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
t->sector = sector; t->sector = sector;
t->bytes = bytes; t->bytes = bytes;
t->action = what; t->action = what;
...@@ -488,17 +488,17 @@ void blk_trace_shutdown(struct request_queue *q) ...@@ -488,17 +488,17 @@ void blk_trace_shutdown(struct request_queue *q)
} }
/* /*
* Average offset over two calls to sched_clock() with a gettimeofday() * Average offset over two calls to cpu_clock() with a gettimeofday()
* in the middle * in the middle
*/ */
static void blk_check_time(unsigned long long *t) static void blk_check_time(unsigned long long *t, int this_cpu)
{ {
unsigned long long a, b; unsigned long long a, b;
struct timeval tv; struct timeval tv;
a = sched_clock(); a = cpu_clock(this_cpu);
do_gettimeofday(&tv); do_gettimeofday(&tv);
b = sched_clock(); b = cpu_clock(this_cpu);
*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000; *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
*t -= (a + b) / 2; *t -= (a + b) / 2;
...@@ -510,16 +510,16 @@ static void blk_check_time(unsigned long long *t) ...@@ -510,16 +510,16 @@ static void blk_check_time(unsigned long long *t)
static void blk_trace_check_cpu_time(void *data) static void blk_trace_check_cpu_time(void *data)
{ {
unsigned long long *t; unsigned long long *t;
int cpu = get_cpu(); int this_cpu = get_cpu();
t = &per_cpu(blk_trace_cpu_offset, cpu); t = &per_cpu(blk_trace_cpu_offset, this_cpu);
/* /*
* Just call it twice, hopefully the second call will be cache hot * Just call it twice, hopefully the second call will be cache hot
* and a little more precise * and a little more precise
*/ */
blk_check_time(t); blk_check_time(t, this_cpu);
blk_check_time(t); blk_check_time(t, this_cpu);
put_cpu(); put_cpu();
} }
......
...@@ -255,7 +255,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) ...@@ -255,7 +255,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
/** /**
* generic_pipe_buf_confirm - verify contents of the pipe buffer * generic_pipe_buf_confirm - verify contents of the pipe buffer
* @pipe: the pipe that the buffer belongs to * @info: the pipe that the buffer belongs to
* @buf: the buffer to confirm * @buf: the buffer to confirm
* *
* Description: * Description:
......
...@@ -164,7 +164,7 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = { ...@@ -164,7 +164,7 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
* @spd: data to fill * @spd: data to fill
* *
* Description: * Description:
* @spd contains a map of pages and len/offset tupples, a long with * @spd contains a map of pages and len/offset tuples, along with
* the struct pipe_buf_operations associated with these pages. This * the struct pipe_buf_operations associated with these pages. This
* function will link that data to the pipe. * function will link that data to the pipe.
* *
...@@ -1000,7 +1000,7 @@ static long do_splice_to(struct file *in, loff_t *ppos, ...@@ -1000,7 +1000,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
* Description: * Description:
* This is a special case helper to splice directly between two * This is a special case helper to splice directly between two
* points, without requiring an explicit pipe. Internally an allocated * points, without requiring an explicit pipe. Internally an allocated
* pipe is cached in the process, and reused during the life time of * pipe is cached in the process, and reused during the lifetime of
* that process. * that process.
* *
*/ */
......
...@@ -483,8 +483,8 @@ struct request_queue ...@@ -483,8 +483,8 @@ struct request_queue
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_READFULL 3 /* write queue has been filled */ #define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL 4 /* read queue has been filled */ #define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ #define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment