Commit 3fd396af authored by Mikhail Zaslonko, committed by Linus Torvalds

btrfs: use larger zlib buffer for s390 hardware compression

To benefit from s390 zlib hardware compression support, increase the btrfs
zlib workspace buffer size from 1 to 4 pages (if s390 zlib hardware support
is enabled on the machine).

This brings up to 60% better performance on s390 hardware compared to the
PAGE_SIZE buffer, and much more compared to software zlib processing in
btrfs.  In case of memory pressure, fall back to a single-page buffer during
workspace allocation.

Data compressed with the larger input buffer still conforms to the zlib
standard and can therefore also be decompressed on systems that use only a
PAGE_SIZE buffer for btrfs zlib.
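
For illustration only (this sketch is not part of the patch and uses plain
userspace zlib rather than the kernel's zlib_* wrappers), the program below
deflates a buffer while handing zlib four pages of input at a time and then
inflates the very same stream with only one page of output space per call.
PAGE_SZ, BIG_BUF_SZ, the file name and the sample data are assumptions made
for the demo; build with: gcc demo.c -lz

/*
 * Userspace sketch only -- NOT the kernel code.  It shows that a stream
 * deflated with a 4-page input buffer is ordinary zlib data that can be
 * inflated again one page at a time.
 */
#include <stdio.h>
#include <zlib.h>

#define PAGE_SZ    4096UL
#define BIG_BUF_SZ (4 * PAGE_SZ)                /* mirrors ZLIB_DFLTCC_BUF_SIZE */

static unsigned char in[3 * BIG_BUF_SZ + 1234]; /* deliberately not a page multiple */
static unsigned char comp[64 * 1024];           /* ample output space for the demo */
static unsigned char out[PAGE_SZ];

int main(void)
{
        z_stream c = {0}, d = {0};
        unsigned long decompressed = 0, comp_len;
        size_t i;
        int ret;

        for (i = 0; i < sizeof(in); i++)        /* compressible test pattern */
                in[i] = (unsigned char)(i % 251);

        /* Deflate, handing zlib up to BIG_BUF_SZ bytes of input per call. */
        if (deflateInit(&c, Z_BEST_SPEED) != Z_OK)
                return 1;
        c.next_out = comp;
        c.avail_out = (uInt)sizeof(comp);
        while (c.total_in < sizeof(in)) {
                size_t left = sizeof(in) - (size_t)c.total_in;

                c.next_in = in + c.total_in;
                c.avail_in = (uInt)(left < BIG_BUF_SZ ? left : BIG_BUF_SZ);
                deflate(&c, Z_SYNC_FLUSH);
        }
        ret = deflate(&c, Z_FINISH);            /* output buffer is big enough here */
        comp_len = c.total_out;
        deflateEnd(&c);
        if (ret != Z_STREAM_END)
                return 1;

        /* Inflate the very same stream with only one page of output space. */
        if (inflateInit(&d) != Z_OK)
                return 1;
        d.next_in = comp;
        d.avail_in = (uInt)comp_len;
        do {
                d.next_out = out;
                d.avail_out = PAGE_SZ;
                ret = inflate(&d, Z_NO_FLUSH);
                decompressed += PAGE_SZ - d.avail_out;
        } while (ret == Z_OK);
        inflateEnd(&d);

        printf("%zu -> %lu bytes, inflated back to %lu bytes: %s\n", sizeof(in),
               comp_len, decompressed,
               (ret == Z_STREAM_END && decompressed == sizeof(in)) ? "ok" : "FAIL");
        return 0;
}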

Link: http://lkml.kernel.org/r/20200108105103.29028-1-zaslonko@linux.ibm.com
Signed-off-by: Mikhail Zaslonko <zaslonko@linux.ibm.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Richard Purdie <rpurdie@rpsys.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Eduard Shishkin <edward6@linux.ibm.com>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 803521b1
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1290,7 +1290,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
 	/* copy bytes from the working buffer into the pages */
 	while (working_bytes > 0) {
 		bytes = min_t(unsigned long, bvec.bv_len,
-				PAGE_SIZE - buf_offset);
+				PAGE_SIZE - (buf_offset % PAGE_SIZE));
 		bytes = min(bytes, working_bytes);
 
 		kaddr = kmap_atomic(bvec.bv_page);
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -20,9 +20,13 @@
 #include <linux/refcount.h>
 #include "compression.h"
 
+/* workspace buffer size for s390 zlib hardware support */
+#define ZLIB_DFLTCC_BUF_SIZE	(4 * PAGE_SIZE)
+
 struct workspace {
 	z_stream strm;
 	char *buf;
+	unsigned int buf_size;
 	struct list_head list;
 	int level;
 };
@@ -61,7 +65,21 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
 			zlib_inflate_workspacesize());
 	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
 	workspace->level = level;
-	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	workspace->buf = NULL;
+	/*
+	 * In case of s390 zlib hardware support, allocate a larger workspace
+	 * buffer. If the allocation fails, fall back to a single page buffer.
+	 */
+	if (zlib_deflate_dfltcc_enabled()) {
+		workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
+					 __GFP_NOMEMALLOC | __GFP_NORETRY |
+					 __GFP_NOWARN | GFP_NOIO);
+		workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
+	}
+	if (!workspace->buf) {
+		workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		workspace->buf_size = PAGE_SIZE;
+	}
 	if (!workspace->strm.workspace || !workspace->buf)
 		goto fail;
 
@@ -85,6 +103,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	struct page *in_page = NULL;
 	struct page *out_page = NULL;
 	unsigned long bytes_left;
+	unsigned int in_buf_pages;
 	unsigned long len = *total_out;
 	unsigned long nr_dest_pages = *out_pages;
 	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
@@ -102,9 +121,6 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-	data_in = kmap(in_page);
-
 	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
@@ -114,12 +130,51 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	pages[0] = out_page;
 	nr_pages = 1;
 
-	workspace->strm.next_in = data_in;
+	workspace->strm.next_in = workspace->buf;
+	workspace->strm.avail_in = 0;
 	workspace->strm.next_out = cpage_out;
 	workspace->strm.avail_out = PAGE_SIZE;
-	workspace->strm.avail_in = min(len, PAGE_SIZE);
 
 	while (workspace->strm.total_in < len) {
+		/*
+		 * Get next input pages and copy the contents to
+		 * the workspace buffer if required.
+		 */
+		if (workspace->strm.avail_in == 0) {
+			bytes_left = len - workspace->strm.total_in;
+			in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
+					   workspace->buf_size / PAGE_SIZE);
+			if (in_buf_pages > 1) {
+				int i;
+
+				for (i = 0; i < in_buf_pages; i++) {
+					if (in_page) {
+						kunmap(in_page);
+						put_page(in_page);
+					}
+					in_page = find_get_page(mapping,
+								start >> PAGE_SHIFT);
+					data_in = kmap(in_page);
+					memcpy(workspace->buf + i * PAGE_SIZE,
+					       data_in, PAGE_SIZE);
+					start += PAGE_SIZE;
+				}
+				workspace->strm.next_in = workspace->buf;
+			} else {
+				if (in_page) {
+					kunmap(in_page);
+					put_page(in_page);
+				}
+				in_page = find_get_page(mapping,
+							start >> PAGE_SHIFT);
+				data_in = kmap(in_page);
+				start += PAGE_SIZE;
+				workspace->strm.next_in = data_in;
+			}
+			workspace->strm.avail_in = min(bytes_left,
+						       (unsigned long) workspace->buf_size);
+		}
+
 		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
 		if (ret != Z_OK) {
 			pr_debug("BTRFS: deflate in loop returned %d\n",
@@ -161,33 +216,43 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 		/* we're all done */
 		if (workspace->strm.total_in >= len)
 			break;
-
-		/* we've read in a full page, get a new one */
-		if (workspace->strm.avail_in == 0) {
-			if (workspace->strm.total_out > max_out)
-				break;
-
-			bytes_left = len - workspace->strm.total_in;
-			kunmap(in_page);
-			put_page(in_page);
-
-			start += PAGE_SIZE;
-			in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
-			data_in = kmap(in_page);
-			workspace->strm.avail_in = min(bytes_left,
						       PAGE_SIZE);
-			workspace->strm.next_in = data_in;
-		}
+		if (workspace->strm.total_out > max_out)
+			break;
 	}
 	workspace->strm.avail_in = 0;
-	ret = zlib_deflate(&workspace->strm, Z_FINISH);
-	zlib_deflateEnd(&workspace->strm);
-
-	if (ret != Z_STREAM_END) {
-		ret = -EIO;
-		goto out;
+	/*
+	 * Call deflate with Z_FINISH flush parameter providing more output
+	 * space but no more input data, until it returns with Z_STREAM_END.
+	 */
+	while (ret != Z_STREAM_END) {
+		ret = zlib_deflate(&workspace->strm, Z_FINISH);
+		if (ret == Z_STREAM_END)
+			break;
+		if (ret != Z_OK && ret != Z_BUF_ERROR) {
+			zlib_deflateEnd(&workspace->strm);
+			ret = -EIO;
+			goto out;
+		} else if (workspace->strm.avail_out == 0) {
+			/* get another page for the stream end */
+			kunmap(out_page);
+			if (nr_pages == nr_dest_pages) {
+				out_page = NULL;
+				ret = -E2BIG;
+				goto out;
+			}
+			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+			if (out_page == NULL) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			cpage_out = kmap(out_page);
+			pages[nr_pages] = out_page;
+			nr_pages++;
+			workspace->strm.avail_out = PAGE_SIZE;
+			workspace->strm.next_out = cpage_out;
+		}
 	}
+	zlib_deflateEnd(&workspace->strm);
 
 	if (workspace->strm.total_out >= workspace->strm.total_in) {
 		ret = -E2BIG;
@@ -231,7 +296,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	workspace->strm.total_out = 0;
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -270,7 +335,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		}
 
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_SIZE;
+		workspace->strm.avail_out = workspace->buf_size;
 
 		if (workspace->strm.avail_in == 0) {
 			unsigned long tmp;
@@ -320,7 +385,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	workspace->strm.total_in = 0;
 
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 	workspace->strm.total_out = 0;
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -364,7 +429,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		buf_offset = 0;
 
 		bytes = min(PAGE_SIZE - pg_offset,
-			    PAGE_SIZE - buf_offset);
+			    PAGE_SIZE - (buf_offset % PAGE_SIZE));
 		bytes = min(bytes, bytes_left);
 
 		kaddr = kmap_atomic(dest_page);
@@ -375,7 +440,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		bytes_left -= bytes;
 next:
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_SIZE;
+		workspace->strm.avail_out = workspace->buf_size;
 	}
 
 	if (ret != Z_STREAM_END && bytes_left != 0)