Commit 6016fc91 authored by Linus Torvalds

Merge tag 'iomap-6.6-merge-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull iomap updates from Darrick Wong:
 "We've got some big changes for this release -- I'm very happy to be
  landing willy's work to enable large folios for the page cache for
  general read and write IOs when the fs can make contiguous space
  allocations, and Ritesh's work to track sub-folio dirty state to
  eliminate the write amplification problems inherent in using large
  folios.

  As a bonus, io_uring can now process write completions in the caller's
  context instead of bouncing through a workqueue, which should reduce
  io latency dramatically. IOWs, XFS should see a nice performance bump
  for both IO paths.

  Summary:

   - Make large writes to the page cache fill sparse parts of the cache
     with large folios, then use large memcpy calls for the large folio.

   - Track the per-block dirty state of each large folio so that a
     buffered write to a single byte on a large folio does not result in
     a (potentially) multi-megabyte writeback IO.

   - Allow some directio completions to be performed in the initiating
     task's context instead of punting through a workqueue. This will
     reduce latency for some io_uring requests"
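
The per-block dirty tracking mentioned above boils down to a small amount of per-folio state; the real structure is the iomap_folio_state named in the shortlog below (its fs/iomap/buffered-io.c diff is collapsed further down), so the layout sketched here is illustrative only:

	/*
	 * Conceptual sketch only -- not the exact iomap_folio_state layout.
	 * One bit per filesystem block for "uptodate" and one for "dirty",
	 * attached to folio->private.  A one-byte buffered write then dirties
	 * a single block, and writeback can skip the clean blocks instead of
	 * writing back the whole (possibly multi-megabyte) folio.
	 */
	struct folio_block_state {
		spinlock_t	state_lock;
		unsigned int	read_bytes_pending;
		/* bits [0, nr_blocks) = uptodate, [nr_blocks, 2 * nr_blocks) = dirty */
		unsigned long	state[];
	};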

* tag 'iomap-6.6-merge-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (26 commits)
  iomap: support IOCB_DIO_CALLER_COMP
  io_uring/rw: add write support for IOCB_DIO_CALLER_COMP
  fs: add IOCB flags related to passing back dio completions
  iomap: add IOMAP_DIO_INLINE_COMP
  iomap: only set iocb->private for polled bio
  iomap: treat a write through cache the same as FUA
  iomap: use an unsigned type for IOMAP_DIO_* defines
  iomap: cleanup up iomap_dio_bio_end_io()
  iomap: Add per-block dirty state tracking to improve performance
  iomap: Allocate ifs in ->write_begin() early
  iomap: Refactor iomap_write_delalloc_punch() function out
  iomap: Use iomap_punch_t typedef
  iomap: Fix possible overflow condition in iomap_write_delalloc_scan
  iomap: Add some uptodate state handling helpers for ifs state bitmap
  iomap: Drop ifs argument from iomap_set_range_uptodate()
  iomap: Rename iomap_page to iomap_folio_state and others
  iomap: Copy larger chunks from userspace
  iomap: Create large folios in the buffered write path
  filemap: Allow __filemap_get_folio to allocate large folios
  filemap: Add fgf_t typedef
  ...
parents dd2c0198 377698d4
@@ -376,10 +376,17 @@ invalidate_lock before invalidating page cache in truncate / hole punch
 path (and thus calling into ->invalidate_folio) to block races between page
 cache invalidation and page cache filling functions (fault, read, ...).
 
-->release_folio() is called when the kernel is about to try to drop the
-buffers from the folio in preparation for freeing it. It returns false to
-indicate that the buffers are (or may be) freeable. If ->release_folio is
-NULL, the kernel assumes that the fs has no private interest in the buffers.
+->release_folio() is called when the MM wants to make a change to the
+folio that would invalidate the filesystem's private data. For example,
+it may be about to be removed from the address_space or split. The folio
+is locked and not under writeback. It may be dirty. The gfp parameter
+is not usually used for allocation, but rather to indicate what the
+filesystem may do to attempt to free the private data. The filesystem may
+return false to indicate that the folio's private data cannot be freed.
+If it returns true, it should have already removed the private data from
+the folio. If a filesystem does not provide a ->release_folio method,
+the pagecache will assume that private data is buffer_heads and call
+try_to_free_buffers().
 
 ->free_folio() is called when the kernel has dropped the folio
 from the page cache.
...
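
For reference, a minimal ->release_folio implementation that follows the contract documented above might look like the sketch below; the private-data type and its "is idle" helper are made up for illustration, and the gfp hints are ignored for simplicity:

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		struct my_fs_private *priv = folio_get_private(folio);

		if (priv && !my_fs_private_is_idle(priv))
			return false;	/* private data cannot be freed right now */

		folio_detach_private(folio);
		kfree(priv);
		return true;		/* private data already removed from the folio */
	}
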
@@ -876,9 +876,9 @@ static int prepare_uptodate_page(struct inode *inode,
 	return 0;
 }
 
-static unsigned int get_prepare_fgp_flags(bool nowait)
+static fgf_t get_prepare_fgp_flags(bool nowait)
 {
-	unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
+	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
 
 	if (nowait)
 		fgp_flags |= FGP_NOWAIT;
@@ -910,7 +910,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 	int i;
 	unsigned long index = pos >> PAGE_SHIFT;
 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
-	unsigned int fgp_flags = get_prepare_fgp_flags(nowait);
+	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
 	int err = 0;
 	int faili;
...
@@ -1045,7 +1045,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
 	struct address_space *mapping = cc->inode->i_mapping;
 	struct page *page;
 	sector_t last_block_in_bio;
-	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
+	fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
 	pgoff_t start_idx = start_idx_of_cluster(cc);
 	int i, ret;
...
@@ -2736,7 +2736,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
 
 static inline struct page *f2fs_pagecache_get_page(
 				struct address_space *mapping, pgoff_t index,
-				int fgp_flags, gfp_t gfp_mask)
+				fgf_t fgp_flags, gfp_t gfp_mask)
 {
 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
 		return NULL;
...
@@ -747,7 +747,7 @@ static const struct address_space_operations gfs2_aops = {
 	.writepages = gfs2_writepages,
 	.read_folio = gfs2_read_folio,
 	.readahead = gfs2_readahead,
-	.dirty_folio = filemap_dirty_folio,
+	.dirty_folio = iomap_dirty_folio,
 	.release_folio = iomap_release_folio,
 	.invalidate_folio = iomap_invalidate_folio,
 	.bmap = gfs2_bmap,
...
@@ -971,7 +971,7 @@ gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
 	if (status)
 		return ERR_PTR(status);
 
-	folio = iomap_get_folio(iter, pos);
+	folio = iomap_get_folio(iter, pos, len);
 	if (IS_ERR(folio))
 		gfs2_trans_end(sdp);
 	return folio;
...
This diff is collapsed.
@@ -20,10 +20,12 @@
  * Private flags for iomap_dio, must not overlap with the public ones in
  * iomap.h:
  */
-#define IOMAP_DIO_WRITE_FUA	(1 << 28)
-#define IOMAP_DIO_NEED_SYNC	(1 << 29)
-#define IOMAP_DIO_WRITE		(1 << 30)
-#define IOMAP_DIO_DIRTY		(1 << 31)
+#define IOMAP_DIO_CALLER_COMP	(1U << 26)
+#define IOMAP_DIO_INLINE_COMP	(1U << 27)
+#define IOMAP_DIO_WRITE_THROUGH	(1U << 28)
+#define IOMAP_DIO_NEED_SYNC	(1U << 29)
+#define IOMAP_DIO_WRITE		(1U << 30)
+#define IOMAP_DIO_DIRTY		(1U << 31)
 
 struct iomap_dio {
 	struct kiocb		*iocb;
@@ -41,7 +43,6 @@ struct iomap_dio {
 	struct {
 		struct iov_iter		*iter;
 		struct task_struct	*waiter;
-		struct bio		*poll_bio;
 	} submit;
 
 	/* used for aio completion: */
@@ -63,12 +64,14 @@ static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
 static void iomap_dio_submit_bio(const struct iomap_iter *iter,
 		struct iomap_dio *dio, struct bio *bio, loff_t pos)
 {
+	struct kiocb *iocb = dio->iocb;
+
 	atomic_inc(&dio->ref);
 
 	/* Sync dio can't be polled reliably */
-	if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
-		bio_set_polled(bio, dio->iocb);
-		dio->submit.poll_bio = bio;
+	if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
+		bio_set_polled(bio, iocb);
+		WRITE_ONCE(iocb->private, bio);
 	}
 
 	if (dio->dops && dio->dops->submit_io)
@@ -130,6 +133,11 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
 }
 EXPORT_SYMBOL_GPL(iomap_dio_complete);
 
+static ssize_t iomap_dio_deferred_complete(void *data)
+{
+	return iomap_dio_complete(data);
+}
+
 static void iomap_dio_complete_work(struct work_struct *work)
 {
 	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
@@ -152,27 +160,69 @@ void iomap_dio_bio_end_io(struct bio *bio)
 {
 	struct iomap_dio *dio = bio->bi_private;
 	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+	struct kiocb *iocb = dio->iocb;
 
 	if (bio->bi_status)
 		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
+	if (!atomic_dec_and_test(&dio->ref))
+		goto release_bio;
 
-	if (atomic_dec_and_test(&dio->ref)) {
-		if (dio->wait_for_completion) {
-			struct task_struct *waiter = dio->submit.waiter;
-			WRITE_ONCE(dio->submit.waiter, NULL);
-			blk_wake_io_task(waiter);
-		} else if (dio->flags & IOMAP_DIO_WRITE) {
-			struct inode *inode = file_inode(dio->iocb->ki_filp);
-
-			WRITE_ONCE(dio->iocb->private, NULL);
-			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
-			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
-		} else {
-			WRITE_ONCE(dio->iocb->private, NULL);
-			iomap_dio_complete_work(&dio->aio.work);
-		}
-	}
+	/*
+	 * Synchronous dio, task itself will handle any completion work
+	 * that needs after IO. All we need to do is wake the task.
+	 */
+	if (dio->wait_for_completion) {
+		struct task_struct *waiter = dio->submit.waiter;
+
+		WRITE_ONCE(dio->submit.waiter, NULL);
+		blk_wake_io_task(waiter);
+		goto release_bio;
+	}
+
+	/*
+	 * Flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline
+	 */
+	if (dio->flags & IOMAP_DIO_INLINE_COMP) {
+		WRITE_ONCE(iocb->private, NULL);
+		iomap_dio_complete_work(&dio->aio.work);
+		goto release_bio;
+	}
+
+	/*
+	 * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule
+	 * our completion that way to avoid an async punt to a workqueue.
+	 */
+	if (dio->flags & IOMAP_DIO_CALLER_COMP) {
+		/* only polled IO cares about private cleared */
+		iocb->private = dio;
+		iocb->dio_complete = iomap_dio_deferred_complete;
+
+		/*
+		 * Invoke ->ki_complete() directly. We've assigned our
+		 * dio_complete callback handler, and since the issuer set
+		 * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will
+		 * notice ->dio_complete being set and will defer calling that
+		 * handler until it can be done from a safe task context.
+		 *
+		 * Note that the 'res' being passed in here is not important
+		 * for this case. The actual completion value of the request
+		 * will be gotten from dio_complete when that is run by the
+		 * issuer.
+		 */
+		iocb->ki_complete(iocb, 0);
+		goto release_bio;
+	}
 
+	/*
+	 * Async DIO completion that requires filesystem level completion work
+	 * gets punted to a work queue to complete as the operation may require
+	 * more IO to be issued to finalise filesystem metadata changes or
+	 * guarantee data integrity.
+	 */
+	INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+	queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
+			&dio->aio.work);
+release_bio:
 	if (should_dirty) {
 		bio_check_pages_dirty(bio);
 	} else {
@@ -203,7 +253,7 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 /*
  * Figure out the bio's operation flags from the dio request, the
  * mapping, and whether or not we want FUA. Note that we can end up
- * clearing the WRITE_FUA flag in the dio request.
+ * clearing the WRITE_THROUGH flag in the dio request.
  */
 static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
 		const struct iomap *iomap, bool use_fua)
@@ -217,7 +267,7 @@ static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
 	if (use_fua)
 		opflags |= REQ_FUA;
 	else
-		dio->flags &= ~IOMAP_DIO_WRITE_FUA;
+		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
 
 	return opflags;
 }
@@ -257,12 +307,19 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
 		 * Use a FUA write if we need datasync semantics, this is a pure
 		 * data IO that doesn't require any metadata updates (including
 		 * after IO completion such as unwritten extent conversion) and
-		 * the underlying device supports FUA. This allows us to avoid
-		 * cache flushes on IO completion.
+		 * the underlying device either supports FUA or doesn't have
+		 * a volatile write cache. This allows us to avoid cache flushes
+		 * on IO completion. If we can't use writethrough and need to
+		 * sync, disable in-task completions as dio completion will
+		 * need to call generic_write_sync() which will do a blocking
+		 * fsync / cache flush call.
 		 */
 		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
-		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
+		    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
+		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
 			use_fua = true;
+		else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+			dio->flags &= ~IOMAP_DIO_CALLER_COMP;
 	}
 
 	/*
@@ -277,10 +334,23 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
 		goto out;
 
 	/*
-	 * We can only poll for single bio I/Os.
+	 * We can only do deferred completion for pure overwrites that
+	 * don't require additional IO at completion. This rules out
+	 * writes that need zeroing or extent conversion, extend
+	 * the file size, or issue journal IO or cache flushes
+	 * during completion processing.
 	 */
 	if (need_zeroout ||
+	    ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
 	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
+		dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+
+	/*
+	 * The rules for polled IO completions follow the guidelines as the
+	 * ones we set for inline and deferred completions. If none of those
+	 * are available for this IO, clear the polled flag.
+	 */
+	if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
 		dio->iocb->ki_flags &= ~IOCB_HIPRI;
 
 	if (need_zeroout) {
@@ -505,12 +575,14 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 
 	dio->submit.iter = iter;
 	dio->submit.waiter = current;
-	dio->submit.poll_bio = NULL;
 
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		iomi.flags |= IOMAP_NOWAIT;
 
 	if (iov_iter_rw(iter) == READ) {
+		/* reads can always complete inline */
+		dio->flags |= IOMAP_DIO_INLINE_COMP;
+
 		if (iomi.pos >= dio->i_size)
 			goto out_free_dio;
@@ -524,6 +596,15 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		iomi.flags |= IOMAP_WRITE;
 		dio->flags |= IOMAP_DIO_WRITE;
 
+		/*
+		 * Flag as supporting deferred completions, if the issuer
+		 * groks it. This can avoid a workqueue punt for writes.
+		 * We may later clear this flag if we need to do other IO
+		 * as part of this IO completion.
+		 */
+		if (iocb->ki_flags & IOCB_DIO_CALLER_COMP)
+			dio->flags |= IOMAP_DIO_CALLER_COMP;
+
 		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
 			ret = -EAGAIN;
 			if (iomi.pos >= dio->i_size ||
@@ -537,13 +618,16 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 			dio->flags |= IOMAP_DIO_NEED_SYNC;
 
 			/*
-			 * For datasync only writes, we optimistically try
-			 * using FUA for this IO. Any non-FUA write that
-			 * occurs will clear this flag, hence we know before
-			 * completion whether a cache flush is necessary.
+			 * For datasync only writes, we optimistically try using
+			 * WRITE_THROUGH for this IO. This flag requires either
+			 * FUA writes through the device's write cache, or a
+			 * normal write to a device without a volatile write
+			 * cache. For the former, Any non-FUA write that occurs
+			 * will clear this flag, hence we know before completion
+			 * whether a cache flush is necessary.
 			 */
 			if (!(iocb->ki_flags & IOCB_SYNC))
-				dio->flags |= IOMAP_DIO_WRITE_FUA;
+				dio->flags |= IOMAP_DIO_WRITE_THROUGH;
 		}
 
 	/*
@@ -605,14 +689,13 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		iomap_dio_set_error(dio, ret);
 
 	/*
-	 * If all the writes we issued were FUA, we don't need to flush the
-	 * cache on IO completion. Clear the sync flag for this case.
+	 * If all the writes we issued were already written through to the
+	 * media, we don't need to flush the cache on IO completion. Clear the
+	 * sync flag for this case.
 	 */
-	if (dio->flags & IOMAP_DIO_WRITE_FUA)
+	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
 		dio->flags &= ~IOMAP_DIO_NEED_SYNC;
 
-	WRITE_ONCE(iocb->private, dio->submit.poll_bio);
-
 	/*
 	 * We are about to drop our additional submission reference, which
 	 * might be the last reference to the dio. There are three different
...
@@ -578,7 +578,7 @@ const struct address_space_operations xfs_address_space_operations = {
 	.read_folio = xfs_vm_read_folio,
 	.readahead = xfs_vm_readahead,
 	.writepages = xfs_vm_writepages,
-	.dirty_folio = filemap_dirty_folio,
+	.dirty_folio = iomap_dirty_folio,
 	.release_folio = iomap_release_folio,
 	.invalidate_folio = iomap_invalidate_folio,
 	.bmap = xfs_vm_bmap,
...
@@ -175,7 +175,7 @@ const struct address_space_operations zonefs_file_aops = {
 	.read_folio = zonefs_read_folio,
 	.readahead = zonefs_readahead,
 	.writepages = zonefs_writepages,
-	.dirty_folio = filemap_dirty_folio,
+	.dirty_folio = iomap_dirty_folio,
 	.release_folio = iomap_release_folio,
 	.invalidate_folio = iomap_invalidate_folio,
 	.migrate_folio = filemap_migrate_folio,
...
@@ -338,6 +338,20 @@ enum rw_hint {
 #define IOCB_NOIO		(1 << 20)
 /* can use bio alloc cache */
 #define IOCB_ALLOC_CACHE	(1 << 21)
+/*
+ * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
+ * iocb completion can be passed back to the owner for execution from a safe
+ * context rather than needing to be punted through a workqueue. If this
+ * flag is set, the bio completion handling may set iocb->dio_complete to a
+ * handler function and iocb->private to context information for that handler.
+ * The issuer should call the handler with that context information from task
+ * context to complete the processing of the iocb. Note that while this
+ * provides a task context for the dio_complete() callback, it should only be
+ * used on the completion side for non-IO generating completions. It's fine to
+ * call blocking functions from this callback, but they should not wait for
+ * unrelated IO (like cache flushing, new IO generation, etc).
+ */
+#define IOCB_DIO_CALLER_COMP	(1 << 22)
 
 /* for use in trace events */
 #define TRACE_IOCB_STRINGS \
@@ -351,7 +365,8 @@ enum rw_hint {
 	{ IOCB_WRITE, "WRITE" }, \
 	{ IOCB_WAITQ, "WAITQ" }, \
 	{ IOCB_NOIO, "NOIO" }, \
-	{ IOCB_ALLOC_CACHE, "ALLOC_CACHE" }
+	{ IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
+	{ IOCB_DIO_CALLER_COMP, "CALLER_COMP" }
 
 struct kiocb {
 	struct file		*ki_filp;
@@ -360,7 +375,23 @@ struct kiocb {
 	void			*private;
 	int			ki_flags;
 	u16			ki_ioprio; /* See linux/ioprio.h */
-	struct wait_page_queue	*ki_waitq; /* for async buffered IO */
+	union {
+		/*
+		 * Only used for async buffered reads, where it denotes the
+		 * page waitqueue associated with completing the read. Valid
+		 * IFF IOCB_WAITQ is set.
+		 */
+		struct wait_page_queue	*ki_waitq;
+		/*
+		 * Can be used for O_DIRECT IO, where the completion handling
+		 * is punted back to the issuer of the IO. May only be set
+		 * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
+		 * must then check for presence of this handler when ki_complete
+		 * is invoked. The data passed in to this handler must be
+		 * assigned to ->private when dio_complete is assigned.
+		 */
+		ssize_t (*dio_complete)(void *data);
+	};
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
...
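
The issuer-side half of the contract documented above (call ->dio_complete from task context before finishing the iocb) might look roughly like the hypothetical helper below; io_uring's real version is io_req_rw_complete() later in this diff:

	static long my_finish_iocb(struct kiocb *iocb, long res)
	{
		/*
		 * If the filesystem stashed a dio_complete handler, run it now
		 * (from task context) with the data it placed in iocb->private
		 * to obtain the final result of the request.
		 */
		if ((iocb->ki_flags & IOCB_DIO_CALLER_COMP) && iocb->dio_complete)
			res = iocb->dio_complete(iocb->private);
		return res;
	}
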
@@ -261,9 +261,10 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
-struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
...
@@ -470,6 +470,19 @@ static inline void *detach_page_private(struct page *page)
 	return folio_detach_private(page_folio(page));
 }
 
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER	8
+#endif
+
 #ifdef CONFIG_NUMA
 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
 #else
@@ -501,22 +514,69 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
 pgoff_t page_cache_prev_miss(struct address_space *mapping,
 		pgoff_t index, unsigned long max_scan);
 
-#define FGP_ACCESSED		0x00000001
-#define FGP_LOCK		0x00000002
-#define FGP_CREAT		0x00000004
-#define FGP_WRITE		0x00000008
-#define FGP_NOFS		0x00000010
-#define FGP_NOWAIT		0x00000020
-#define FGP_FOR_MMAP		0x00000040
-#define FGP_STABLE		0x00000080
+/**
+ * typedef fgf_t - Flags for getting folios from the page cache.
+ *
+ * Most users of the page cache will not need to use these flags;
+ * there are convenience functions such as filemap_get_folio() and
+ * filemap_lock_folio(). For users which need more control over exactly
+ * what is done with the folios, these flags to __filemap_get_folio()
+ * are available.
+ *
+ * * %FGP_ACCESSED - The folio will be marked accessed.
+ * * %FGP_LOCK - The folio is returned locked.
+ * * %FGP_CREAT - If no folio is present then a new folio is allocated,
+ *   added to the page cache and the VM's LRU list. The folio is
+ *   returned locked.
+ * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
+ *   folio is already in cache. If the folio was allocated, unlock it
+ *   before returning so the caller can do the same dance.
+ * * %FGP_WRITE - The folio will be written to by the caller.
+ * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
+ * * %FGP_NOWAIT - Don't block on the folio lock.
+ * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
+ * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
+ *   implementation.
+ */
+typedef unsigned int __bitwise fgf_t;
+
+#define FGP_ACCESSED		((__force fgf_t)0x00000001)
+#define FGP_LOCK		((__force fgf_t)0x00000002)
+#define FGP_CREAT		((__force fgf_t)0x00000004)
+#define FGP_WRITE		((__force fgf_t)0x00000008)
+#define FGP_NOFS		((__force fgf_t)0x00000010)
+#define FGP_NOWAIT		((__force fgf_t)0x00000020)
+#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
+#define FGP_STABLE		((__force fgf_t)0x00000080)
+#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */
 
 #define FGP_WRITEBEGIN	(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
 
+/**
+ * fgf_set_order - Encode a length in the fgf_t flags.
+ * @size: The suggested size of the folio to create.
+ *
+ * The caller of __filemap_get_folio() can use this to suggest a preferred
+ * size for the folio that is created. If there is already a folio at
+ * the index, it will be returned, no matter what its size. If a folio
+ * is freshly created, it may be of a different size than requested
+ * due to alignment constraints, memory pressure, or the presence of
+ * other folios at nearby indices.
+ */
+static inline fgf_t fgf_set_order(size_t size)
+{
+	unsigned int shift = ilog2(size);
+
+	if (shift <= PAGE_SHIFT)
+		return 0;
+	return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
+}
+
 void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-		int fgp_flags, gfp_t gfp);
+		fgf_t fgp_flags, gfp_t gfp);
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
-		int fgp_flags, gfp_t gfp);
+		fgf_t fgp_flags, gfp_t gfp);
 
 /**
  * filemap_get_folio - Find and get a folio.
@@ -590,7 +650,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
 }
 
 static inline struct page *find_get_page_flags(struct address_space *mapping,
-			pgoff_t offset, int fgp_flags)
+			pgoff_t offset, fgf_t fgp_flags)
 {
 	return pagecache_get_page(mapping, offset, fgp_flags, 0);
 }
...
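
As an example of how the new interface is meant to be used, a filesystem write path that knows it is about to write len bytes at pos can ask the page cache for a suitably sized folio roughly like this (illustrative helper; iomap's equivalent is iomap_get_folio(), whose buffered-io changes are in the collapsed diff above):

	/*
	 * Sketch: grab (or create) a folio sized for a 'len'-byte write at
	 * 'pos'. fgf_set_order() only encodes a hint; __filemap_get_folio()
	 * may still return a smaller folio, or an existing one of any size.
	 */
	static struct folio *grab_write_folio(struct address_space *mapping,
			loff_t pos, size_t len)
	{
		fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);

		return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
				mapping_gfp_mask(mapping));
	}
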
@@ -163,7 +163,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
 	return ret;
 }
 
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		size_t bytes, struct iov_iter *i);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 void iov_iter_revert(struct iov_iter *i, size_t bytes);
@@ -184,6 +184,13 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
 {
 	return copy_page_to_iter(&folio->page, offset, bytes, i);
 }
+
+static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
+		size_t offset, size_t bytes, struct iov_iter *i)
+{
+	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
+}
+
 size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
 				 size_t bytes, struct iov_iter *i);
...
@@ -105,6 +105,7 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	} else {
 		rw->kiocb.ki_ioprio = get_current_ioprio();
 	}
+	rw->kiocb.dio_complete = NULL;
 
 	rw->addr = READ_ONCE(sqe->addr);
 	rw->len = READ_ONCE(sqe->len);
@@ -280,6 +281,15 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 
 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 {
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+	struct kiocb *kiocb = &rw->kiocb;
+
+	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
+		long res = kiocb->dio_complete(rw->kiocb.private);
+
+		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
+	}
+
 	io_req_io_end(req);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
@@ -295,9 +305,11 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 
-	if (__io_complete_rw_common(req, res))
-		return;
-	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
+	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
+		if (__io_complete_rw_common(req, res))
+			return;
+		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
+	}
 	req->io_task_work.func = io_req_rw_complete;
 	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 }
@@ -901,6 +913,15 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	kiocb_start_write(kiocb);
 	kiocb->ki_flags |= IOCB_WRITE;
 
+	/*
+	 * For non-polled IO, set IOCB_DIO_CALLER_COMP, stating that our handler
+	 * groks deferring the completion to task context. This isn't
+	 * necessary and useful for polled IO as that can always complete
+	 * directly.
+	 */
+	if (!(kiocb->ki_flags & IOCB_HIPRI))
+		kiocb->ki_flags |= IOCB_DIO_CALLER_COMP;
+
 	if (likely(req->file->f_op->write_iter))
 		ret2 = call_write_iter(req->file, kiocb, &s->iter);
 	else if (req->file->f_op->write)
...
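
None of this changes the io_uring userspace API; the latency win is transparent to applications. A minimal liburing sketch of the kind of O_DIRECT write that benefits (this helper is an assumption for illustration, not part of the series):

	#include <liburing.h>

	/* 'fd' must be opened with O_DIRECT; error handling kept minimal */
	static int write_direct(struct io_uring *ring, int fd, const void *buf,
				unsigned int len, off_t offset)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;
		int ret;

		io_uring_prep_write(sqe, fd, buf, len, offset);
		io_uring_submit(ring);
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret == 0) {
			ret = cqe->res;	/* bytes written or -errno */
			io_uring_cqe_seen(ring, cqe);
		}
		return ret;
	}
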
@@ -566,24 +566,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_zero);
 
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
-				  struct iov_iter *i)
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+				  size_t bytes, struct iov_iter *i)
 {
-	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
-	if (!page_copy_sane(page, offset, bytes)) {
-		kunmap_atomic(kaddr);
+	size_t n, copied = 0;
+
+	if (!page_copy_sane(page, offset, bytes))
 		return 0;
-	}
-	if (WARN_ON_ONCE(!i->data_source)) {
-		kunmap_atomic(kaddr);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
-	iterate_and_advance(i, bytes, base, len, off,
-		copyin(p + off, base, len),
-		memcpy_from_iter(i, p + off, base, len)
-	)
-	kunmap_atomic(kaddr);
-	return bytes;
+
+	do {
+		char *p;
+
+		n = bytes - copied;
+		if (PageHighMem(page)) {
+			page += offset / PAGE_SIZE;
+			offset %= PAGE_SIZE;
+			n = min_t(size_t, n, PAGE_SIZE - offset);
+		}
+
+		p = kmap_atomic(page) + offset;
+		iterate_and_advance(i, n, base, len, off,
+			copyin(p + off, base, len),
+			memcpy_from_iter(i, p + off, base, len)
+		)
+		kunmap_atomic(p);
+		copied += n;
+		offset += n;
+	} while (PageHighMem(page) && copied != bytes && n > 0);
+
+	return copied;
 }
 EXPORT_SYMBOL(copy_page_from_iter_atomic);
...
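
Together with the copy_folio_from_iter_atomic() wrapper added to uio.h above, a buffered write can now copy more than PAGE_SIZE bytes per call when the folio is large; on highmem configurations the loop above still maps and copies one page at a time under the hood. A simplified sketch of one copy step, loosely modelled on the iomap write loop (the helper name is made up):

	/*
	 * The return value may be short if the user buffer faults; callers are
	 * expected to fault the pages in and retry, as the real write loop does.
	 */
	static size_t copy_chunk_to_folio(struct folio *folio, loff_t pos,
			size_t bytes, struct iov_iter *i)
	{
		size_t offset = offset_in_folio(folio, pos);
		size_t copied;

		bytes = min(bytes, folio_size(folio) - offset);
		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		flush_dcache_folio(folio);
		return copied;
	}
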
@@ -1855,30 +1855,15 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
 *
 * Looks up the page cache entry at @mapping & @index.
 *
-* @fgp_flags can be zero or more of these flags:
-*
-* * %FGP_ACCESSED - The folio will be marked accessed.
-* * %FGP_LOCK - The folio is returned locked.
-* * %FGP_CREAT - If no page is present then a new page is allocated using
-*   @gfp and added to the page cache and the VM's LRU list.
-*   The page is returned locked and with an increased refcount.
-* * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
-*   page is already in cache. If the page was allocated, unlock it before
-*   returning so the caller can do the same dance.
-* * %FGP_WRITE - The page will be written to by the caller.
-* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
-* * %FGP_NOWAIT - Don't get blocked by page lock.
-* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
-*
 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
 * if the %GFP flags specified for %FGP_CREAT are atomic.
 *
-* If there is a page cache page, it is returned with an increased refcount.
+* If this function returns a folio, it is returned with an increased refcount.
 *
 * Return: The found folio or an ERR_PTR() otherwise.
 */
 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
-		int fgp_flags, gfp_t gfp)
+		fgf_t fgp_flags, gfp_t gfp)
 {
 	struct folio *folio;
@@ -1920,7 +1905,9 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		folio_wait_stable(folio);
 no_page:
 	if (!folio && (fgp_flags & FGP_CREAT)) {
+		unsigned order = FGF_GET_ORDER(fgp_flags);
 		int err;
 
 		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
 			gfp |= __GFP_WRITE;
 		if (fgp_flags & FGP_NOFS)
@@ -1929,26 +1916,44 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 			gfp &= ~GFP_KERNEL;
 			gfp |= GFP_NOWAIT | __GFP_NOWARN;
 		}
-
-		folio = filemap_alloc_folio(gfp, 0);
-		if (!folio)
-			return ERR_PTR(-ENOMEM);
-
 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;
 
-		/* Init accessed so avoid atomic mark_page_accessed later */
-		if (fgp_flags & FGP_ACCESSED)
-			__folio_set_referenced(folio);
+		if (!mapping_large_folio_support(mapping))
+			order = 0;
+		if (order > MAX_PAGECACHE_ORDER)
+			order = MAX_PAGECACHE_ORDER;
+		/* If we're not aligned, allocate a smaller folio */
+		if (index & ((1UL << order) - 1))
+			order = __ffs(index);
 
-		err = filemap_add_folio(mapping, folio, index, gfp);
-		if (unlikely(err)) {
+		do {
+			gfp_t alloc_gfp = gfp;
+
+			err = -ENOMEM;
+			if (order == 1)
+				order = 0;
+			if (order > 0)
+				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
+			folio = filemap_alloc_folio(alloc_gfp, order);
+			if (!folio)
+				continue;
+
+			/* Init accessed so avoid atomic mark_page_accessed later */
+			if (fgp_flags & FGP_ACCESSED)
+				__folio_set_referenced(folio);
+
+			err = filemap_add_folio(mapping, folio, index, gfp);
+			if (!err)
+				break;
 			folio_put(folio);
 			folio = NULL;
-			if (err == -EEXIST)
-				goto repeat;
-		}
+		} while (order-- > 0);
 
+		if (err == -EEXIST)
+			goto repeat;
+		if (err)
+			return ERR_PTR(err);
 		/*
 		 * filemap_add_folio locks the page, and for mmap
 		 * we expect an unlocked page.
...
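
To make the order-selection rules above concrete, the stand-alone helper below mirrors the clamping logic for illustration only (it is not part of the patch). With 4 KiB pages, a request encoded as order 8 (1 MiB) at page index 64 is not 256-page aligned, so __ffs(64) == 6 and a 64-page (256 KiB) folio is attempted first; the do/while in the real code then falls back towards order 0 (skipping order 1) if allocation fails.

	static unsigned int pick_folio_order(struct address_space *mapping,
			pgoff_t index, unsigned int requested)
	{
		unsigned int order = requested;

		if (!mapping_large_folio_support(mapping))
			order = 0;
		if (order > MAX_PAGECACHE_ORDER)
			order = MAX_PAGECACHE_ORDER;
		if (index & ((1UL << order) - 1))	/* keep folios naturally aligned */
			order = __ffs(index);
		return order;
	}
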
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(add_to_page_cache_lru);
 
 noinline
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
-		int fgp_flags, gfp_t gfp)
+		fgf_t fgp_flags, gfp_t gfp)
 {
 	struct folio *folio;
...
@@ -461,19 +461,6 @@ static int try_context_readahead(struct address_space *mapping,
 	return 1;
 }
 
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER	8
-#endif
-
 static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
 		pgoff_t mark, unsigned int order, gfp_t gfp)
 {
...