/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.in.h.opcode = opcode;
	args.in.h.nodeid = nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(*outargp);
	args.out.args[0].value = outargp;

	return fuse_simple_request(fc, &args);
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}
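/*
 * Note: ff->reserved_req allocated above is held back for the lifetime of
 * the fuse_file so that a request is always available at close time; see
 * fuse_prepare_release() and fuse_file_put() below, which reuse it for
 * RELEASE.
 */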

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->misc.release.inode);
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (ff->fc->no_open) {
			/*
			 * Drop the release request when client does not
			 * implement 'open'
			 */
			__clear_bit(FR_BACKGROUND, &req->flags);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else if (sync) {
			__clear_bit(FR_BACKGROUND, &req->flags);
			fuse_request_send(ff->fc, req);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			__set_bit(FR_BACKGROUND, &req->flags);
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
	if (!fc->no_open || isdir) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS || isdir) {
			fuse_file_free(ff);
			return err;
		} else {
			fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
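/*
 * Note: a server replying -ENOSYS to FUSE_OPEN sets fc->no_open above, so
 * later opens of regular files succeed locally with FOPEN_KEEP_CACHE and
 * the matching release request is dropped (see fuse_file_put()).
 */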

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fc->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fc->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;
	bool lock_inode = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (lock_inode)
		inode_lock(inode);

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);

	if (!err)
		fuse_finish_open(inode, file);

	if (lock_inode)
		inode_unlock(inode);

	return err;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	req->misc.release.inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount:
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	__set_bit(FR_FORCE, &ff->reserved_req->flags);
	__clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
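/*
 * The loop above is the standard 32-round XTEA encipher of the 64-bit
 * (v1:v0) value, keyed by fc->scramble_key; 0x9E3779B9 is XTEA's usual
 * key-schedule constant.  Equal owners always map to equal cookies, but
 * the raw pointer value is never revealed to the server.
 */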

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				   pgoff_t idx_to)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
		if (idx_from < curr_index + req->num_pages &&
		    curr_index <= idx_to) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}
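/*
 * The check above is a plain interval-overlap test: a request covering
 * pages [curr_index, curr_index + num_pages) intersects [idx_from, idx_to]
 * exactly when idx_from < curr_index + num_pages and curr_index <= idx_to.
 */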

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__set_bit(FR_FORCE, &req->flags);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
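/*
 * Note: FLUSH uses a nofail request and FR_FORCE, presumably because
 * ->flush() runs at close time, where an allocation failure or a dropped
 * request could not be retried; a server replying -ENOSYS just disables
 * FLUSH for the rest of the connection.
 */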

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);
	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		goto out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	bool is_sync = is_sync_kiocb(io->iocb);
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && is_sync)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !is_sync) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fc->lock);
			fi->attr_version = ++fc->attr_version;
			spin_unlock(&fc->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, !io->write);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}

static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
		size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	__fuse_get_request(req);
	fuse_request_send_background(fc, req);

	return num_bytes;
}
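/*
 * Accounting sketch for the async path: each split request bumps io->reqs
 * and takes a reference on the fuse_io_priv (kref_get() here, kref_put()
 * in fuse_aio_complete()); the extra __fuse_get_request() presumably pins
 * the request across the background send until fuse_aio_complete_req()
 * has run.
 */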

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static void fuse_short_read(struct fuse_req *req, struct inode *inode,
			    u64 attr_ver)
{
	size_t num_read = req->out.args[0].size;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole are in page cache,
		 * but have not reached the client fs yet. So, the hole is not
		 * present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_SHIFT;
		size_t off = num_read & (PAGE_SIZE - 1);

		for (i = start_idx; i < req->num_pages; i++) {
			zero_user_segment(req->pages[i], off, PAGE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}
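/*
 * Worked example (assuming 4K pages): a 10K reply into a 3-page request
 * gives start_idx == 2 and off == 2K, so the tail of the third page
 * (index 2) and the whole of any later page are zeroed out.
 */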

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_SIZE;
	u64 attr_ver;
	int err;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, &io, pos, count, NULL);
	err = req->out.h.error;

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_short_read(req, inode, attr_ver);

		SetPageUptodate(page);
	}

	fuse_put_request(fc, req);

	return err;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count)
			fuse_short_read(req, inode, req->misc.read.attr_ver);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	get_page(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}
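/*
 * A request is flushed and a fresh one started whenever adding the page
 * would overfill it: the request already holds FUSE_MAX_PAGES_PER_REQ
 * pages, would exceed fc->max_read, or the new page is not contiguous
 * with the previous one.
 */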

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}
bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fc->lock);

	return ret;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, &io, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_SIZE - offset)
			count -= PAGE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		put_page(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
			       struct address_space *mapping,
			       struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		iov_iter_advance(ii, tmp);
		if (!tmp) {
			unlock_page(page);
			put_page(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_SHIFT) -
		     (pos >> PAGE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}
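/*
 * Worked example (assuming 4K pages): pos == 5000 and len == 10000 touch
 * bytes 5000..14999, i.e. file pages 1..3, so this returns 3 (capped at
 * FUSE_MAX_PAGES_PER_REQ).
 */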

static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
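/*
 * FUSE_I_SIZE_UNSTABLE is set above while an extending write is in
 * flight; fuse_read_update_size() checks the same bit and refuses to
 * shrink i_size meanwhile, so a racing short read cannot undo the
 * extension before fuse_write_update_size() runs.
 */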

static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, NULL, file, NULL);
		if (err)
			return err;

		return generic_file_write_iter(iocb, from);
	}

	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(file, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	inode_unlock(inode);

	return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
		unsigned index, unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */
	ssize_t ret = 0;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (ii->type & ITER_KVEC) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		size_t start;
		ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
					*nbytesp - nbytes,
					req->max_pages - req->num_pages,
					&start);
		if (ret < 0)
			break;

		iov_iter_advance(ii, ret);
		nbytes += ret;

		ret += start;
		npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;

		req->page_descs[req->num_pages].offset = start;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return ret < 0 ? ret : 0;
}
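/*
 * Worked example of the tail trim above (hypothetical values, assuming 4K
 * pages): start == 1K and a 6K transfer give ret == 7K and npages == 2;
 * the descriptors start as 3K and 4K, and the final subtraction trims the
 * second one to 3K, matching the 6K actually packed.
 */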

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
}

ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->file;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
	ssize_t res = 0;
	struct fuse_req *req;
	int err = 0;

	if (io->async)
		req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
	else
		req = fuse_get_req(fc, fuse_iter_npages(iter));
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			inode_lock(inode);
		fuse_sync_writes(inode);
		if (!write)
			inode_unlock(inode);
	}

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		err = fuse_get_user_pages(req, iter, &nbytes, write);
		if (err && !nbytes)
			break;

		if (write)
			nres = fuse_send_write(req, io, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, io, pos, nbytes, owner);

		if (!io->async)
			fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			err = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = 0;
			err = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			if (io->async)
				req = fuse_get_req_for_background(fc,
					fuse_iter_npages(iter));
			else
				req = fuse_get_req(fc, fuse_iter_npages(iter));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res > 0 ? res : err;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct file *file = io->file;
	struct inode *inode = file_inode(file);

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
	return __fuse_direct_read(&io, to, &iocb->ki_pos);
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	inode_lock(inode);
	res = generic_write_checks(iocb, from);
	if (res > 0)
		res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	fuse_invalidate_attr(inode);
	if (res > 0)
		fuse_write_update_size(inode, iocb->ki_pos);
	inode_unlock(inode);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	for (i = 0; i < req->num_pages; i++)
		__free_page(req->pages[i]);

	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	list_del(&req->writepages_entry);
	for (i = 0; i < req->num_pages; i++) {
		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
	}
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
				loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	__u64 data_size = req->num_pages * PAGE_SIZE;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}
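/*
 * The request is cropped against the size passed in: entirely below it
 * the write goes out whole, straddling it the write is shrunk, and
 * entirely beyond it the write is dropped as truncated off.
 */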

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	size_t crop = i_size_read(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req, crop);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	while (req->misc.write.next) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &req->misc.write.in;
		struct fuse_req *next = req->misc.write.next;
		req->misc.write.next = next->misc.write.next;
		next->misc.write.next = NULL;
		next->ff = fuse_file_get(req->ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fc->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fc->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);
	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, 0);

	return err;
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs(1);
	if (!req)
		goto err;

	/* writeback always goes to bg_queue */
	__set_bit(FR_BACKGROUND, &req->flags);
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	req->ff = fuse_write_file_get(fc, fi);
	if (!req->ff)
		goto err_nofile;

	fuse_write_fill(req, req->ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->misc.write.next = NULL;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_descs[0].offset = 0;
	req->page_descs[0].length = PAGE_SIZE;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return error;
}
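/*
 * Writeback is done from a private copy (tmp_page), which is presumably
 * why end_page_writeback() runs before the request completes: the
 * original page may be reclaimed or redirtied while the copy is still in
 * flight, at the cost of the NR_WRITEBACK_TEMP accounting above.
 */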

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		return 0;
	}

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

struct fuse_fill_wb_data {
	struct fuse_req *req;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
};

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = req->num_pages;
	int i;

	req->ff = fuse_file_get(data->ff);
	spin_lock(&fc->lock);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

static bool fuse_writepage_in_flight(struct fuse_req *new_req,
				     struct page *page)
{
	struct fuse_conn *fc = get_fuse_conn(new_req->inode);
	struct fuse_inode *fi = get_fuse_inode(new_req->inode);
	struct fuse_req *tmp;
	struct fuse_req *old_req;
	bool found = false;
	pgoff_t curr_index;

	BUG_ON(new_req->num_pages != 0);

	spin_lock(&fc->lock);
	list_del(&new_req->writepages_entry);
	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
		BUG_ON(old_req->inode != new_req->inode);
		curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
		if (curr_index <= page->index &&
		    page->index < curr_index + old_req->num_pages) {
			found = true;
			break;
		}
	}
	if (!found) {
		list_add(&new_req->writepages_entry, &fi->writepages);
		goto out_unlock;
	}

	new_req->num_pages = 1;
	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
		BUG_ON(tmp->inode != new_req->inode);
		curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
		if (tmp->num_pages == 1 &&
		    curr_index == page->index) {
			old_req = tmp;
		}
	}

	if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
		struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);

		copy_highpage(old_req->pages[0], page);
		spin_unlock(&fc->lock);

		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK_TEMP);
		wb_writeout_inc(&bdi->wb);
		fuse_writepage_free(fc, new_req);
		fuse_request_free(new_req);
		goto out;
	} else {
		new_req->misc.write.next = old_req->misc.write.next;
		old_req->misc.write.next = new_req;
	}
out_unlock:
	spin_unlock(&fc->lock);
out:
	return found;
}

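/*
 * write_cache_pages() callback: copy one dirty page into a freshly
 * allocated temporary page and append it to the request under
 * construction.  The pending request is sent off first whenever the
 * page cannot be appended (request full, would exceed fc->max_write,
 * non-contiguous index, or the page is already under writeback).
 */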
static int fuse_writepages_fill(struct page *page,
		struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	bool is_writeback;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
		if (!data->ff)
			goto out_unlock;
	}

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

	if (req && req->num_pages &&
	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
		data->req = NULL;
	}
	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment req->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->req == NULL) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		err = -ENOMEM;
		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
		if (!req) {
			__free_page(tmp_page);
			goto out_unlock;
		}

		fuse_write_fill(req, data->ff, page_offset(page), 0);
		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
		req->misc.write.next = NULL;
		req->in.argpages = 1;
		__set_bit(FR_BACKGROUND, &req->flags);
		req->num_pages = 0;
		req->end = fuse_writepage_end;
		req->inode = inode;

		spin_lock(&fc->lock);
		list_add(&req->writepages_entry, &fi->writepages);
		spin_unlock(&fc->lock);

		data->req = req;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	req->pages[req->num_pages] = tmp_page;
	req->page_descs[req->num_pages].offset = 0;
	req->page_descs[req->num_pages].length = PAGE_SIZE;

	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (is_writeback && fuse_writepage_in_flight(req, page)) {
		end_page_writeback(page);
		data->req = NULL;
		goto out_unlock;
	}
	data->orig_pages[req->num_pages] = page;

	/*
	 * Protected by fc->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fc->lock);
	req->num_pages++;
	spin_unlock(&fc->lock);

out_unlock:
	unlock_page(page);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.inode = inode;
	data.req = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.req) {
		/* Ignore errors if we can write at least one page */
		BUG_ON(!data.req->num_pages);
		fuse_writepages_send(&data);
		err = 0;
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}

/*
 * It would be worthwhile to make sure that space is reserved on disk for
 * the write, but how to implement that without hurting performance needs
 * more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file, in
	 * which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_MASK)) {
		size_t off = pos & ~PAGE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	put_page(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = (pos + copied) & ~PAGE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_SIZE);
		SetPageUptodate(page);
	}

	fuse_write_update_size(inode, pos + copied);
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}

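/*
 * Write a dirty page back synchronously and wait for completion, so
 * the caller (e.g. invalidate_inode_pages2()) can safely release it.
 */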
static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

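/* Translate the server's fuse_file_lock reply into a struct file_lock */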
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->in.h.opcode = opcode;
	args->in.h.nodeid = get_node_id(inode);
	args->in.numargs = 1;
	args->in.args[0].size = sizeof(*inarg);
	args->in.args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg);
	err = fuse_simple_request(fc, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = locks_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.in.h.opcode = FUSE_BMAP;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

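/*
 * Forward SEEK_HOLE/SEEK_DATA to the server as FUSE_LSEEK.  If the
 * server doesn't implement it (-ENOSYS), remember that in fc->no_lseek
 * and fall back to generic_file_llseek() on freshly updated attributes.
 */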
static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_lseek_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.whence = whence
	};
	struct fuse_lseek_out outarg;
	int err;

	if (fc->no_lseek)
		goto fallback;

	args.in.h.opcode = FUSE_LSEEK;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err) {
		if (err == -ENOSYS) {
			fc->no_lseek = 1;
			goto fallback;
		}
		return err;
	}

	return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);

fallback:
	err = fuse_update_attributes(inode, NULL, file, NULL);
	if (!err)
		return generic_file_llseek(file, offset, whence);
	else
		return err;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
		retval = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_END:
		inode_lock(inode);
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (!retval)
			retval = generic_file_llseek(file, offset, whence);
		inode_unlock(inode);
		break;
	case SEEK_HOLE:
	case SEEK_DATA:
		inode_lock(inode);
		retval = fuse_lseek(file, offset, whence);
		inode_unlock(inode);
		break;
	default:
		retval = -EINVAL;
	}

	return retval;
}

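/*
 * Copy ioctl data between the request's buffer pages and the userspace
 * iovec, in the direction selected by @to_user.
 */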
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
			unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, to_user ? READ : WRITE, iov, nr_segs, bytes);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}


/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to the FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on this invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;
	fuse_page_descs_length_init(req, 0, req->num_pages);

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure things are in boundary, separate checks
		 * are to protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *uninitialized_var(parent);

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = (__u32)poll_requested_events(wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	args.in.h.opcode = FUSE_POLL;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

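/*
 * Truncate the file back to the current in-core i_size via a SETATTR
 * request; used to undo a partially failed extending direct write.
 */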
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(inode, &attr, file);
}

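/*
 * Round up to a multiple of the largest possible request payload
 * (FUSE_MAX_PAGES_PER_REQ pages; e.g. 128k with 32 pages of 4k).
 */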
static inline loff_t fuse_round_up(loff_t off)
{
	return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}

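/*
 * Entry point for direct IO.  Requests are submitted asynchronously
 * when the server negotiated FUSE_ASYNC_DIO (fc->async_dio), except
 * for size-extending writes, which must complete synchronously so the
 * new i_size can be established (or rolled back) before returning.
 * A synchronous caller of an async request waits on @wait, which
 * fuse_aio_complete() completes via io->done.
 */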
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	bool async_dio = ff->fc->async_dio;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	struct fuse_io_priv *io;
	bool is_sync = is_sync_kiocb(iocb);

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
		return 0;

	/* optimization for short read */
	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
		if (offset >= i_size)
			return 0;
		iov_iter_truncate(iter, fuse_round_up(i_size - offset));
		count = iov_iter_count(iter);
	}

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	io->file = file;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = async_dio;
	io->iocb = iocb;

	/*
	 * We cannot asynchronously extend the size of a file. We have no method
	 * to wait on real async I/O requests, so we must submit this request
	 * synchronously.
	 */
	if (!is_sync && (offset + count > i_size) &&
	    iov_iter_rw(iter) == WRITE)
		io->async = false;

	if (io->async && is_sync) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr(inode);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}

	if (io->async) {
		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!is_sync)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

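/*
 * Implement fallocate() via FUSE_FALLOCATE.  Only FALLOC_FL_KEEP_SIZE
 * and FALLOC_FL_PUNCH_HOLE are supported.  Hole punching flushes and
 * waits for cached writes with the inode locked, so the server sees a
 * consistent view of the range being punched.
 */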
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			   (mode & FALLOC_FL_PUNCH_HOLE);

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	if (lock_inode) {
		inode_lock(inode);
		if (mode & FALLOC_FL_PUNCH_HOLE) {
			loff_t endbyte = offset + length - 1;
			err = filemap_write_and_wait_range(inode->i_mapping,
							   offset, endbyte);
			if (err)
				goto out;

			fuse_sync_writes(inode);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.in.h.opcode = FUSE_FALLOCATE;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		bool changed = fuse_write_update_size(inode, offset + length);

		if (changed && fc->writeback_cache)
			file_update_time(file);
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (lock_inode)
		inode_unlock(inode);

	return err;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_direct_read_iter,
	.write_iter	= fuse_direct_write_iter,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops  = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}