write.c 20.6 KB
Newer Older
1 2 3 4 5 6 7 8 9 10
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11

12
#include <linux/backing-dev.h>
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * Mark a page as dirty so that it gets picked up by writeback.
 */
int afs_set_page_dirty(struct page *page)
{
	int ret;

	_enter("");
	ret = __set_page_dirty_nobuffers(page);
	return ret;
}

/*
 * partly or wholly fill a page that's under preparation for writing
 *
 * Fetch the data covering [pos, pos + len) from the server into @page so
 * that a partial overwrite doesn't destroy the rest of the page.  Returns
 * 0 on success or a negative error code.
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	/* Beyond the current file size there is nothing on the server to
	 * fetch - just zero the region being prepared.
	 */
	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	/* Set up a read request covering just this one page. */
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);	/* ref held by the request; released via afs_put_read() */

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			/* The file vanished on the server - mark the vnode so
			 * later operations fail rather than retry forever.
			 */
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
Nick Piggin's avatar
Nick Piggin committed
82 83 84
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
85
{
Al Viro's avatar
Al Viro committed
86
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
Nick Piggin's avatar
Nick Piggin committed
87
	struct page *page;
88
	struct key *key = afs_file_key(file);
89 90 91
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
92
	pgoff_t index = pos >> PAGE_SHIFT;
93 94
	int ret;

95
	_enter("{%llx:%llu},{%lx},%u,%u",
Nick Piggin's avatar
Nick Piggin committed
96
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);
97

98 99 100 101
	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
102

103
	page = grab_cache_page_write_begin(mapping, index, flags);
104
	if (!page)
Nick Piggin's avatar
Nick Piggin committed
105 106
		return -ENOMEM;

107
	if (!PageUptodate(page) && len != PAGE_SIZE) {
108
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
109
		if (ret < 0) {
110 111
			unlock_page(page);
			put_page(page);
112 113 114
			_leave(" = %d [prep]", ret);
			return ret;
		}
Nick Piggin's avatar
Nick Piggin committed
115
		SetPageUptodate(page);
116 117
	}

118 119 120
	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

121
try_again:
122 123 124 125 126 127 128 129 130
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
131 132
	}

133
	if (f != t) {
134 135 136 137 138
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
139 140 141 142 143 144
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
145 146 147 148 149 150 151 152
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
153 154
	}

155 156
	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
157 158
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
159
	SetPagePrivate(page);
160 161
	set_page_private(page, priv);
	_leave(" = 0");
162 163
	return 0;

164 165 166 167
	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
168
	_debug("flush conflict");
169 170 171 172
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
173 174
	}

175 176 177 178 179
	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
180 181 182 183 184 185
	goto try_again;
}

/*
 * finalise part of a write to a page
 *
 * Extend the cached file size if the write went past it, make sure the
 * page is fully up to date (fetching any missing tail from the server if
 * the copy from userspace was short), mark the page dirty and release the
 * lock and reference taken by afs_write_begin().  Returns the number of
 * bytes accepted or a negative error code.
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	/* Unlocked check first; recheck under wb_lock in case another writer
	 * extended the file concurrently.
	 */
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	/* Drop the lock and ref from afs_write_begin(). */
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 *
 * Called when writeback failed fatally: each page in [first, last] has
 * its uptodate flag cleared, is marked with an error, has writeback ended
 * and is then removed from the page cache.
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		/* Process up to a pagevec's worth of pages per pass. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			/* Advance the resume point past this page. */
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 *
 * Used when a store failed transiently: writeback is ended on each page
 * and the page is handed back to the dirty list for a later retry.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pvec;
	unsigned nr, i;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pvec);

	while (first <= last) {
		_debug("redirty %lx-%lx", first, last);

		/* Take up to a pagevec's worth of pages at a time. */
		nr = last - first + 1;
		if (nr > PAGEVEC_SIZE)
			nr = PAGEVEC_SIZE;
		pvec.nr = find_get_pages_contig(mapping, first, nr, pvec.pages);
		ASSERTCMP(pvec.nr, ==, nr);

		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pvec);
	}

	_leave("");
}

315 316 317
/*
 * write to a file
 *
 * Store the pages [first, last] back to the server, starting at byte
 * @offset within the first page and ending at byte @to within the last.
 * Authentication uses the writeback keys cached on the vnode; if the
 * server rejects one key, the next is tried in turn.
 *
 * Returns 0 on success or a negative error code (-ENOKEY if no usable
 * key was found).
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		/* Remember the first failure code seen. */
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	/* Pin the key so it can't be destroyed once the lock is dropped. */
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		/* Account the number of bytes successfully stored. */
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* The server rejected this key - resume the scan from the
		 * next key on the vnode's list.
		 */
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

399
/*
400 401
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
402
 */
403 404 405 406
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
407
{
408
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
409
	struct page *pages[8], *page;
410 411
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
412 413 414 415 416 417 418 419 420
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

421 422 423 424 425
	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
426
	start = primary_page->index;
427 428 429
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
430 431
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);
432 433

	WARN_ON(offset == to);
434 435 436
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);
437

438 439
	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
440
		goto no_more;
441

442 443 444
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
445
		n = final_page - start + 1;
446 447
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
448
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
449 450 451 452
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
453 454 455
			do {
				put_page(pages[--n]);
			} while (n > 0);
456 457 458 459 460
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
461 462 463
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
464
			if (page->index > final_page)
465
				break;
Nick Piggin's avatar
Nick Piggin committed
466
			if (!trylock_page(page))
467
				break;
468
			if (!PageDirty(page) || PageWriteback(page)) {
469 470 471
				unlock_page(page);
				break;
			}
472 473 474 475

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
476 477
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
478 479 480
				unlock_page(page);
				break;
			}
481 482
			to = t;

483 484 485
			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

486 487 488 489 490 491 492 493 494 495 496 497 498 499 500
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
501
	} while (start <= final_page && count < 65536);
502 503

no_more:
504 505 506 507 508 509
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

510 511 512 513 514
	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

515 516 517
	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
518
		ret = count;
519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
547
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
548 549 550
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

567 568
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
569 570 571 572 573 574 575 576 577 578 579 580 581 582
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 *
 * Walk the dirty pages in [index, end], locking each in turn and handing
 * it to afs_write_back_from_locked_page() (which consumes the page lock).
 * Stops when the region is exhausted or wbc->nr_to_write is used up.
 * *_next is set to the index at which to resume.  Returns 0 or a negative
 * error code.
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		/* Find the next dirty-tagged page at or after index. */
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		/* The page may have been truncated or cleaned while we were
		 * waiting for the lock.
		 */
		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			/* For data-integrity writeback, wait for the earlier
			 * store to finish rather than skipping the page.
			 */
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 *
 * Handles the three writeback_control addressing modes: cyclic (resume
 * from the last position and wrap round to the start), whole-file, and an
 * explicit byte range.
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		/* Wrap round and do the portion before the resume point. */
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 *
 * Clear the dirty-region record from page->private and end writeback on
 * every page covered by the store operation described by @call, then drop
 * any writeback keys that are no longer needed.
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		/* Handle up to a pagevec's worth of pages per pass. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
Al Viro's avatar
Al Viro committed
723
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
724
{
Al Viro's avatar
Al Viro committed
725
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
726
	ssize_t result;
Al Viro's avatar
Al Viro committed
727
	size_t count = iov_iter_count(from);
728

729
	_enter("{%llx:%llu},{%zu},",
Al Viro's avatar
Al Viro committed
730
	       vnode->fid.vid, vnode->fid.vnode, count);
731 732 733 734 735 736 737 738 739 740

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

Al Viro's avatar
Al Viro committed
741
	result = generic_file_write_iter(iocb, from);
742 743 744 745 746 747 748 749 750 751

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
752
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
753
{
754 755
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
756

757
	_enter("{%llx:%llu},{n=%pD},%d",
758
	       vnode->fid.vid, vnode->fid.vnode, file,
759 760
	       datasync);

761
	return file_write_and_wait_range(file, start, end);
762
}
763 764 765 766 767

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
768
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
769
{
770 771 772 773
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;
774

775
	_enter("{{%llx:%llu}},{%lx}",
776
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
777

778
	sb_start_pagefault(inode->i_sb);
779

780 781 782
	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
783
#ifdef CONFIG_AFS_FSCACHE
784
	fscache_wait_on_page_write(vnode->cache, vmf->page);
785 786
#endif

787 788 789 790 791 792 793 794 795 796 797 798 799 800 801
	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
802 803
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
804 805 806 807 808
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
809
}
810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 *
 * If the page is dirty, write the recorded dirty region (or the whole
 * page if no region was recorded) back to the server, then detach the
 * private dirty-tracking state and any fscache marking.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		/* afs_store_data() takes (offset, to), i.e. (f, t) - the
		 * arguments were previously passed the wrong way round,
		 * writing back the wrong part of the page.
		 */
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}