/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
			 enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the page has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the PageDirty information is stale.
 * If any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
		return;

	if (PageWriteback(page))
		*writeback = true;

	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);
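
/*
 * Usage sketch (hypothetical caller, not from this file): writeback or
 * reclaim code that must not touch a page with dirty or in-flight buffers
 * could use the helper above roughly like so:
 *
 *	bool dirty, writeback;
 *
 *	lock_page(page);
 *	buffer_check_dirty_writeback(page, &dirty, &writeback);
 *	unlock_page(page);
 *	if (dirty || writeback)
 *		goto keep_page;
 *
 * "keep_page" is an assumed label in the hypothetical caller.
 */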

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
}

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
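
/*
 * Usage sketch (illustrative, not a definitive recipe): a caller wanting a
 * synchronous write of a single buffer can pair end_buffer_write_sync()
 * with submit_bh(), which is essentially what sync_dirty_buffer() does:
 *
 *	int err = 0;
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_write_sync;
 *	submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */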

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device %pg blocksize: %d\n", bdev,
			1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	put_page(page);
out:
	return ret;
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone starting new async I/O reads any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space 
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
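
/*
 * Usage sketch (illustrative): an fsync implementation can be built on
 * sync_mapping_buffers(), much as __generic_file_fsync() in fs/libfs.c is.
 * "my_fsync" below is a hypothetical example, not an API from this file:
 *
 *	int my_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *		int ret, err;
 *
 *		ret = file_write_and_wait_range(file, start, end);
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		return ret ? ret : err;
 *	}
 */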

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
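
/*
 * Usage sketch (hypothetical, ext2-style): a filesystem that has just
 * modified an indirect block queues it against the owning inode so that a
 * later fsync() finds it; "indirect_blocknr" is an assumed variable:
 *
 *	struct buffer_head *bh = sb_getblk(inode->i_sb, indirect_blocknr);
 *
 *	if (bh) {
 *		...modify bh->b_data...
 *		mark_buffer_dirty_inode(bh, inode);
 *		brelse(bh);
 *	}
 *
 * sync_mapping_buffers(inode->i_mapping) will then write it out.
 */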

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 *
 * The caller must hold lock_page_memcg().
 */
static void __set_page_dirty(struct page *page, struct address_space *mapping,
			     int warn)
{
	unsigned long flags;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);

	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 * 
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}
	
	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS;
	long offset;

	if (retry)
		gfp |= __GFP_NOFAIL;

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
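
/*
 * Note (illustrative): grow_dev_page() below calls this as
 *
 *	bh = alloc_page_buffers(page, size, true);
 *
 * where retry == true makes the allocation use __GFP_NOFAIL, and the
 * resulting list is then linked into a ring and attached to the page via
 * link_dev_buffers()/attach_page_buffers() under private_lock.
 */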

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;		/* Will call free_more_memory() */
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	page = find_or_create_page(inode->i_mapping, index, gfp_mask);

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						(sector_t)index << sizebits,
						size);
			goto done;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, true);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
			size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with the proper size buffers. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		struct address_space *mapping = NULL;

		lock_page_memcg(page);
		if (!TestSetPageDirty(page)) {
			mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, 0);
		}
		unlock_page_memcg(page);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

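/*
 * Usage sketch (illustrative): the canonical read-modify-write cycle for a
 * metadata block; "sb", "blocknr", "off", "src" and "len" are assumed:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + off, src, len);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */
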
void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_page && bh->b_page->mapping)
		mapping_set_error(bh->b_page->mapping, -EIO);
	if (bh->b_assoc_map)
		mapping_set_error(bh->b_assoc_map, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size, gfp);
	return bh;
}
EXPORT_SYMBOL(__getblk_gfp);
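
/*
 * Usage sketch (illustrative): filesystems normally reach this through the
 * sb_getblk()/__getblk() wrappers, e.g. when a block will be overwritten
 * completely and need not be read first ("sb" and "blocknr" are assumed):
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */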

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  The page cache can be allocated from non-movable area
 *  not to prevent page migration if you set gfp to zero.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
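
/*
 * Usage sketch (illustrative): most callers go through the sb_bread()
 * wrapper, which passes __GFP_MOVABLE; "sb" and "blocknr" are assumed:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;	(the block could not be read)
 *	...examine bh->b_data...
 *	brelse(bh);
 */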

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

static bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;
	
	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return 1;
	}

	return 0;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state, b_state_old;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = bh->b_state;
	for (;;) {
		b_state_old = cmpxchg(&bh->b_state, b_state,
				      (b_state & ~BUFFER_FLAGS_DISCARD));
		if (b_state_old == b_state)
			break;
		b_state = b_state_old;
	}
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	unsigned int stop = length + offset;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > PAGE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
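
/*
 * Usage note (illustrative): buffer-backed address_spaces can point their
 * ->invalidatepage at this function directly; when ->invalidatepage is
 * NULL, do_invalidatepage() in mm/truncate.c falls back to it as well.
 *
 *	const struct address_space_operations my_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * "my_aops" above is a hypothetical example.
 */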

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, true);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);
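
/*
 * Usage sketch (illustrative): a ->readpage or ->writepage implementation
 * typically ensures buffers exist before walking them:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *	head = page_buffers(page);
 *
 * compare create_page_buffers() later in this file.
 */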

/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from return from this function and until the
 * moment when something will explicitly mark the buffer dirty (hopefully that
 * will not happen until we will free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway. We used to use unmap_buffer() for such invalidation, but that was
 * wrong. We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
		count = pagevec_count(&pvec);
		for (i = 0; i < count; i++) {
			struct page *page = pvec.pages[i];

			if (!page_has_buffers(page))
				continue;
			/*
			 * We use page lock instead of bd_mapping->private_lock
			 * to pin buffers here since we can afford to sleep and
			 * it scales better than a global spinlock lock.
			 */
			lock_page(page);
			/* Recheck when the page is locked which pins bhs */
			if (!page_has_buffers(page))
				goto unlock_page;
			head = page_buffers(page);
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

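/*
 * Usage sketch (illustrative): a filesystem that has just allocated blocks
 * it is about to write through its own mapping would call
 *
 *	clean_bdev_aliases(sb->s_bdev, first_block, nr_blocks);
 *
 * ("first_block" and "nr_blocks" are assumed variables), or the
 * clean_bdev_bh_alias() wrapper for a single mapped bh, so that no stale
 * blockdev-cache alias can later overwrite the new data.
 */
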
/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
	return ilog2(blocksize);
}

static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
{
	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
				     b_state);
	return page_buffers(page);
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.