Commit 3123bca7 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull second set of btrfs updates from Chris Mason:
 "The most important changes here are from Josef, fixing a btrfs
  regression in 3.14 that can cause corruptions in the extent allocation
  tree when snapshots are in use.

  Josef also fixed some deadlocks in send/recv and other assorted races
  when balance is running"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (23 commits)
  Btrfs: fix compile warnings on avr32 platform
  btrfs: allow mounting btrfs subvolumes with different ro/rw options
  btrfs: export global block reserve size as space_info
  btrfs: fix crash in remount(thread_pool=) case
  Btrfs: abort the transaction when we don't find our extent ref
  Btrfs: fix EINVAL checks in btrfs_clone
  Btrfs: fix unlock in __start_delalloc_inodes()
  Btrfs: scrub raid56 stripes in the right way
  Btrfs: don't compress for a small write
  Btrfs: more efficient io tree navigation on wait_extent_bit
  Btrfs: send, build path string only once in send_hole
  btrfs: filter invalid arg for btrfs resize
  Btrfs: send, fix data corruption due to incorrect hole detection
  Btrfs: kmalloc() doesn't return an ERR_PTR
  Btrfs: fix snapshot vs nocow writing
  btrfs: Change the expanding write sequence to fix snapshot related bug.
  btrfs: make device scan less noisy
  btrfs: fix lockdep warning with reclaim lock inversion
  Btrfs: hold the commit_root_sem when getting the commit root during send
  Btrfs: remove transaction from send
  ...
parents 582076ab e4fbaee2
@@ -323,6 +323,8 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
 {
+	if (!wq)
+		return;
 	wq->normal->max_active = max;
 	if (wq->high)
 		wq->high->max_active = max;
...
@@ -330,7 +330,10 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 		goto out;
 	}
 
-	root_level = btrfs_old_root_level(root, time_seq);
+	if (path->search_commit_root)
+		root_level = btrfs_header_level(root->commit_root);
+	else
+		root_level = btrfs_old_root_level(root, time_seq);
 
 	if (root_level + 1 == level) {
 		srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -1099,9 +1102,9 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
  *
  * returns 0 on success, < 0 on error.
  */
-int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
-			 struct btrfs_fs_info *fs_info, u64 bytenr,
-			 u64 time_seq, struct ulist **roots)
+static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info, u64 bytenr,
+				  u64 time_seq, struct ulist **roots)
 {
 	struct ulist *tmp;
 	struct ulist_node *node = NULL;
@@ -1137,6 +1140,20 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+			 struct btrfs_fs_info *fs_info, u64 bytenr,
+			 u64 time_seq, struct ulist **roots)
+{
+	int ret;
+
+	if (!trans)
+		down_read(&fs_info->commit_root_sem);
+	ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
+	if (!trans)
+		up_read(&fs_info->commit_root_sem);
+	return ret;
+}
+
 /*
  * this makes the path point to (inum INODE_ITEM ioff)
  */
@@ -1516,6 +1533,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
 		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+	} else {
+		down_read(&fs_info->commit_root_sem);
 	}
 
 	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
@@ -1526,8 +1545,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 	ULIST_ITER_INIT(&ref_uiter);
 	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
-		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
-					   tree_mod_seq_elem.seq, &roots);
+		ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
+					     tree_mod_seq_elem.seq, &roots);
 		if (ret)
 			break;
 		ULIST_ITER_INIT(&root_uiter);
@@ -1549,6 +1568,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 	if (!search_commit_root) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
 		btrfs_end_transaction(trans, fs_info->extent_root);
+	} else {
+		up_read(&fs_info->commit_root_sem);
 	}
 
 	return ret;
...
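The btrfs_find_all_roots() wrapper above encodes the new locking rule: a caller that holds no transaction must take commit_root_sem for reading before walking backrefs against the commit roots, because nothing else pins those roots in place. A minimal user-space sketch of the same pattern, with pthread_rwlock_t standing in for the kernel rw_semaphore and all names illustrative rather than kernel API:

#include <pthread.h>

/*
 * Sketch of the rule introduced above: the core helper assumes the
 * commit roots are stable, either because the caller is inside a
 * transaction or because the wrapper took the semaphore shared.
 */
static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;

static int __find_all_roots(void *trans)
{
	(void)trans;
	/* ... walk backrefs against the commit roots here ... */
	return 0;
}

static int find_all_roots(void *trans)
{
	int ret;

	if (!trans)
		pthread_rwlock_rdlock(&commit_root_sem);
	ret = __find_all_roots(trans);
	if (!trans)
		pthread_rwlock_unlock(&commit_root_sem);
	return ret;
}

int main(void)
{
	/* No transaction handle: the wrapper takes the semaphore. */
	return find_all_roots((void *)0);
}

Callers that already hold a transaction (like iterate_extent_inodes() above) skip the semaphore entirely and call the double-underscore helper directly.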
@@ -2769,9 +2769,13 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 			 * the commit roots are read only
 			 * so we always do read locks
 			 */
+			if (p->need_commit_sem)
+				down_read(&root->fs_info->commit_root_sem);
 			b = root->commit_root;
 			extent_buffer_get(b);
 			level = btrfs_header_level(b);
+			if (p->need_commit_sem)
+				up_read(&root->fs_info->commit_root_sem);
 			if (!p->skip_locking)
 				btrfs_tree_read_lock(b);
 		} else {
@@ -5360,7 +5364,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 {
 	int ret;
 	int cmp;
-	struct btrfs_trans_handle *trans = NULL;
 	struct btrfs_path *left_path = NULL;
 	struct btrfs_path *right_path = NULL;
 	struct btrfs_key left_key;
@@ -5378,9 +5381,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	u64 right_blockptr;
 	u64 left_gen;
 	u64 right_gen;
-	u64 left_start_ctransid;
-	u64 right_start_ctransid;
-	u64 ctransid;
 
 	left_path = btrfs_alloc_path();
 	if (!left_path) {
@@ -5404,21 +5404,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	right_path->search_commit_root = 1;
 	right_path->skip_locking = 1;
 
-	spin_lock(&left_root->root_item_lock);
-	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
-	spin_unlock(&left_root->root_item_lock);
-
-	spin_lock(&right_root->root_item_lock);
-	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
-	spin_unlock(&right_root->root_item_lock);
-
-	trans = btrfs_join_transaction(left_root);
-	if (IS_ERR(trans)) {
-		ret = PTR_ERR(trans);
-		trans = NULL;
-		goto out;
-	}
-
 	/*
 	 * Strategy: Go to the first items of both trees. Then do
 	 *
@@ -5455,6 +5440,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	 * the right if possible or go up and right.
 	 */
 
+	down_read(&left_root->fs_info->commit_root_sem);
 	left_level = btrfs_header_level(left_root->commit_root);
 	left_root_level = left_level;
 	left_path->nodes[left_level] = left_root->commit_root;
@@ -5464,6 +5450,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	right_root_level = right_level;
 	right_path->nodes[right_level] = right_root->commit_root;
 	extent_buffer_get(right_path->nodes[right_level]);
+	up_read(&left_root->fs_info->commit_root_sem);
 
 	if (left_level == 0)
 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -5482,67 +5469,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	advance_left = advance_right = 0;
 
 	while (1) {
-		/*
-		 * We need to make sure the transaction does not get committed
-		 * while we do anything on commit roots. This means, we need to
-		 * join and leave transactions for every item that we process.
-		 */
-		if (trans && btrfs_should_end_transaction(trans, left_root)) {
-			btrfs_release_path(left_path);
-			btrfs_release_path(right_path);
-
-			ret = btrfs_end_transaction(trans, left_root);
-			trans = NULL;
-			if (ret < 0)
-				goto out;
-		}
-		/* now rejoin the transaction */
-		if (!trans) {
-			trans = btrfs_join_transaction(left_root);
-			if (IS_ERR(trans)) {
-				ret = PTR_ERR(trans);
-				trans = NULL;
-				goto out;
-			}
-
-			spin_lock(&left_root->root_item_lock);
-			ctransid = btrfs_root_ctransid(&left_root->root_item);
-			spin_unlock(&left_root->root_item_lock);
-			if (ctransid != left_start_ctransid)
-				left_start_ctransid = 0;
-
-			spin_lock(&right_root->root_item_lock);
-			ctransid = btrfs_root_ctransid(&right_root->root_item);
-			spin_unlock(&right_root->root_item_lock);
-			if (ctransid != right_start_ctransid)
-				right_start_ctransid = 0;
-
-			if (!left_start_ctransid || !right_start_ctransid) {
-				WARN(1, KERN_WARNING
-					"BTRFS: btrfs_compare_tree detected "
-					"a change in one of the trees while "
-					"iterating. This is probably a "
-					"bug.\n");
-				ret = -EIO;
-				goto out;
-			}
-
-			/*
-			 * the commit root may have changed, so start again
-			 * where we stopped
-			 */
-			left_path->lowest_level = left_level;
-			right_path->lowest_level = right_level;
-			ret = btrfs_search_slot(NULL, left_root,
-					&left_key, left_path, 0, 0);
-			if (ret < 0)
-				goto out;
-			ret = btrfs_search_slot(NULL, right_root,
-					&right_key, right_path, 0, 0);
-			if (ret < 0)
-				goto out;
-		}
-
 		if (advance_left && !left_end_reached) {
 			ret = tree_advance(left_root, left_path, &left_level,
 					   left_root_level,
@@ -5672,14 +5598,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	btrfs_free_path(left_path);
 	btrfs_free_path(right_path);
 	kfree(tmp_buf);
-
-	if (trans) {
-		if (!ret)
-			ret = btrfs_end_transaction(trans, left_root);
-		else
-			btrfs_end_transaction(trans, left_root);
-	}
-
 	return ret;
 }
...
@@ -609,6 +609,7 @@ struct btrfs_path {
 	unsigned int skip_locking:1;
 	unsigned int leave_spinning:1;
 	unsigned int search_commit_root:1;
+	unsigned int need_commit_sem:1;
 };
 
 /*
@@ -986,7 +987,8 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
 #define BTRFS_BLOCK_GROUP_RAID5		(1ULL << 7)
 #define BTRFS_BLOCK_GROUP_RAID6		(1ULL << 8)
-#define BTRFS_BLOCK_GROUP_RESERVED	BTRFS_AVAIL_ALLOC_BIT_SINGLE
+#define BTRFS_BLOCK_GROUP_RESERVED	(BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
+					 BTRFS_SPACE_INFO_GLOBAL_RSV)
 
 enum btrfs_raid_types {
 	BTRFS_RAID_RAID10,
@@ -1018,6 +1020,12 @@ enum btrfs_raid_types {
  */
 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)
 
+/*
+ * A fake block group type that is used to communicate global block reserve
+ * size to userspace via the SPACE_INFO ioctl.
+ */
+#define BTRFS_SPACE_INFO_GLOBAL_RSV	(1ULL << 49)
+
 #define BTRFS_EXTENDED_PROFILE_MASK	(BTRFS_BLOCK_GROUP_PROFILE_MASK | \
 					 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
@@ -1440,7 +1448,7 @@ struct btrfs_fs_info {
 	 */
 	struct mutex ordered_extent_flush_mutex;
 
-	struct rw_semaphore extent_commit_sem;
+	struct rw_semaphore commit_root_sem;
 
 	struct rw_semaphore cleanup_work_sem;
@@ -1711,7 +1719,6 @@ struct btrfs_root {
 	struct btrfs_block_rsv *block_rsv;
 
 	/* free ino cache stuff */
-	struct mutex fs_commit_mutex;
 	struct btrfs_free_space_ctl *free_ino_ctl;
 	enum btrfs_caching_type cached;
 	spinlock_t cache_lock;
...
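BTRFS_SPACE_INFO_GLOBAL_RSV occupies bit 49, directly above BTRFS_AVAIL_ALLOC_BIT_SINGLE, and is folded into BTRFS_BLOCK_GROUP_RESERVED so it can never collide with a real on-disk block group type. A hypothetical userspace consumer of the SPACE_INFO ioctl could then recognize the extra row like this; the is_global_rsv() helper is illustrative, not part of btrfs-progs:

#include <stdint.h>
#include <stdio.h>

/* Bits copied from the ctree.h hunk above. */
#define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)
#define BTRFS_SPACE_INFO_GLOBAL_RSV	(1ULL << 49)

/* Does a SPACE_INFO row describe the global block reserve? */
static int is_global_rsv(uint64_t flags)
{
	return (flags & BTRFS_SPACE_INFO_GLOBAL_RSV) != 0;
}

int main(void)
{
	printf("%d\n", is_global_rsv(BTRFS_SPACE_INFO_GLOBAL_RSV));	/* 1 */
	printf("%d\n", is_global_rsv(BTRFS_AVAIL_ALLOC_BIT_SINGLE));	/* 0 */
	return 0;
}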
@@ -329,6 +329,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 {
 	struct extent_state *cached_state = NULL;
 	int ret;
+	bool need_lock = (current->journal_info ==
+			  (void *)BTRFS_SEND_TRANS_STUB);
 
 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 		return 0;
@@ -336,6 +338,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	if (atomic)
 		return -EAGAIN;
 
+	if (need_lock) {
+		btrfs_tree_read_lock(eb);
+		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+	}
+
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 			 0, &cached_state);
 	if (extent_buffer_uptodate(eb) &&
@@ -347,10 +354,21 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		       "found %llu\n",
 		       eb->start, parent_transid, btrfs_header_generation(eb));
 	ret = 1;
-	clear_extent_buffer_uptodate(eb);
+
+	/*
+	 * Things reading via commit roots that don't have normal protection,
+	 * like send, can have a really old block in cache that may point at a
+	 * block that has been freed and re-allocated.  So don't clear uptodate
+	 * if we find an eb that is under IO (dirty/writeback) because we could
+	 * end up reading in the stale data and then writing it back out and
+	 * making everybody very sad.
+	 */
+	if (!extent_buffer_under_io(eb))
+		clear_extent_buffer_uptodate(eb);
 out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state, GFP_NOFS);
+	if (need_lock)
+		btrfs_tree_read_unlock_blocking(eb);
 	return ret;
 }
@@ -1546,7 +1564,6 @@ int btrfs_init_fs_root(struct btrfs_root *root)
 	root->subv_writers = writers;
 
 	btrfs_init_free_ino_ctl(root);
-	mutex_init(&root->fs_commit_mutex);
 	spin_lock_init(&root->cache_lock);
 	init_waitqueue_head(&root->cache_wait);
@@ -2324,7 +2341,7 @@ int open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->transaction_kthread_mutex);
 	mutex_init(&fs_info->cleaner_mutex);
 	mutex_init(&fs_info->volume_mutex);
-	init_rwsem(&fs_info->extent_commit_sem);
+	init_rwsem(&fs_info->commit_root_sem);
 	init_rwsem(&fs_info->cleanup_work_sem);
 	init_rwsem(&fs_info->subvol_sem);
 	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
...
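The verify_parent_transid() change hinges on one guard: never mark a buffer not-uptodate while it still has IO in flight, or the stale contents could be read back in and written out again. A toy C11 sketch of that guard, with the struct and field names invented purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for an extent buffer's state. */
struct buffer {
	atomic_int io_pages;	/* pages still under read/write IO */
	atomic_bool uptodate;	/* cached contents considered valid */
};

/* Mirror of the patch's rule: only invalidate an idle buffer. */
static void invalidate_if_idle(struct buffer *b)
{
	if (atomic_load(&b->io_pages) == 0)
		atomic_store(&b->uptodate, false);
}

int main(void)
{
	struct buffer b;

	atomic_init(&b.io_pages, 1);
	atomic_init(&b.uptodate, true);

	invalidate_if_idle(&b);		/* skipped: IO in flight */
	printf("uptodate=%d\n", (int)atomic_load(&b.uptodate));	/* 1 */

	atomic_store(&b.io_pages, 0);
	invalidate_if_idle(&b);		/* buffer idle: allowed */
	printf("uptodate=%d\n", (int)atomic_load(&b.uptodate));	/* 0 */
	return 0;
}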
@@ -419,7 +419,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 again:
 	mutex_lock(&caching_ctl->mutex);
 	/* need to make sure the commit_root doesn't disappear */
-	down_read(&fs_info->extent_commit_sem);
+	down_read(&fs_info->commit_root_sem);
 
 next:
 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
@@ -443,10 +443,10 @@ static noinline void caching_thread(struct btrfs_work *work)
 				break;
 
 			if (need_resched() ||
-			    rwsem_is_contended(&fs_info->extent_commit_sem)) {
+			    rwsem_is_contended(&fs_info->commit_root_sem)) {
 				caching_ctl->progress = last;
 				btrfs_release_path(path);
-				up_read(&fs_info->extent_commit_sem);
+				up_read(&fs_info->commit_root_sem);
 				mutex_unlock(&caching_ctl->mutex);
 				cond_resched();
 				goto again;
@@ -513,7 +513,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 err:
 	btrfs_free_path(path);
-	up_read(&fs_info->extent_commit_sem);
+	up_read(&fs_info->commit_root_sem);
 
 	free_excluded_extents(extent_root, block_group);
@@ -633,10 +633,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		return 0;
 	}
 
-	down_write(&fs_info->extent_commit_sem);
+	down_write(&fs_info->commit_root_sem);
 	atomic_inc(&caching_ctl->count);
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
-	up_write(&fs_info->extent_commit_sem);
+	up_write(&fs_info->commit_root_sem);
 
 	btrfs_get_block_group(cache);
@@ -2444,7 +2444,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			spin_unlock(&locked_ref->lock);
 			spin_lock(&delayed_refs->lock);
 			spin_lock(&locked_ref->lock);
-			if (rb_first(&locked_ref->ref_root)) {
+			if (rb_first(&locked_ref->ref_root) ||
+			    locked_ref->extent_op) {
 				spin_unlock(&locked_ref->lock);
 				spin_unlock(&delayed_refs->lock);
 				continue;
@@ -5470,7 +5471,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_space_info *space_info;
 
-	down_write(&fs_info->extent_commit_sem);
+	down_write(&fs_info->commit_root_sem);
 
 	list_for_each_entry_safe(caching_ctl, next,
 				 &fs_info->caching_block_groups, list) {
@@ -5489,7 +5490,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 	else
 		fs_info->pinned_extents = &fs_info->freed_extents[0];
 
-	up_write(&fs_info->extent_commit_sem);
+	up_write(&fs_info->commit_root_sem);
 
 	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
 		percpu_counter_set(&space_info->total_bytes_pinned, 0);
@@ -5744,6 +5745,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
 				bytenr, parent, root_objectid, owner_objectid,
 				owner_offset);
+			btrfs_abort_transaction(trans, extent_root, ret);
+			goto out;
 		} else {
 			btrfs_abort_transaction(trans, extent_root, ret);
 			goto out;
@@ -8255,14 +8258,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	struct btrfs_caching_control *caching_ctl;
 	struct rb_node *n;
 
-	down_write(&info->extent_commit_sem);
+	down_write(&info->commit_root_sem);
 	while (!list_empty(&info->caching_block_groups)) {
 		caching_ctl = list_entry(info->caching_block_groups.next,
 					 struct btrfs_caching_control, list);
 		list_del(&caching_ctl->list);
 		put_caching_control(caching_ctl);
 	}
-	up_write(&info->extent_commit_sem);
+	up_write(&info->commit_root_sem);
 
 	spin_lock(&info->block_group_cache_lock);
 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
@@ -8336,9 +8339,15 @@ static void __link_block_group(struct btrfs_space_info *space_info,
 			     struct btrfs_block_group_cache *cache)
 {
 	int index = get_block_group_index(cache);
+	bool first = false;
 
 	down_write(&space_info->groups_sem);
-	if (list_empty(&space_info->block_groups[index])) {
+	if (list_empty(&space_info->block_groups[index]))
+		first = true;
+	list_add_tail(&cache->list, &space_info->block_groups[index]);
+	up_write(&space_info->groups_sem);
+
+	if (first) {
 		struct kobject *kobj = &space_info->block_group_kobjs[index];
 		int ret;
@@ -8350,8 +8359,6 @@ static void __link_block_group(struct btrfs_space_info *space_info,
 			kobject_put(&space_info->kobj);
 		}
 	}
-	list_add_tail(&cache->list, &space_info->block_groups[index]);
-	up_write(&space_info->groups_sem);
 }
 
 static struct btrfs_block_group_cache *
...
@@ -749,6 +749,7 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		 * our range starts
 		 */
 		node = tree_search(tree, start);
+process_node:
 		if (!node)
 			break;
@@ -769,7 +770,10 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		if (start > end)
 			break;
 
-		cond_resched_lock(&tree->lock);
+		if (!cond_resched_lock(&tree->lock)) {
+			node = rb_next(node);
+			goto process_node;
+		}
 	}
 out:
 	spin_unlock(&tree->lock);
@@ -4306,7 +4310,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
-static int extent_buffer_under_io(struct extent_buffer *eb)
+int extent_buffer_under_io(struct extent_buffer *eb)
 {
 	return (atomic_read(&eb->io_pages) ||
 		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
...
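The wait_extent_bit() change exploits the return value of cond_resched_lock(): when it returns 0 the spinlock was never dropped, so the rbtree cannot have changed and the next extent state can be reached with rb_next() instead of a fresh tree search from the start offset. A toy user-space analogue of that contract; the kernel primitive decides internally whether to reschedule, while this mock takes an explicit flag:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return true only if the lock was dropped (and the protected tree
 * may therefore have changed under us).
 */
static bool cond_resched_lock_mock(pthread_mutex_t *l, bool need_resched)
{
	if (!need_resched)
		return false;	/* lock never dropped: cached nodes valid */
	pthread_mutex_unlock(l);
	sched_yield();		/* let waiters run */
	pthread_mutex_lock(l);
	return true;		/* must re-search the tree */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	printf("dropped=%d\n", cond_resched_lock_mock(&lock, false));	/* 0 */
	printf("dropped=%d\n", cond_resched_lock_mock(&lock, true));	/* 1 */
	pthread_mutex_unlock(&lock);
	return 0;
}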
@@ -320,6 +320,7 @@ int set_extent_buffer_dirty(struct extent_buffer *eb);
 int set_extent_buffer_uptodate(struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_buffer *eb);
 int extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_under_io(struct extent_buffer *eb);
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 			      unsigned long min_len, char **map,
 			      unsigned long *map_start,
...
@@ -1727,6 +1727,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	loff_t *ppos = &iocb->ki_pos;
 	u64 start_pos;
+	u64 end_pos;
 	ssize_t num_written = 0;
 	ssize_t err = 0;
 	size_t count, ocount;
@@ -1781,7 +1782,9 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 	start_pos = round_down(pos, root->sectorsize);
 	if (start_pos > i_size_read(inode)) {
-		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
+		/* Expand hole size to cover write data, preventing empty gap */
+		end_pos = round_up(pos + iov->iov_len, root->sectorsize);
+		err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
 		if (err) {
 			mutex_unlock(&inode->i_mutex);
 			goto out;
...
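The expanding-write fix rounds the hole expansion up past the end of the write instead of down to its start, so no unexpanded gap is left between the old i_size and the newly written data. A stand-alone sketch of the arithmetic, with the sizes chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Power-of-two rounding, as the kernel's round_down()/round_up(). */
#define round_down(x, y)	((x) & ~((uint64_t)(y) - 1))
#define round_up(x, y)		(((x) + (y) - 1) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t sectorsize = 4096;	/* illustrative values */
	uint64_t isize = 1000;		/* current file size */
	uint64_t pos = 6000;		/* expanding write starts here */
	uint64_t len = 3000;		/* bytes written */

	uint64_t start_pos = round_down(pos, sectorsize);	/* 4096 */
	uint64_t end_pos = round_up(pos + len, sectorsize);	/* 12288 */

	/* The old code expanded the hole only to start_pos; the fix
	 * expands it to end_pos so the whole write range is covered. */
	printf("hole [%llu, %llu) -> [%llu, %llu)\n",
	       (unsigned long long)isize, (unsigned long long)start_pos,
	       (unsigned long long)isize, (unsigned long long)end_pos);
	return 0;
}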
@@ -55,7 +55,7 @@ static int caching_kthread(void *data)
 	key.type = BTRFS_INODE_ITEM_KEY;
 again:
 	/* need to make sure the commit_root doesn't disappear */
-	mutex_lock(&root->fs_commit_mutex);
+	down_read(&fs_info->commit_root_sem);
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
@@ -88,7 +88,7 @@ static int caching_kthread(void *data)
 			btrfs_item_key_to_cpu(leaf, &key, 0);
 			btrfs_release_path(path);
 			root->cache_progress = last;
-			mutex_unlock(&root->fs_commit_mutex);
+			up_read(&fs_info->commit_root_sem);
 			schedule_timeout(1);
 			goto again;
 		} else
@@ -127,7 +127,7 @@ static int caching_kthread(void *data)
 	btrfs_unpin_free_ino(root);
 out:
 	wake_up(&root->cache_wait);
-	mutex_unlock(&root->fs_commit_mutex);
+	up_read(&fs_info->commit_root_sem);
 
 	btrfs_free_path(path);
@@ -223,11 +223,11 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 		 * or the caching work is done.
 		 */
 
-		mutex_lock(&root->fs_commit_mutex);
+		down_write(&root->fs_info->commit_root_sem);
 		spin_lock(&root->cache_lock);
 		if (root->cached == BTRFS_CACHE_FINISHED) {
 			spin_unlock(&root->cache_lock);
-			mutex_unlock(&root->fs_commit_mutex);
+			up_write(&root->fs_info->commit_root_sem);
 			goto again;
 		}
 		spin_unlock(&root->cache_lock);
@@ -240,7 +240,7 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 		else
 			__btrfs_add_free_space(pinned, objectid, 1);
 
-		mutex_unlock(&root->fs_commit_mutex);
+		up_write(&root->fs_info->commit_root_sem);
 	}
 }
@@ -250,7 +250,7 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
  * and others will just be dropped, because the commit root we were
  * searching has changed.
  *
- * Must be called with root->fs_commit_mutex held
+ * Must be called with root->fs_info->commit_root_sem held
  */
 void btrfs_unpin_free_ino(struct btrfs_root *root)
 {
...
@@ -394,6 +394,14 @@ static noinline int compress_file_range(struct inode *inode,
 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
 		btrfs_add_inode_defrag(NULL, inode);
 
+	/*
+	 * Skip compression for a small file range (<= blocksize) that
+	 * isn't an inline extent, since it doesn't save disk space at all.
+	 */
+	if ((end - start + 1) <= blocksize &&
+	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
+		goto cleanup_and_bail_uncompressed;
+
 	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
@@ -1270,6 +1278,15 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			disk_bytenr += extent_offset;
 			disk_bytenr += cur_offset - found_key.offset;
 			num_bytes = min(end + 1, extent_end) - cur_offset;
+			/*
+			 * If there are pending snapshots for this root,
+			 * we fall into the common COW way.
+			 */
+			if (!nolock) {
+				err = btrfs_start_nocow_write(root);
+				if (!err)
+					goto out_check;
+			}
 			/*
 			 * force cow if csum exists in the range.
 			 * this ensure that csum for a given extent are
@@ -1289,6 +1306,8 @@
 out_check:
 			if (extent_end <= start) {
 				path->slots[0]++;
+				if (!nolock && nocow)
+					btrfs_end_nocow_write(root);
 				goto next_slot;
 			}
 			if (!nocow) {
@@ -1306,8 +1325,11 @@
 			ret = cow_file_range(inode, locked_page,