1. 12 Jun, 2018 1 commit
    • Kees Cook's avatar
      treewide: Use array_size() in vmalloc() · 42bc47b3
      Kees Cook authored
      The vmalloc() function has no 2-factor argument form, so multiplication
      factors need to be wrapped in array_size(). This patch replaces cases of:
      
              vmalloc(a * b)
      
      with:
              vmalloc(array_size(a, b))
      
      as well as handling cases of:
      
              vmalloc(a * b * c)
      
      with:
      
              vmalloc(array3_size(a, b, c))
      
      This does, however, attempt to ignore constant size factors like:
      
              vmalloc(4 * 1024)
      
      though any constants defined via macros get caught up in the conversion.
      
      Any factors with a sizeof() of "unsigned char", "char", and "u8" were
      dropped, since they're redundant.
      
      The Coccinelle script used for this was:
      
      // Fix redundant parens around sizeof().
      @@
      type TYPE;
      expression THING, E;
      @@
      
      (
        vmalloc(
      -	(sizeof(TYPE)) * E
      +	sizeof(TYPE) * E
        , ...)
      |
        vmalloc(
      -	(sizeof(THING)) * E
      +	sizeof(THING) * E
        , ...)
      )
      
      // Drop single-byte sizes and redundant parens.
      @@
      expression COUNT;
      typedef u8;
      typedef __u8;
      @@
      
      (
        vmalloc(
      -	sizeof(u8) * (COUNT)
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(__u8) * (COUNT)
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(char) * (COUNT)
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(unsigned char) * (COUNT)
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(u8) * COUNT
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(__u8) * COUNT
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(char) * COUNT
      +	COUNT
        , ...)
      |
        vmalloc(
      -	sizeof(unsigned char) * COUNT
      +	COUNT
        , ...)
      )
      
      // 2-factor product with sizeof(type/expression) and identifier or constant.
      @@
      type TYPE;
      expression THING;
      identifier COUNT_ID;
      constant COUNT_CONST;
      @@
      
      (
        vmalloc(
      -	sizeof(TYPE) * (COUNT_ID)
      +	array_size(COUNT_ID, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE) * COUNT_ID
      +	array_size(COUNT_ID, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE) * (COUNT_CONST)
      +	array_size(COUNT_CONST, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE) * COUNT_CONST
      +	array_size(COUNT_CONST, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * (COUNT_ID)
      +	array_size(COUNT_ID, sizeof(THING))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * COUNT_ID
      +	array_size(COUNT_ID, sizeof(THING))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * (COUNT_CONST)
      +	array_size(COUNT_CONST, sizeof(THING))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * COUNT_CONST
      +	array_size(COUNT_CONST, sizeof(THING))
        , ...)
      )
      
      // 2-factor product, only identifiers.
      @@
      identifier SIZE, COUNT;
      @@
      
        vmalloc(
      -	SIZE * COUNT
      +	array_size(COUNT, SIZE)
        , ...)
      
      // 3-factor product with 1 sizeof(type) or sizeof(expression), with
      // redundant parens removed.
      @@
      expression THING;
      identifier STRIDE, COUNT;
      type TYPE;
      @@
      
      (
        vmalloc(
      -	sizeof(TYPE) * (COUNT) * (STRIDE)
      +	array3_size(COUNT, STRIDE, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE) * (COUNT) * STRIDE
      +	array3_size(COUNT, STRIDE, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE) * COUNT * (STRIDE)
      +	array3_size(COUNT, STRIDE, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE) * COUNT * STRIDE
      +	array3_size(COUNT, STRIDE, sizeof(TYPE))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * (COUNT) * (STRIDE)
      +	array3_size(COUNT, STRIDE, sizeof(THING))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * (COUNT) * STRIDE
      +	array3_size(COUNT, STRIDE, sizeof(THING))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * COUNT * (STRIDE)
      +	array3_size(COUNT, STRIDE, sizeof(THING))
        , ...)
      |
        vmalloc(
      -	sizeof(THING) * COUNT * STRIDE
      +	array3_size(COUNT, STRIDE, sizeof(THING))
        , ...)
      )
      
      // 3-factor product with 2 sizeof(variable), with redundant parens removed.
      @@
      expression THING1, THING2;
      identifier COUNT;
      type TYPE1, TYPE2;
      @@
      
      (
        vmalloc(
      -	sizeof(TYPE1) * sizeof(TYPE2) * COUNT
      +	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
      +	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
        , ...)
      |
        vmalloc(
      -	sizeof(THING1) * sizeof(THING2) * COUNT
      +	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
        , ...)
      |
        vmalloc(
      -	sizeof(THING1) * sizeof(THING2) * (COUNT)
      +	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE1) * sizeof(THING2) * COUNT
      +	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
        , ...)
      |
        vmalloc(
      -	sizeof(TYPE1) * sizeof(THING2) * (COUNT)
      +	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
        , ...)
      )
      
      // 3-factor product, only identifiers, with redundant parens removed.
      @@
      identifier STRIDE, SIZE, COUNT;
      @@
      
      (
        vmalloc(
      -	(COUNT) * STRIDE * SIZE
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	COUNT * (STRIDE) * SIZE
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	COUNT * STRIDE * (SIZE)
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	(COUNT) * (STRIDE) * SIZE
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	COUNT * (STRIDE) * (SIZE)
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	(COUNT) * STRIDE * (SIZE)
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	(COUNT) * (STRIDE) * (SIZE)
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      |
        vmalloc(
      -	COUNT * STRIDE * SIZE
      +	array3_size(COUNT, STRIDE, SIZE)
        , ...)
      )
      
      // Any remaining multi-factor products, first at least 3-factor products
      // when they're not all constants...
      @@
      expression E1, E2, E3;
      constant C1, C2, C3;
      @@
      
      (
        vmalloc(C1 * C2 * C3, ...)
      |
        vmalloc(
      -	E1 * E2 * E3
      +	array3_size(E1, E2, E3)
        , ...)
      )
      
      // And then all remaining 2 factors products when they're not all constants.
      @@
      expression E1, E2;
      constant C1, C2;
      @@
      
      (
        vmalloc(C1 * C2, ...)
      |
        vmalloc(
      -	E1 * E2
      +	array_size(E1, E2)
        , ...)
      )
      Signed-off-by: default avatarKees Cook <keescook@chromium.org>
      42bc47b3
  2. 23 Jun, 2017 2 commits
    • Jeff Mahoney's avatar
      reiserfs: fix race in prealloc discard · 08db141b
      Jeff Mahoney authored
      The main loop in __discard_prealloc is protected by the reiserfs write lock
      which is dropped across schedules like the BKL it replaced.  The problem is
      that it checks the value, calls a routine that schedules, and then adjusts
      the state.  As a result, two threads that are calling
      reiserfs_prealloc_discard at the same time can race when one calls
      reiserfs_free_prealloc_block, the lock is dropped, and the other calls
      reiserfs_free_prealloc_block with the same block number.  In the right
      circumstances, it can cause the prealloc count to go negative.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Signed-off-by: default avatarJan Kara <jack@suse.cz>
      08db141b
    • Jeff Mahoney's avatar
      reiserfs: don't preallocate blocks for extended attributes · 54930dfe
      Jeff Mahoney authored
      Most extended attributes will fit in a single block.  More importantly,
      we drop the reference to the inode while holding the transaction open
      so the preallocated blocks aren't released.  As a result, the inode
      may be evicted before it's removed from the transaction's prealloc list
      which can cause memory corruption.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Signed-off-by: default avatarJan Kara <jack@suse.cz>
      54930dfe
  3. 06 Jun, 2014 1 commit
  4. 06 May, 2014 5 commits
  5. 08 Aug, 2013 2 commits
    • Jeff Mahoney's avatar
      reiserfs: locking, release lock around quota operations · d2d0395f
      Jeff Mahoney authored
      Previous commits released the write lock across quota operations but
      missed several places.  In particular, the free operations can also
      call into the file system code and take the write lock, causing
      deadlocks.
      
      This patch introduces some more helpers and uses them for quota call
      sites.  Without this patch applied, reiserfs + quotas runs into deadlocks
      under anything more than trivial load.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      d2d0395f
    • Jeff Mahoney's avatar
      reiserfs: locking, handle nested locks properly · 278f6679
      Jeff Mahoney authored
      The reiserfs write lock replaced the BKL and uses similar semantics.
      
      Frederic's locking code makes a distinction between when the lock is nested
      and when it's being acquired/released, but I don't think that's the right
      distinction to make.
      
      The right distinction is between the lock being released at end-of-use and
      the lock being released for a schedule. The unlock should return the depth
      and the lock should restore it, rather than the other way around as it is now.
      
      This patch implements that and adds a number of places where the lock
      should be dropped.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      278f6679
  6. 14 Aug, 2012 1 commit
    • Jeff Mahoney's avatar
      reiserfs: fix deadlocks with quotas · 48d17884
      Jeff Mahoney authored
      The BKL push-down for reiserfs made lock recursion a special case that needs
      to be handled explicitly. One of the cases that was unhandled is dropping
      the quota during inode eviction. Both reiserfs_evict_inode and
      reiserfs_write_dquot take the write lock, but when the journal lock is
      taken it only drops one of the references. The locking rules are that the
      journal lock be acquired before the write lock, so leaving the reference
      open leads to an ABBA deadlock.
      
      This patch pushes the unlock up before clear_inode and avoids the recursive
      locking.
      
      Another ABBA situation can occur when the write lock is dropped while reading
      the bitmap buffer while in the quota code. When the lock is reacquired, it
      will deadlock against dquot->dq_lock and dqopt->dqio_mutex in the dquot_acquire
      path. It's safe to retain the lock across the read and should be cached under
      write load.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Signed-off-by: default avatarJan Kara <jack@suse.cz>
      48d17884
  7. 21 Mar, 2012 2 commits
  8. 11 Jan, 2012 1 commit
  9. 07 Jan, 2012 1 commit
  10. 26 Jul, 2011 2 commits
  11. 04 Mar, 2010 1 commit
    • Christoph Hellwig's avatar
      dquot: cleanup space allocation / freeing routines · 5dd4056d
      Christoph Hellwig authored
      Get rid of the alloc_space, free_space, reserve_space, claim_space and
      release_rsv dquot operations - they are always called from the filesystem
      and if a filesystem really needs their own (which none currently does)
      it can just call into its own routine directly.
      
      Move shared logic into the common __dquot_alloc_space,
      dquot_claim_space_nodirty and __dquot_free_space low-level methods,
      and rationalize the wrappers around it to move as much as possible
      code into the common block for CONFIG_QUOTA vs not.  Also rename
      all these helpers to be named dquot_* instead of vfs_dq_*.
      Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
      Signed-off-by: default avatarJan Kara <jack@suse.cz>
      5dd4056d
  12. 09 Feb, 2010 1 commit
  13. 14 Dec, 2009 1 commit
    • Frederic Weisbecker's avatar
      reiserfs: Fix possible recursive lock · 500f5a0b
      Frederic Weisbecker authored
      While allocating the bitmap using vmalloc, we hold the reiserfs lock,
      which makes lockdep later reporting a possible deadlock as we may
      swap out pages to allocate memory and then take the reiserfs lock
      recursively:
      
      inconsistent {RECLAIM_FS-ON-W} -> {IN-RECLAIM_FS-W} usage.
      kswapd0/312 [HC0[0]:SC0[0]:HE1:SE1] takes:
       (&REISERFS_SB(s)->lock){+.+.?.}, at: [<c11108a8>] reiserfs_write_lock+0x28/0x40
      {RECLAIM_FS-ON-W} state was registered at:
        [<c104e1c2>] mark_held_locks+0x62/0x90
        [<c104e28a>] lockdep_trace_alloc+0x9a/0xc0
        [<c108e396>] kmem_cache_alloc+0x26/0xf0
        [<c10850ec>] __get_vm_area_node+0x6c/0xf0
        [<c10857de>] __vmalloc_node+0x7e/0xa0
        [<c108597b>] vmalloc+0x2b/0x30
        [<c10e00b9>] reiserfs_init_bitmap_cache+0x39/0x70
        [<c10f8178>] reiserfs_fill_super+0x2e8/0xb90
        [<c1094345>] get_sb_bdev+0x145/0x180
        [<c10f5a11>] get_super_block+0x21/0x30
        [<c10931f0>] vfs_kern_mount+0x40/0xd0
        [<c10932d9>] do_kern_mount+0x39/0xd0
        [<c10a9857>] do_mount+0x2c7/0x6b0
        [<c10a9ca6>] sys_mount+0x66/0xa0
        [<c161589b>] mount_block_root+0xc4/0x245
        [<c1615a75>] mount_root+0x59/0x5f
        [<c1615b8c>] prepare_namespace+0x111/0x14b
        [<c1615269>] kernel_init+0xcf/0xdb
        [<c10031fb>] kernel_thread_helper+0x7/0x1c
      
      This is actually fine for two reasons: we call vmalloc at mount time
      then it's not in the swapping out path. Also the reiserfs lock can be
      acquired recursively, but since its implementation depends on a mutex,
      it's hard and not necessarily worth it to teach that to lockdep.
      
      The lock is useless at mount time anyway, at least until we replay the
      journal. But let's remove it from this path later as this needs
      more thinking and is a sensitive change.
      
      For now we can just relax the lock around vmalloc.
      Reported-by: default avatarAlexander Beregalov <a.beregalov@gmail.com>
      Signed-off-by: default avatarFrederic Weisbecker <fweisbec@gmail.com>
      Cc: Chris Mason <chris.mason@oracle.com>
      Cc: Ingo Molnar <mingo@elte.hu>
      Cc: Thomas Gleixner <tglx@linutronix.de>
      500f5a0b
  14. 14 Sep, 2009 2 commits
    • Frederic Weisbecker's avatar
      kill-the-BKL/reiserfs: release the write lock inside reiserfs_read_bitmap_block() · 4c5eface
      Frederic Weisbecker authored
      reiserfs_read_bitmap_block() uses sb_bread() to read the bitmap block. This
      helper might sleep.
      
      Then, when the bkl was used, it was released at this point. We can then
      relax the write lock too here.
      
      [ Impact: release the reiserfs write lock when it is not needed ]
      
      Cc: Jeff Mahoney <jeffm@suse.com>
      Cc: Chris Mason <chris.mason@oracle.com>
      Cc: Alexander Beregalov <a.beregalov@gmail.com>
      Signed-off-by: default avatarFrederic Weisbecker <fweisbec@gmail.com>
      4c5eface
    • Frederic Weisbecker's avatar
      reiserfs: kill-the-BKL · 8ebc4232
      Frederic Weisbecker authored
      This patch is an attempt to remove the BKL-based locking scheme from
      reiserfs.
      
      It is a bit inspired from an old attempt by Peter Zijlstra:
      
         http://lkml.indiana.edu/hypermail/linux/kernel/0704.2/2174.html
      
      The bkl is heavily used in this filesystem to prevent from
      concurrent write accesses on the filesystem.
      
      Reiserfs makes a deep use of the specific properties of the Bkl:
      
      - It can be acquired recursively by the same task
      - It is released on the schedule() calls and reacquired when schedule() returns
      
      The two properties above are a roadmap for the reiserfs write locking so it's
      very hard to simply replace it with a common mutex.
      
      - We need a recursive-able locking unless we want to restructure several blocks
        of the code.
      - We need to identify the sites where the bkl was implicitly relaxed
        (schedule, wait, sync, etc...) so that we can in turn release and
        reacquire our new lock explicitly.
        Such implicit releases of the lock are often required to let other
        resources producer/consumer do their job or we can suffer unexpected
        starvations or deadlocks.
      
      So the new lock that replaces the bkl here is a per superblock mutex with a
      specific property: it can be acquired recursively by the same task, like the
      bkl.
      
      For such purpose, we integrate a lock owner and a lock depth field on the
      superblock information structure.
      
      The first axis on this patch is to turn reiserfs_write_(un)lock() function
      into a wrapper to manage this mutex. Also some explicit calls to
      lock_kernel() have been converted to reiserfs_write_lock() helpers.
      
      The second axis is to find the important blocking sites (schedule...(),
      wait_on_buffer(), sync_dirty_buffer(), etc...) and then apply an explicit
      release of the write lock on these locations before blocking. Then we can
      safely wait for those who can give us resources or those who need some.
      Typically this is a fight between the current writer, the reiserfs workqueue
      (aka the async committer) and the pdflush threads.
      
      The third axis is a consequence of the second. The write lock is usually
      on top of a lock dependency chain which can include the journal lock, the
      flush lock or the commit lock. So it's dangerous to release and trying to
      reacquire the write lock while we still hold other locks.
      
      This is fine with the bkl:
      
            T1                       T2
      
      lock_kernel()
          mutex_lock(A)
          unlock_kernel()
          // do something
                                  lock_kernel()
                                      mutex_lock(A) -> already locked by T1
                                      schedule() (and then unlock_kernel())
          lock_kernel()
          mutex_unlock(A)
          ....
      
      This is not fine with a mutex:
      
            T1                       T2
      
      mutex_lock(write)
          mutex_lock(A)
          mutex_unlock(write)
          // do something
                                 mutex_lock(write)
                                    mutex_lock(A) -> already locked by T1
                                    schedule()
      
          mutex_lock(write) -> already locked by T2
          deadlock
      
      The solution in this patch is to provide a helper which releases the write
      lock and sleep a bit if we can't lock a mutex that depend on it. It's another
      simulation of the bkl behaviour.
      
      The last axis is to locate the fs callbacks that are called with the bkl held,
      according to Documentation/filesystems/Locking.
      
      Those are:
      
      - reiserfs_remount
      - reiserfs_fill_super
      - reiserfs_put_super
      
      Reiserfs didn't need to explicitly lock because of the context of these callbacks.
      But now we must take care of that with the new locking.
      
      After this patch, reiserfs suffers from a slight performance regression (for now).
      On UP, a high volume write with dd reports an average of 27 MB/s instead
      of 30 MB/s without the patch applied.
      Signed-off-by: default avatarFrederic Weisbecker <fweisbec@gmail.com>
      Reviewed-by: default avatarIngo Molnar <mingo@elte.hu>
      Cc: Jeff Mahoney <jeffm@suse.com>
      Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
      Cc: Bron Gondwana <brong@fastmail.fm>
      Cc: Andrew Morton <akpm@linux-foundation.org>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Alexander Viro <viro@zeniv.linux.org.uk>
      LKML-Reference: <1239070789-13354-1-git-send-email-fweisbec@gmail.com>
      Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
      8ebc4232
  15. 30 Mar, 2009 3 commits
  16. 26 Mar, 2009 1 commit
  17. 28 Apr, 2008 1 commit
  18. 03 Feb, 2008 1 commit
  19. 19 Oct, 2007 4 commits
  20. 17 Oct, 2007 1 commit
  21. 08 Dec, 2006 1 commit
  22. 07 Oct, 2006 1 commit
  23. 01 Oct, 2006 4 commits
    • Jeff Mahoney's avatar
      [PATCH] reiserfs: eliminate minimum window size for bitmap searching · 9ea0f949
      Jeff Mahoney authored
      When a file system becomes fragmented (using MythTV, for example), the
      bigalloc window searching ends up causing huge performance problems.  In a
      file system presented by a user experiencing this bug, the file system was
      90% free, but no 32-block free windows existed on the entire file system.
      This causes the allocator to scan the entire file system for each 128k
      write before backing down to searching for individual blocks.
      
      In the end, finding a contiguous window for all the blocks in a write is an
      advantageous special case, but one that can be found naturally when such a
      window exists anyway.
      
      This patch removes the bigalloc window searching, and has been proven to
      fix the test case described above.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
      Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
      9ea0f949
    • Jeff Mahoney's avatar
      [PATCH] reiserfs: on-demand bitmap loading · 5065227b
      Jeff Mahoney authored
      This is the patch the three previous ones have been leading up to.
      
      It changes the behavior of ReiserFS from loading and caching all the bitmaps
      as special, to treating the bitmaps like any other bit of metadata and just
      letting the system-wide caches figure out what to hang on to.
      
      Buffer heads are allocated on the fly, so there is no need to retain pointers
      to all of them.  The caching of the metadata occurs when the data is read and
      updated, and is considered invalid and uncached until then.
      
      I needed to remove the vs-4040 check for performing a duplicate operation on a
      particular bit.  The reason is that while the other sites for working with
      bitmaps are allowed to schedule, is_reusable() is called from do_balance(),
      which will panic if a schedule occurs in certain places.
      
      The benefit of on-demand bitmaps clearly outweighs a sanity check that depends
      on a compile-time option that is discouraged.
      
      [akpm@osdl.org: warning fix]
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Cc: <reiserfs-dev@namesys.com>
      Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
      Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
      5065227b
    • Jeff Mahoney's avatar
      [PATCH] reiserfs: reorganize bitmap loading functions · 6f01046b
      Jeff Mahoney authored
      This patch moves the bitmap loading code from super.c to bitmap.c
      
      The code is also restructured somewhat.  The only difference between new
      format bitmaps and old format bitmaps is where they are.  That's a two liner
      before loading the block to use the correct one.  There's no need for an
      entirely separate code path.
      
      The load path is generally the same, with the pattern being to throw out a
      bunch of requests and then wait for them, then cache the metadata from the
      contents.
      
      Again, like the previous patches, the purpose is to set up for later ones.
      
      Update: There was a bug in the previously posted version of this that resulted
      in corruption.  The problem was that bitmap 0 on new format file systems must
      be treated specially, and wasn't.  A stupid bug with an easy fix.
      
      This is hopefully the last fix for the disaster that is the reiserfs bitmap
      patch set.
      
      If a bitmap block was full, first_zero_hint would end up at zero since it
      would never be changed from its zeroed-out value.  This just sets it
      beyond the end of the bitmap block.  If any bits are freed, it will be
      reset to a valid bit.  When info->free_count = 0, then we already know it's
      full.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Cc: <reiserfs-dev@namesys.com>
      Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
      Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
      6f01046b
    • Jeff Mahoney's avatar
      [PATCH] reiserfs: clean up bitmap block buffer head references · 0b3dc17b
      Jeff Mahoney authored
      Similar to the SB_JOURNAL cleanup that was accepted a while ago, this patch
      uses a temporary variable for buffer head references from the bitmap info
      array.
      
      This makes the code much more readable in some areas.
      
      It also uses proper reference counting, doing a get_bh() after using the
      pointer from the array and brelse()'ing it later.  This may seem silly, but a
      later patch will replace the simple temporary variables with an actual read,
      so the reference freeing will be used then.
      Signed-off-by: default avatarJeff Mahoney <jeffm@suse.com>
      Cc: <reiserfs-dev@namesys.com>
      Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
      Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
      0b3dc17b