Commit c49c3111 authored by Richard Knutsson, committed by Linus Torvalds

[PATCH] fs/ntfs: Conversion to generic boolean

Conversion of booleans to: generic-boolean.patch (2006-08-23)
Signed-off-by: Richard Knutsson <ricknu-0@student.ltu.se>
Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6e218287
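
For context: before this patch, fs/ntfs carried its own boolean in types.h, roughly as follows (a sketch; the exact definition may differ slightly), which the patch replaces with the kernel-wide C99 type:

/* Old NTFS-private boolean (sketch of the definition this patch removes). */
typedef enum {
	FALSE = 0,
	TRUE = 1
} BOOL;

/* After the patch: the generic type from <linux/types.h>. */
bool flag = false;	/* was: BOOL flag = FALSE; */

The diff below is therefore almost entirely mechanical: BOOL becomes bool, TRUE becomes true, and FALSE becomes false, in code and in kernel-doc comments alike.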
@@ -254,7 +254,7 @@ static int ntfs_read_block(struct page *page)
bh->b_bdev = vol->sb->s_bdev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
-BOOL is_retry = FALSE;
+bool is_retry = false;
/* Convert iblock into corresponding vcn and offset. */
vcn = (VCN)iblock << blocksize_bits >>
@@ -292,7 +292,7 @@ static int ntfs_read_block(struct page *page)
goto handle_hole;
/* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-is_retry = TRUE;
+is_retry = true;
/*
* Attempt to map runlist, dropping lock for
* the duration.
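
The is_retry flag in these hunks implements a retry-once idiom: do the cheap runlist lookup first, and only if the relevant fragment is not yet mapped, map it and loop back exactly once. A minimal sketch of the shape, using the real ntfs_rl_vcn_to_lcn() and ntfs_map_runlist() helpers but with the locking elided (real callers drop and re-take the runlist lock around the mapping call):

static LCN vcn_to_lcn_with_remap(ntfs_inode *ni, runlist_element *rl, VCN vcn)
{
	bool is_retry = false;
	LCN lcn;

retry_remap:
	lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
	if (lcn == LCN_RL_NOT_MAPPED && !is_retry &&
			!ntfs_map_runlist(ni, vcn)) {
		rl = ni->runlist.rl;	/* re-fetch; mapping may reallocate it */
		is_retry = true;	/* permit exactly one retry */
		goto retry_remap;
	}
	return lcn;
}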
@@ -558,7 +558,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
unsigned long flags;
unsigned int blocksize, vcn_ofs;
int err;
-BOOL need_end_writeback;
+bool need_end_writeback;
unsigned char blocksize_bits;
vi = page->mapping->host;
@@ -626,7 +626,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
rl = NULL;
err = 0;
do {
-BOOL is_retry = FALSE;
+bool is_retry = false;
if (unlikely(block >= dblock)) {
/*
@@ -768,7 +768,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
}
/* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-is_retry = TRUE;
+is_retry = true;
/*
* Attempt to map runlist, dropping lock for
* the duration.
@@ -874,12 +874,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
set_page_writeback(page); /* Keeps try_to_free_buffers() away. */
/* Submit the prepared buffers for i/o. */
-need_end_writeback = TRUE;
+need_end_writeback = true;
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh(WRITE, bh);
-need_end_writeback = FALSE;
+need_end_writeback = false;
}
bh = next;
} while (bh != head);
@@ -932,7 +932,7 @@ static int ntfs_write_mst_block(struct page *page,
runlist_element *rl;
int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
unsigned bh_size, rec_size_bits;
-BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+bool sync, is_mft, page_is_dirty, rec_is_dirty;
unsigned char bh_size_bits;
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
@@ -975,10 +975,10 @@ static int ntfs_write_mst_block(struct page *page,
rl = NULL;
err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
-page_is_dirty = rec_is_dirty = FALSE;
+page_is_dirty = rec_is_dirty = false;
rec_start_bh = NULL;
do {
-BOOL is_retry = FALSE;
+bool is_retry = false;
if (likely(block < rec_block)) {
if (unlikely(block >= dblock)) {
@@ -1009,10 +1009,10 @@ static int ntfs_write_mst_block(struct page *page,
}
if (!buffer_dirty(bh)) {
/* Clean records are not written out. */
-rec_is_dirty = FALSE;
+rec_is_dirty = false;
continue;
}
-rec_is_dirty = TRUE;
+rec_is_dirty = true;
rec_start_bh = bh;
}
/* Need to map the buffer if it is not mapped already. */
@@ -1053,7 +1053,7 @@ static int ntfs_write_mst_block(struct page *page,
*/
if (!is_mft && !is_retry &&
lcn == LCN_RL_NOT_MAPPED) {
-is_retry = TRUE;
+is_retry = true;
/*
* Attempt to map runlist, dropping
* lock for the duration.
@@ -1063,7 +1063,7 @@ static int ntfs_write_mst_block(struct page *page,
if (likely(!err2))
goto lock_retry_remap;
if (err2 == -ENOMEM)
-page_is_dirty = TRUE;
+page_is_dirty = true;
lcn = err2;
} else {
err2 = -EIO;
@@ -1145,7 +1145,7 @@ static int ntfs_write_mst_block(struct page *page,
* means we need to redirty the page before
* returning.
*/
-page_is_dirty = TRUE;
+page_is_dirty = true;
/*
* Remove the buffers in this mft record from
* the list of buffers to write.
......
@@ -80,7 +80,7 @@ static inline void ntfs_unmap_page(struct page *page)
*
* The unlocked and uptodate page is returned on success or an encoded error
* on failure. Caller has to test for error using the IS_ERR() macro on the
-* return value. If that evaluates to TRUE, the negative error code can be
+* return value. If that evaluates to 'true', the negative error code can be
* obtained using PTR_ERR() on the return value of ntfs_map_page().
*/
static inline struct page *ntfs_map_page(struct address_space *mapping,
......
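
The error convention referenced above is the kernel's usual ERR_PTR() encoding, so a typical caller of ntfs_map_page() looks roughly like this (sketch):

page = ntfs_map_page(mapping, index);
if (IS_ERR(page))
	return PTR_ERR(page);	/* the negative errno encoded in the pointer */
/* ... read the mapped, uptodate page ... */
ntfs_unmap_page(page);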
@@ -67,7 +67,7 @@
* the attribute has zero allocated size, i.e. there simply is no runlist.
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
-* returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
@@ -90,7 +90,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
runlist_element *rl;
struct page *put_this_page = NULL;
int err = 0;
-BOOL ctx_is_temporary, ctx_needs_reset;
+bool ctx_is_temporary, ctx_needs_reset;
ntfs_attr_search_ctx old_ctx = { NULL, };
ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
@@ -100,7 +100,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
else
base_ni = ni->ext.base_ntfs_ino;
if (!ctx) {
-ctx_is_temporary = ctx_needs_reset = TRUE;
+ctx_is_temporary = ctx_needs_reset = true;
m = map_mft_record(base_ni);
if (IS_ERR(m))
return PTR_ERR(m);
@@ -115,7 +115,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
BUG_ON(IS_ERR(ctx->mrec));
a = ctx->attr;
BUG_ON(!a->non_resident);
-ctx_is_temporary = FALSE;
+ctx_is_temporary = false;
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
read_lock_irqsave(&ni->size_lock, flags);
allocated_size_vcn = ni->allocated_size >>
@@ -136,7 +136,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
ni->name, ni->name_len) &&
sle64_to_cpu(a->data.non_resident.lowest_vcn)
<= vcn && end_vcn >= vcn))
-ctx_needs_reset = FALSE;
+ctx_needs_reset = false;
else {
/* Save the old search context. */
old_ctx = *ctx;
@@ -158,7 +158,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
* needed attribute extent.
*/
ntfs_attr_reinit_search_ctx(ctx);
-ctx_needs_reset = TRUE;
+ctx_needs_reset = true;
}
}
if (ctx_needs_reset) {
@@ -336,16 +336,16 @@ int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
* LCN_EIO Critical error (runlist/file is corrupt, i/o error, etc).
*
* Locking: - The runlist must be locked on entry and is left locked on return.
-* - If @write_locked is FALSE, i.e. the runlist is locked for reading,
+* - If @write_locked is 'false', i.e. the runlist is locked for reading,
* the lock may be dropped inside the function so you cannot rely on
* the runlist still being the same when this function returns.
*/
LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
-const BOOL write_locked)
+const bool write_locked)
{
LCN lcn;
unsigned long flags;
-BOOL is_retry = FALSE;
+bool is_retry = false;
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
ni->mft_no, (unsigned long long)vcn,
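
A caller that honours this locking contract appears later in this same patch (in ntfs_file_buffered_write()); the shape is roughly:

down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >> vol->cluster_size_bits,
		false);	/* read lock: may be dropped and re-taken inside */
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE))
	return -EIO;	/* or map LCN_ENOENT/LCN_ENOMEM/LCN_EIO individually */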
@@ -390,7 +390,7 @@ LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
down_read(&ni->runlist.lock);
}
if (likely(!err)) {
-is_retry = TRUE;
+is_retry = true;
goto retry_remap;
}
if (err == -ENOENT)
@@ -449,7 +449,7 @@ LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
* -EIO - Critical error (runlist/file is corrupt, i/o error, etc).
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
-* returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
@@ -469,7 +469,7 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
unsigned long flags;
runlist_element *rl;
int err = 0;
-BOOL is_retry = FALSE;
+bool is_retry = false;
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
@@ -518,7 +518,7 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
*/
err = ntfs_map_runlist_nolock(ni, vcn, ctx);
if (likely(!err)) {
-is_retry = TRUE;
+is_retry = true;
goto retry_remap;
}
}
@@ -558,8 +558,8 @@ runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
* On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
* undefined and in particular do not rely on it not changing.
*
-* If @ctx->is_first is TRUE, the search begins with @ctx->attr itself. If it
-* is FALSE, the search begins after @ctx->attr.
+* If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
+* is 'false', the search begins after @ctx->attr.
*
* If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
* @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
@@ -599,11 +599,11 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
/*
* Iterate over attributes in mft record starting at @ctx->attr, or the
-* attribute following that, if @ctx->is_first is TRUE.
+* attribute following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
a = ctx->attr;
-ctx->is_first = FALSE;
+ctx->is_first = false;
} else
a = (ATTR_RECORD*)((u8*)ctx->attr +
le32_to_cpu(ctx->attr->length));
@@ -890,11 +890,11 @@ static int ntfs_external_attr_find(const ATTR_TYPE type,
ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
/*
* Iterate over entries in attribute list starting at @ctx->al_entry,
-* or the entry following that, if @ctx->is_first is TRUE.
+* or the entry following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
al_entry = ctx->al_entry;
-ctx->is_first = FALSE;
+ctx->is_first = false;
} else
al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
le16_to_cpu(ctx->al_entry->length));
@@ -1127,7 +1127,7 @@ static int ntfs_external_attr_find(const ATTR_TYPE type,
ctx->mrec = ctx->base_mrec;
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
-ctx->is_first = TRUE;
+ctx->is_first = true;
ctx->ntfs_ino = base_ni;
ctx->base_ntfs_ino = NULL;
ctx->base_mrec = NULL;
@@ -1224,7 +1224,7 @@ static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
/* Sanity checks are performed elsewhere. */
.attr = (ATTR_RECORD*)((u8*)mrec +
le16_to_cpu(mrec->attrs_offset)),
-.is_first = TRUE,
+.is_first = true,
.ntfs_ino = ni,
};
}
@@ -1243,7 +1243,7 @@ void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (likely(!ctx->base_ntfs_ino)) {
/* No attribute list. */
-ctx->is_first = TRUE;
+ctx->is_first = true;
/* Sanity checks are performed elsewhere. */
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
@@ -1585,7 +1585,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
return -ENOMEM;
/* Start by allocating clusters to hold the attribute value. */
rl = ntfs_cluster_alloc(vol, 0, new_size >>
-vol->cluster_size_bits, -1, DATA_ZONE, TRUE);
+vol->cluster_size_bits, -1, DATA_ZONE, true);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
ntfs_debug("Failed to allocate cluster%s, error code "
@@ -1919,7 +1919,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
unsigned long flags;
int err, mp_size;
u32 attr_len = 0; /* Silence stupid gcc warning. */
-BOOL mp_rebuilt;
+bool mp_rebuilt;
#ifdef NTFS_DEBUG
read_lock_irqsave(&ni->size_lock, flags);
@@ -2222,7 +2222,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
(new_alloc_size - allocated_size) >>
vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
-rl->lcn + rl->length : -1, DATA_ZONE, TRUE);
+rl->lcn + rl->length : -1, DATA_ZONE, true);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
if (start < 0 || start >= allocated_size)
@@ -2265,7 +2265,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
BUG_ON(!rl2);
BUG_ON(!rl2->length);
BUG_ON(rl2->lcn < LCN_HOLE);
-mp_rebuilt = FALSE;
+mp_rebuilt = false;
/* Get the size for the new mapping pairs array for this extent. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
if (unlikely(mp_size <= 0)) {
@@ -2300,7 +2300,7 @@ s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
err = -EOPNOTSUPP;
goto undo_alloc;
}
-mp_rebuilt = TRUE;
+mp_rebuilt = true;
/* Generate the mapping pairs array directly into the attr record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
......
@@ -40,10 +40,10 @@
* Structure must be initialized to zero before the first call to one of the
* attribute search functions. Initialize @mrec to point to the mft record to
* search, and @attr to point to the first attribute within @mrec (not necessary
-* if calling the _first() functions), and set @is_first to TRUE (not necessary
+* if calling the _first() functions), and set @is_first to 'true' (not necessary
* if calling the _first() functions).
*
-* If @is_first is TRUE, the search begins with @attr. If @is_first is FALSE,
+* If @is_first is 'true', the search begins with @attr. If @is_first is 'false',
* the search begins after @attr. This is so that, after the first call to one
* of the search attribute functions, we can call the function again, without
* any modification of the search context, to automagically get the next
@@ -52,7 +52,7 @@
typedef struct {
MFT_RECORD *mrec;
ATTR_RECORD *attr;
-BOOL is_first;
+bool is_first;
ntfs_inode *ntfs_ino;
ATTR_LIST_ENTRY *al_entry;
ntfs_inode *base_ntfs_ino;
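
The @is_first handshake above is what makes repeated lookups work: the search functions clear the flag after the first call, so the same context can be passed again to fetch the next match. Roughly (a sketch using the public ntfs_attr_get_search_ctx()/ntfs_attr_lookup()/ntfs_attr_put_search_ctx() trio; the per-match consumer is hypothetical):

ctx = ntfs_attr_get_search_ctx(ni, mrec);
if (unlikely(!ctx))
	return -ENOMEM;
while (!ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
		ctx))
	process_attr(ctx->attr);	/* hypothetical consumer */
ntfs_attr_put_search_ctx(ctx);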
@@ -65,7 +65,7 @@ extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);
extern LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
-const BOOL write_locked);
+const bool write_locked);
extern runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni,
const VCN vcn, ntfs_attr_search_ctx *ctx);
......
@@ -34,18 +34,18 @@
* @start_bit: first bit to set
* @count: number of bits to set
* @value: value to set the bits to (i.e. 0 or 1)
-* @is_rollback: if TRUE this is a rollback operation
+* @is_rollback: if 'true' this is a rollback operation
*
* Set @count bits starting at bit @start_bit in the bitmap described by the
* vfs inode @vi to @value, where @value is either 0 or 1.
*
-* @is_rollback should always be FALSE, it is for internal use to rollback
+* @is_rollback should always be 'false', it is for internal use to rollback
* errors. You probably want to use ntfs_bitmap_set_bits_in_run() instead.
*
* Return 0 on success and -errno on error.
*/
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
-const s64 count, const u8 value, const BOOL is_rollback)
+const s64 count, const u8 value, const bool is_rollback)
{
s64 cnt = count;
pgoff_t index, end_index;
@@ -172,7 +172,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
return PTR_ERR(page);
if (count != cnt)
pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
-value ? 0 : 1, TRUE);
+value ? 0 : 1, true);
else
pos = 0;
if (!pos) {
......
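
The rollback path above is why the function recurses into itself near the end: if a multi-page update fails partway, the bits already written are replayed with the inverted value ('value ? 0 : 1') and @is_rollback set to 'true', restoring the bitmap. Normal callers only ever use the wrapper from the header below, along these lines (sketch; the inode and bit run are made up):

/* Set an 8-bit run starting at bit 100 in the bitmap backed by @vi. */
err = ntfs_bitmap_set_bits_in_run(vi, 100, 8, 1);
if (err)
	return err;	/* the partial run was rolled back internally */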
@@ -30,7 +30,7 @@
#include "types.h"
extern int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
-const s64 count, const u8 value, const BOOL is_rollback);
+const s64 count, const u8 value, const bool is_rollback);
/**
* ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
@@ -48,7 +48,7 @@ static inline int ntfs_bitmap_set_bits_in_run(struct inode *vi,
const s64 start_bit, const s64 count, const u8 value)
{
return __ntfs_bitmap_set_bits_in_run(vi, start_bit, count, value,
-FALSE);
+false);
}
/**
......
@@ -26,7 +26,7 @@
#include "types.h"
#include "volume.h"
-static inline BOOL ntfs_is_collation_rule_supported(COLLATION_RULE cr) {
+static inline bool ntfs_is_collation_rule_supported(COLLATION_RULE cr) {
int i;
/*
@@ -35,12 +35,12 @@ static inline BOOL ntfs_is_collation_rule_supported(COLLATION_RULE cr) {
* now.
*/
if (unlikely(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG))
-return FALSE;
+return false;
i = le32_to_cpu(cr);
if (likely(((i >= 0) && (i <= 0x02)) ||
((i >= 0x10) && (i <= 0x13))))
-return TRUE;
-return FALSE;
+return true;
+return false;
}
extern int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
......
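
A plausible call site for the predicate above, guarding the ntfs_collate() entry point declared here (sketch; the errno choice and argument names are illustrative):

if (!ntfs_is_collation_rule_supported(cr))
	return -EOPNOTSUPP;
rc = ntfs_collate(vol, cr, data1, data1_len, data2, data2_len);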
@@ -600,7 +600,7 @@ int ntfs_read_compressed_block(struct page *page)
rl = NULL;
for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
vcn++) {
-BOOL is_retry = FALSE;
+bool is_retry = false;
if (!rl) {
lock_retry_remap:
@@ -626,7 +626,7 @@ int ntfs_read_compressed_block(struct page *page)
break;
if (is_retry || lcn != LCN_RL_NOT_MAPPED)
goto rl_err;
-is_retry = TRUE;
+is_retry = true;
/*
* Attempt to map runlist, dropping lock for the
* duration.
......
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
u32 attr_rec_len = 0;
unsigned blocksize, u;
int err, mp_size;
-BOOL rl_write_locked, was_hole, is_retry;
+bool rl_write_locked, was_hole, is_retry;
unsigned char blocksize_bits;
struct {
u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
return -ENOMEM;
}
} while (++u < nr_pages);
-rl_write_locked = FALSE;
+rl_write_locked = false;
rl = NULL;
err = 0;
vcn = lcn = -1;
vcn_len = 0;
lcn_block = -1;
-was_hole = FALSE;
+was_hole = false;
cpos = pos >> vol->cluster_size_bits;
end = pos + bytes;
cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
}
continue;
}
-is_retry = FALSE;
+is_retry = false;
if (!rl) {
down_read(&ni->runlist.lock);
retry_remap:
@@ -776,7 +776,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
* Successful remap, setup the map cache and
* use that to deal with the buffer.
*/
-was_hole = FALSE;
+was_hole = false;
vcn = bh_cpos;
vcn_len = rl[1].vcn - vcn;
lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (likely(vcn + vcn_len >= cend)) {
if (rl_write_locked) {
up_write(&ni->runlist.lock);
-rl_write_locked = FALSE;
+rl_write_locked = false;
} else
up_read(&ni->runlist.lock);
rl = NULL;
@@ -818,13 +818,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
*/
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
-rl_write_locked = TRUE;
+rl_write_locked = true;
goto retry_remap;
}
err = ntfs_map_runlist_nolock(ni, bh_cpos,
NULL);
if (likely(!err)) {
-is_retry = TRUE;
+is_retry = true;
goto retry_remap;
}
/*
@@ -903,7 +903,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (!rl_write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
-rl_write_locked = TRUE;
+rl_write_locked = true;
goto retry_remap;
}
/* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
}
}
rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
-FALSE);
+false);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
status.mft_attr_mapped = 0;
status.mp_rebuilt = 0;
/* Setup the map cache and use that to deal with the buffer. */
-was_hole = TRUE;
+was_hole = true;
vcn = bh_cpos;
vcn_len = 1;
lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
*/
if (likely(vcn + vcn_len >= cend)) {
up_write(&ni->runlist.lock);
-rl_write_locked = FALSE;
+rl_write_locked = false;
rl = NULL;
}
goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (likely(!err)) {
if (unlikely(rl_write_locked)) {
up_write(&ni->runlist.lock);
-rl_write_locked = FALSE;
+rl_write_locked = false;
} else if (unlikely(rl))
up_read(&ni->runlist.lock);
rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
do {
s64 bh_pos;
struct page *page;
-BOOL partial;
+bool partial;
page = pages[u];
bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
bh = head = page_buffers(page);
-partial = FALSE;
+partial = false;
do {
s64 bh_end;
bh_end = bh_pos + blocksize;
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh))
-partial = TRUE;
+partial = true;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
*/
down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
-vol->cluster_size_bits, FALSE);
+vol->cluster_size_bits, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
status = -EIO;
......
@@ -204,7 +204,7 @@ int ntfs_index_lookup(const void *key, const int key_len,
if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
&ie->key, key_len)) {
ir_done:
-ictx->is_in_root = TRUE;
+ictx->is_in_root = true;
ictx->ir = ir;
ictx->actx = actx;
ictx->base_ni = base_ni;
@@ -374,7 +374,7 @@ int ntfs_index_lookup(const void *key, const int key_len,
if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
&ie->key, key_len)) {
ia_done:
-ictx->is_in_root = FALSE;
+ictx->is_in_root = false;
ictx->actx = NULL;
ictx->base_ni = NULL;
ictx->ia = ia;
......
@@ -37,12 +37,12 @@
* @entry: index entry (points into @ir or @ia)
* @data: index entry data (points into @entry)
* @data_len: length in bytes of @data
-* @is_in_root: TRUE if @entry is in @ir and FALSE if it is in @ia
+* @is_in_root: 'true' if @entry is in @ir and 'false' if it is in @ia
* @ir: index root if @is_in_root and NULL otherwise
* @actx: attribute search context if @is_in_root and NULL otherwise
* @base_ni: base inode if @is_in_root and NULL otherwise
-* @ia: index block if @is_in_root is FALSE and NULL otherwise
-* @page: page if @is_in_root is FALSE and NULL otherwise
+* @ia: index block if @is_in_root is 'false' and NULL otherwise
+* @page: page if @is_in_root is 'false' and NULL otherwise
*
* @idx_ni is the index inode this context belongs to.
*
@@ -50,11 +50,11 @@
* are the index entry data and its length in bytes, respectively. @data
* simply points into @entry. This is probably what the user is interested in.
*
-* If @is_in_root is TRUE, @entry is in the index root attribute @ir described
+* If @is_in_root is 'true', @entry is in the index root attribute @ir described
* by the attribute search context @actx and the base inode @base_ni. @ia and
* @page are NULL in this case.
*
-* If @is_in_root is FALSE, @entry is in the index allocation attribute and @ia
+* If @is_in_root is 'false', @entry is in the index allocation attribute and @ia
* and @page point to the index allocation block and the mapped, locked page it
* is in, respectively. @ir, @actx and @base_ni are NULL in this case.
*
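
A lookup that consumes this flag pairs ntfs_index_ctx_get() with ntfs_index_ctx_put(), which releases either the attribute search context or the mapped page depending on @is_in_root; roughly (sketch, with a made-up key):

ictx = ntfs_index_ctx_get(idx_ni);
if (!ictx)
	return -ENOMEM;
err = ntfs_index_lookup(&key, sizeof(key), ictx);
if (!err) {
	/* ictx->data / ictx->data_len describe the matching entry. */
}
ntfs_index_ctx_put(ictx);	/* frees @actx or releases @page as needed */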
@@ -77,7 +77,7 @@ typedef struct {
INDEX_ENTRY *entry;
void *data;
u16 data_len;
-BOOL is_in_root;
+bool is_in_root;
INDEX_ROOT *ir;
ntfs_attr_search_ctx *actx;
ntfs_inode *base_ni;
......
@@ -2301,7 +2301,7 @@ void ntfs_clear_big_inode(struct inode *vi)
}
#ifdef NTFS_RW
if (NInoDirty(ni)) {
-BOOL was_bad = (is_bad_inode(vi));
+bool was_bad = (is_bad_inode(vi));
/* Committing the inode also commits all extent inodes. */
ntfs_commit_inode(vi);
@@ -3015,7 +3015,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
MFT_RECORD *m;
STANDARD_INFORMATION *si;
int err = 0;
-BOOL modified = FALSE;
+bool modified = false;
ntfs_debug("Entering for %sinode 0x%lx.", NInoAttr(ni) ? "attr " : "",
vi->i_ino);
@@ -3057,7 +3057,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
sle64_to_cpu(si->last_data_change_time),
(long long)sle64_to_cpu(nt));
si->last_data_change_time = nt;
-modified = TRUE;
+modified = true;
}
nt = utc2ntfs(vi->i_ctime);
if (si->last_mft_change_time != nt) {
@@ -3066,7 +3066,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
sle64_to_cpu(si->last_mft_change_time),
(long long)sle64_to_cpu(nt));
si->last_mft_change_time = nt;
-modified = TRUE;
+modified = true;
}
nt = utc2ntfs(vi->i_atime);
if (si->last_access_time != nt) {
@@ -3075,7 +3075,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
(long long)sle64_to_cpu(si->last_access_time),
(long long)sle64_to_cpu(nt));
si->last_access_time = nt;
-modified = TRUE;
+modified = true;
}
/*
* If we just modified the standard information attribute we need to
......
@@ -142,13 +142,13 @@ typedef le32 NTFS_RECORD_TYPE;
* operator! (-8
*/
-static inline BOOL __ntfs_is_magic(le32 x, NTFS_RECORD_TYPE r)
+static inline bool __ntfs_is_magic(le32 x, NTFS_RECORD_TYPE r)
{
return (x == r);
}
#define ntfs_is_magic(x, m) __ntfs_is_magic(x, magic_##m)
-static inline BOOL __ntfs_is_magicp(le32 *p, NTFS_RECORD_TYPE r)
+static inline bool __ntfs_is_magicp(le32 *p, NTFS_RECORD_TYPE r)
{
return (*p == r);
}
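
The ntfs_is_magic() wrapper lets call sites name record types symbolically; for instance, checking a mapped mft record (sketch, assuming the magic_FILE constant defined alongside these helpers):

static inline bool mft_record_is_sane(const MFT_RECORD *m)
{
	/* ntfs_is_magic(x, FILE) expands to __ntfs_is_magic(x, magic_FILE). */
	return ntfs_is_magic(m->magic, FILE);
}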
@@ -323,7 +323,7 @@ typedef le64 leMFT_REF;
#define MREF_LE(x) ((unsigned long)(le64_to_cpu(x) & MFT_REF_MASK_CPU))
#define MSEQNO_LE(x) ((u16)((le64_to_cpu(x) >> 48) & 0xffff))
-#define IS_ERR_MREF(x) (((x) & 0x0000800000000000ULL) ? 1 : 0)
+#define IS_ERR_MREF(x) (((x) & 0x0000800000000000ULL) ? true : false)
#define ERR_MREF(x) ((u64)((s64)(x)))
#define MREF_ERR(x) ((int)((s64)(x)))
......
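
These macros hide an error flag in bit 47 of an mft reference, mirroring the ERR_PTR() trick for pointers; the encode and decode halves pair up like this (sketch):

u64 mref = ERR_MREF(-ENOENT);	/* sign-extension sets the error bit */
if (IS_ERR_MREF(mref))
	return MREF_ERR(mref);	/* recovers -ENOENT */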
@@ -76,7 +76,7 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
* @count: number of clusters to allocate
* @start_lcn: starting lcn at which to allocate the clusters (or -1 if none)
* @zone: zone from which to allocate the clusters
-* @is_extension: if TRUE, this is an attribute extension
+* @is_extension: if 'true', this is an attribute extension
*
* Allocate @count clusters preferably starting at cluster @start_lcn or at the
* current allocator position if @start_lcn is -1, on the mounted ntfs volume
@@ -87,11 +87,11 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
* @start_vcn specifies the vcn of the first allocated cluster. This makes
* merging the resulting runlist with the old runlist easier.
*
-* If @is_extension is TRUE, the caller is allocating clusters to extend an
-* attribute and if it is FALSE, the caller is allocating clusters to fill a
+* If @is_extension is 'true', the caller is allocating clusters to extend an
+* attribute and if it is 'false', the caller is allocating clusters to fill a
* hole in an attribute. Practically the difference is that if @is_extension
-* is TRUE the returned runlist will be terminated with LCN_ENOENT and if
-* @is_extension is FALSE the runlist will be terminated with