/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes of infinite bandwidth) to twice the user's
 * limit.  (In practice, the maximum overrun you see should be much less.)  A
 * "quota_scale" number greater than one makes quota syncs more frequent and
 * reduces the maximum overrun.  Numbers less than one (but greater than zero)
 * make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

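/*
 * The hash mixes in the superblock pointer as well as the quota ID, so a
 * single global table can hold entries from multiple mounts; lookups still
 * compare qd_sbd explicitly in gfs2_qd_search_bucket() below.
 */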
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

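/*
 * gfs2_qd_dispose - free a list of quota data objects isolated from the LRU
 * @list: the objects gathered by gfs2_qd_isolate()
 *
 * Each object is unlinked from its filesystem's quota list and from the hash
 * table, its glock reference is dropped, and the structure itself is freed
 * via RCU once any concurrent hash table walkers have moved on.
 */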
static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}


static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};


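/*
 * The quota file interleaves user and group records: the entry for user ID n
 * lives at index 2n, and the entry for group ID n at index 2n + 1.
 * qd2offset() turns that index into a byte offset within the quota file.
 */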
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}


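/*
 * qd_get - look up or create the quota data for the given quota ID
 *
 * A lock-free RCU lookup in the hash table is tried first.  On a miss, a new
 * object is allocated and the bucket is rechecked under qd_lock and the
 * bucket lock; if a concurrent inserter won the race, its entry is used and
 * the loser's allocation is freed.
 */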
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

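/* Dropping the last reference does not free the object immediately; it is
 * parked on the global LRU so the shrinker can dispose of it under memory
 * pressure. */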
static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}

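/*
 * slot_get - assign the quota data a slot in the per-node quota change file
 *
 * Slots are tracked in sd_quota_bitmap; a qd that already owns a slot just
 * gains another hold on it.  Returns 0 on success or -ENOSPC if every slot
 * is in use.
 */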
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

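/*
 * bh_get - read in the buffer backing this qd's quota change entry
 *
 * The qd's slot number selects a block of (and an offset into) the per-node
 * quota change file; the block is read and qd_bh_qc is pointed at the
 * gfs2_quota_change record so that do_qc() can update it in place.
 */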
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

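/* Decide whether @qd has local changes that still need syncing to the quota
 * file.  Called under qd_lock; when it returns 1 it has taken a reference
 * and a slot hold, which are dropped again in qd_unlock() once the sync
 * completes. */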
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

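/*
 * qdsb_get - get a quota data object with its slot and buffer attached
 *
 * Bundles qd_get(), slot_get() and bh_get(), unwinding on failure;
 * qdsb_put() below releases the three holds in reverse order.
 */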
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

/**
 * gfs2_qa_alloc - make sure we have a quota allocations data structure,
 *                 if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_alloc(struct gfs2_inode *ip)
{
	int error = 0;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata == NULL) {
		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!ip->i_qadata)
			error = -ENOMEM;
	}
	up_write(&ip->i_rw_mutex);
	return error;
}

void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

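/* Attach the quota data for every ID this inode can be charged against: the
 * inode's own uid and gid, plus @uid and @gid where they differ (as during a
 * chown).  At most four quota data objects are attached. */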
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	if (ip->i_qadata == NULL) {
		error = gfs2_rsqa_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}

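/*
 * do_qc - record a local change in this qd's quota change file entry
 * @qd: the quota data
 * @change: the signed number of blocks to add to the pending change
 *
 * The on-disk gfs2_quota_change record and the in-core qd_change counter are
 * kept in step.  When the pending change first becomes nonzero the qd gains
 * a reference and a slot hold (QDF_CHANGE set); when it drops back to zero,
 * both are released.
 */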
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}

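/* Write @bytes bytes from @buf to the quota file at offset @off within page
 * @index, mapping (and, if necessary, allocating and reading) each block
 * that backs the range and adding the buffers to the current transaction. */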
static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;
	int done = 0;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	bh = page_buffers(page);
	while (!done) {
		/* Find the beginning block within the page */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				zero_user(page, bnum * bsize, bh->b_size);
		}
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh)) {
			ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				goto unlock_out;
		}
		gfs2_trans_add_data(ip->i_gl, bh);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		done = 1;
	}

	/* Write to the page, now that we have setup the buffer(s) */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + off, buf, bytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);

	return 0;

unlock_out:
	unlock_page(page);
	put_page(page);
	return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int pg_oflow = 0, error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = loc % PAGE_SIZE;

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE) {
		pg_oflow = 1;
		overflow = (pg_off + nbytes) - PAGE_SIZE;
	}

	ptr = qp;
	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && pg_oflow)
		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(ip, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode->i_atime = current_time(inode);
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}

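/*
 * do_sync - sync a batch of locally accumulated quota changes to the quota file
 * @num_qd: the number of quota data objects in @qda
 * @qda: the quota data to sync, with the pending deltas in qd_change_sync
 *
 * Takes the glock of every qd plus the quota inode's glock exclusively, then,
 * in a single transaction, applies each pending change to the quota file via
 * gfs2_adjust_quota() and backs it out of the local quota change file via
 * do_qc().
 */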
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
	return error;
}

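/* Re-read this qd's entry from the quota file and refresh the glock's Lock
 * Value Block with it, so that other nodes can see the limits and current
 * usage without reading the quota file themselves (see the comment at the
 * top of this file). */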
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error = 0;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

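/* Decide whether this qd's pending local change is large enough to be worth
 * syncing before the next periodic sync.  The change is scaled up by the
 * number of journals and by quota_scale (num/den); only if that projection
 * would cross the hard limit is an early sync requested.  This implements
 * the "quota_scale" behaviour described at the top of this file. */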
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	u32 x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);