/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * ([email protected])
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small, usually one
 * page long, and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a
 * new cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs;
 * otherwise they come from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain,
 *	which can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <[email protected]>.
 *	Shobhit Dayal <[email protected]>
 *	Alok N Kataria <[email protected]>
 *	Christoph Lameter <[email protected]>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
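
/*
 * Typical use of this allocator, as an illustrative sketch ("struct foo"
 * and "foo_cache" are made-up names; the ctor/dtor arguments follow this
 * version's prototypes):
 *
 *	struct kmem_cache *foo_cache =
 *		kmem_cache_create("foo", sizeof(struct foo), 0,
 *				  SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */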

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab via
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
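
/*
 * Illustrative sketch of the resulting free list: for a freshly grown slab
 * of N objects, cache_init_objs() (later in this file) leaves the bufctl
 * array chained as
 *
 *	slab_bufctl(slabp)[i] == i + 1		for i < N - 1
 *	slab_bufctl(slabp)[N - 1] == BUFCTL_END
 *
 * with slabp->free == 0, so an allocation simply pops index slabp->free
 * and follows the chain.
 */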

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};
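
/*
 * A sketch of the resulting on-slab layout (ignoring alignment padding):
 *
 *	+--------+-------------+--------------------+------+------+----
 *	| colour | struct slab | kmem_bufctl_t[num] | obj0 | obj1 | ...
 *	+--------+-------------+--------------------+------+------+----
 *	         ^slabp                             ^slabp->s_mem
 */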

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};
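
/*
 * The lookup pattern described above, as a sketch; obj->lock and the
 * helpers are illustrative names, not real kernel interfaces:
 *
 *	rcu_read_lock();
 *	obj = read_address(addr);	<- may be stale, but not yet reused
 *	spin_lock(&obj->lock);		<- stabilize the candidate object
 *	rcu_read_unlock();
 *	if (obj_still_at(obj, addr))	<- re-check identity under the lock
 *		use(obj);
 *	spin_unlock(&obj->lock);
 */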

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};
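
/*
 * entry[] is used as a LIFO stack of object pointers; a sketch of the two
 * fast paths used elsewhere in this file:
 *
 *	free:  ac->entry[ac->avail++] = objp;
 *	alloc: objp = ac->entry[--ac->avail];
 *
 * so the most recently freed, cache-warm object is handed out first.
 */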

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <=x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}
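
/*
 * Example: with a size table that starts 32, 64, 96, ..., index_of(40)
 * folds at compile time to the constant 1 (the 64 byte cache), while a
 * non-constant size ends up in __bad_size(), i.e. a link-time error.
 */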

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
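
/*
 * A concrete example of the above layout (a sketch assuming a 32-bit box,
 * so BYTES_PER_WORD == 4): a 100-byte object with SLAB_RED_ZONE and
 * SLAB_STORE_USER gets obj_offset == 4 and buffer_size == 112:
 *
 *	[redzone1][100-byte object][redzone2][last caller]
 *	0         4                104       108          112
 */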
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
 * order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->buffer_size)
 *   Using the fact that buffer_size is a constant for a particular cache,
 *   we can replace (offset / cache->buffer_size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
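
/*
 * Worked example: for buffer_size == 256, reciprocal_buffer_size ==
 * (1 << 32) / 256 == 0x01000000, so for an object at offset 512
 * reciprocal_divide(512, 0x01000000) == (512ULL * 0x01000000) >> 32 == 2,
 * the same result as 512 / 256 but without a divide instruction.
 */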

/*
 * These are the default caches for kmalloc. Custom caches can have other sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(struct kmem_cache),
#endif
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)
{
	int q;
	struct cache_sizes *s = malloc_sizes;

	while (s->cs_size != ULONG_MAX) {
		for_each_node(q) {
			struct array_cache **alc;
			int r;
			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
			if (!l3 || OFF_SLAB(s->cs_cachep))
				continue;
			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
			alc = l3->alien;
			/*
			 * FIXME: This check for BAD_ALIEN_MAGIC
			 * should go away when common slab code is taught to
			 * work even without alien caches.
			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
			 * for alloc_alien_cache,
			 */
			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
				continue;
			for_each_node(r) {
				if (alc[r])
					lockdep_set_class(&alc[r]->lock,
					     &on_slab_alc_key);
			}
		}
		s++;
	}
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/*
 * 1. Guard access to the cache-chain.
 * 2. Protect sanity of cpu_online_map against cpu hotplug events
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct delayed_work, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}
static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
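
/*
 * Example: with the default size table, __find_general_cachep(100, GFP_KERNEL)
 * stops at the first cs_size >= 100 and returns the "size-128" cache; with
 * GFP_DMA (and CONFIG_ZONE_DMA configured) it returns "size-128(DMA)"
 * instead (names as built in cache_names[] above).
 */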

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
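
/*
 * Worked example (a sketch assuming a 32-bit box with 4 kB pages, so
 * sizeof(struct slab) == 28 and sizeof(kmem_bufctl_t) == 4): for
 * gfporder == 0, buffer_size == 256, align == 4 and on-slab management,
 * the initial guess is (4096 - 28) / (256 + 4) == 15 objects;
 * slab_mgmt_size(15, 4) == 88 and 88 + 15 * 256 == 3928 <= 4096, so the
 * guess stands: *num == 15 and *left_over == 4096 - 3840 - 88 == 168.
 */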

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	/*
	 * Also drain per cpu pages on remote zones
	 */
	if (node != numa_node_id())
		drain_node_pages(node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}
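
/*
 * Example: if from->avail == 10, max == 6 and "to" only has room for 4
 * more entries, nr == 4 and the 4 most recently freed pointers move from
 * the top of "from" to the top of "to".
 */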

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				/* unwind the partially built array */
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_node_id();

	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	int memsize = sizeof(struct kmem_list3);

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&cache_chain_mutex);
		/*
		 * We need to do this right in the beginning since
		 * alloc_arraycache's are going to use this list.
		 * kmalloc_node allows us to add the slab to the right
		 * kmem_list3 and not this cpu's kmem_list3
		 */

		list_for_each_entry(cachep, &cache_chain, next) {
			/*
			 * Set up the size64 kmemlist for cpu before we can
			 * begin anything. Make sure some other cpu on this
			 * node has not already allocated this
			 */
			if (!cachep->nodelists[node]) {
				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
				if (!l3)
					goto bad;
				kmem_list3_init(l3);
				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

				/*
				 * The l3s don't come and go as CPUs come and
				 * go.  cache_chain_mutex is sufficient
				 * protection here.
				 */
				cachep->nodelists[node] = l3;
			}

			spin_lock_irq(&cachep->nodelists[node]->list_lock);
			cachep->nodelists[node]->free_limit =
				(1 + nr_cpus_node(node)) *
				cachep->batchcount + cachep->num;
			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
		}

		/*
		 * Now we can go ahead with allocating the shared arrays and
		 * array caches
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared;
			struct array_cache **alien = NULL;

			nc = alloc_arraycache(node, cachep->limit,
						cachep->batchcount);
			if (!nc)
				goto bad;
			shared = alloc_arraycache(node,
					cachep->shared * cachep->batchcount,
					0xbaadf00d);
			if (!shared)
				goto bad;

			if (use_alien_caches) {
				alien = alloc_alien_cache(node, cachep->limit);
				if (!alien)
					goto bad;
			}
			cachep->array[cpu] = nc;
			l3 = cachep->nodelists[node];
			BUG_ON(!l3);

			spin_lock_irq(&l3->list_lock);
			if (!l3->shared) {
				/*
				 * We are serialised from CPU_DEAD or
				 * CPU_UP_CANCELLED by the cpucontrol lock
				 */
				l3->shared = shared;
				shared = NULL;
			}
#ifdef CONFIG_NUMA
			if (!l3->alien) {
				l3->alien = alien;
				alien = NULL;
			}
#endif
			spin_unlock_irq(&l3->list_lock);
			kfree(shared);
			free_alien_cache(alien);
		}
		break;
	case CPU_ONLINE:
		mutex_unlock(&cache_chain_mutex);
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
		mutex_lock(&cache_chain_mutex);
		break;
	case CPU_DOWN_FAILED:
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_DEAD:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall thru */
#endif
	case CPU_UP_CANCELED:
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared;
			struct array_cache **alien;
			cpumask_t mask;

			mask = node_to_cpumask(node);
			/* cpu is dead; no one can alloc from it. */
			nc = cachep->array[cpu];
			cachep->array[cpu] = NULL;
			l3 = cachep->nodelists[node];

			if (!l3)
				goto free_array_cache;

			spin_lock_irq(&l3->list_lock);

			/* Free limit for this kmem_list3 */
			l3->free_limit -= cachep->batchcount;
			if (nc)
				free_block(cachep, nc->entry, nc->avail, node);

			if (!cpus_empty(mask)) {
				spin_unlock_irq(&l3->list_lock);
				goto free_array_cache;
			}

			shared = l3->shared;
			if (shared) {
				free_block(cachep, l3->shared->entry,
					   l3->shared->avail, node);
				l3->shared = NULL;
			}

			alien = l3->alien;
			l3->alien = NULL;

			spin_unlock_irq(&l3->list_lock);

			kfree(shared);
			if (alien) {
				drain_alien_cache(cachep, alien);
				free_alien_cache(alien);
			}
free_array_cache:
			kfree(nc);
		}
		/*
		 * In the previous loop, all the objects were freed to
		 * the respective cache's slabs,  now we can go ahead and
		 * shrink each nodelist to its limit.
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			l3 = cachep->nodelists[node];
			if (!l3)
				continue;
			drain_freelist(cachep, l3, l3->free_objects);
		}
		mutex_unlock(&cache_chain_mutex);
		break;
	}
	return NOTIFY_OK;
bad:
	return NOTIFY_BAD;
}

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

/*
 * swap the static kmem_list3 with kmalloced memory
 */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
			int nodeid)
{
	struct kmem_list3 *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
	BUG_ON(!ptr);

	local_irq_disable();
	memcpy(ptr, list, sizeof(struct kmem_list3));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->nodelists[nodeid] = ptr;
	local_irq_enable();
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;
	int node;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.