/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}
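/*
 * Illustrative note (not part of the original header): callers typically use
 * this either to pick the file vs. anon LRU list (see page_lru_base_type()
 * below) or to index per-type statistics, e.g.:
 *
 *	int file = page_is_file_cache(page);
 */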

/*
 * Adjust the node and per-zone counts of pages on @lru by @nr_pages
 * (negative when pages are removed from the list).
 */
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

/* As __update_lru_size(), but also updates the owning memcg's LRU size. */
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}
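/*
 * Illustrative sketch (not part of the original header): a caller such as
 * the page activation code in mm/swap.c combines these helpers roughly as
 * follows, with the page on an LRU and the appropriate LRU lock held:
 *
 *	enum lru_list lru = page_lru_base_type(page);
 *
 *	del_page_from_lru_list(page, lruvec, lru);
 *	SetPageActive(page);
 *	add_page_to_lru_list(page, lruvec, lru + LRU_ACTIVE);
 */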

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

/**
 * page_off_lru - which LRU list was page on? clearing its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}
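/*
 * Illustrative sketch (not part of the original header): a freeing path such
 * as __page_cache_release() in mm/swap.c pairs this with
 * del_page_from_lru_list(), roughly as below (lruvec lookup and locking
 * details vary by kernel version):
 *
 *	__ClearPageLRU(page);
 *	del_page_from_lru_list(page, lruvec, page_off_lru(page));
 */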

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
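/*
 * Illustrative sketch (not part of the original header): code that links a
 * page (back) onto an LRU, e.g. the pagevec path in mm/swap.c, typically
 * pairs this with add_page_to_lru_list():
 *
 *	SetPageLRU(page);
 *	add_page_to_lru_list(page, lruvec, page_lru(page));
 */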

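/*
 * lru_to_page() yields the page linked at the tail of @head (via ->prev).
 * Illustrative scan loop (not part of the original header), in the style of
 * mm/vmscan.c:
 *
 *	while (!list_empty(src)) {
 *		struct page *page = lru_to_page(src);
 *
 *		list_del(&page->lru);
 *		...
 *	}
 */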
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

#endif