Revert "FROMLIST: mm: multi-gen LRU: groundwork"
This reverts commit f88ed5a3d3.
To be replaced with upstream version.
Bug: 249601646
Change-Id: I5a206480f838c304fb1c960fec2615894c2421bb
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>

@@ -795,8 +795,7 @@ static int fuse_check_page(struct page *page)
 	       1 << PG_active |
 	       1 << PG_workingset |
 	       1 << PG_reclaim |
-	       1 << PG_waiters |
-	       LRU_GEN_MASK | LRU_REFS_MASK))) {
+	       1 << PG_waiters))) {
 		dump_page(page, "fuse: trying to steal weird page");
 		return 1;
 	}
@@ -1134,8 +1134,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 #define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
 #define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
-#define LRU_GEN_PGOFF		(KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
-#define LRU_REFS_PGOFF		(LRU_GEN_PGOFF - LRU_REFS_WIDTH)
 
 /*
  * Define the bit shifts to access each section. For non-existent
@@ -27,13 +27,10 @@ static inline int page_is_file_lru(struct page *page)
 
 static __always_inline void __update_lru_size(struct lruvec *lruvec,
 				enum lru_list lru, enum zone_type zid,
-				long nr_pages)
+				int nr_pages)
 {
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
-	lockdep_assert_held(&lruvec->lru_lock);
-	WARN_ON_ONCE(nr_pages != (int)nr_pages);
-
 	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
 	__mod_zone_page_state(&pgdat->node_zones[zid],
 				NR_ZONE_LRU_BASE + lru, nr_pages);
@@ -90,177 +87,11 @@ static __always_inline enum lru_list page_lru(struct page *page)
 	return lru;
 }
 
-#ifdef CONFIG_LRU_GEN
-
-static inline bool lru_gen_enabled(void)
-{
-	return true;
-}
-
-static inline bool lru_gen_in_fault(void)
-{
-	return current->in_lru_fault;
-}
-
-static inline int lru_gen_from_seq(unsigned long seq)
-{
-	return seq % MAX_NR_GENS;
-}
-
-static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
-{
-	unsigned long max_seq = lruvec->lrugen.max_seq;
-
-	VM_BUG_ON(gen >= MAX_NR_GENS);
-
-	/* see the comment on MIN_NR_GENS */
-	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
-}
-
-static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page,
-				       int old_gen, int new_gen)
-{
-	int type = page_is_file_lru(page);
-	int zone = page_zonenum(page);
-	int delta = thp_nr_pages(page);
-	enum lru_list lru = type * LRU_INACTIVE_FILE;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
-
-	VM_BUG_ON(old_gen != -1 && old_gen >= MAX_NR_GENS);
-	VM_BUG_ON(new_gen != -1 && new_gen >= MAX_NR_GENS);
-	VM_BUG_ON(old_gen == -1 && new_gen == -1);
-
-	if (old_gen >= 0)
-		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
-			   lrugen->nr_pages[old_gen][type][zone] - delta);
-	if (new_gen >= 0)
-		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
-			   lrugen->nr_pages[new_gen][type][zone] + delta);
-
-	/* addition */
-	if (old_gen < 0) {
-		if (lru_gen_is_active(lruvec, new_gen))
-			lru += LRU_ACTIVE;
-		__update_lru_size(lruvec, lru, zone, delta);
-		return;
-	}
-
-	/* deletion */
-	if (new_gen < 0) {
-		if (lru_gen_is_active(lruvec, old_gen))
-			lru += LRU_ACTIVE;
-		__update_lru_size(lruvec, lru, zone, -delta);
-		return;
-	}
-}
-
-static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
-{
-	int gen;
-	unsigned long old_flags, new_flags;
-	int type = page_is_file_lru(page);
-	int zone = page_zonenum(page);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
-
-	if (PageUnevictable(page))
-		return false;
-	/*
-	 * There are three common cases for this page:
-	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
-	 *    migrated, add it to the youngest generation.
-	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
-	 *    not in swapcache or a dirty page pending writeback, add it to the
-	 *    second oldest generation.
-	 * 3. Everything else (clean, cold) is added to the oldest generation.
-	 */
-	if (PageActive(page))
-		gen = lru_gen_from_seq(lrugen->max_seq);
-	else if ((type == LRU_GEN_ANON && !PageSwapCache(page)) ||
-		 (PageReclaim(page) && (PageDirty(page) || PageWriteback(page))))
-		gen = lru_gen_from_seq(lrugen->min_seq[type] + 1);
-	else
-		gen = lru_gen_from_seq(lrugen->min_seq[type]);
-
-	do {
-		new_flags = old_flags = READ_ONCE(page->flags);
-		VM_BUG_ON_PAGE(new_flags & LRU_GEN_MASK, page);
-
-		/* see the comment on MIN_NR_GENS */
-		new_flags &= ~(LRU_GEN_MASK | BIT(PG_active));
-		new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
-	} while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
-
-	lru_gen_update_size(lruvec, page, -1, gen);
-	/* for rotate_reclaimable_page() */
-	if (reclaiming)
-		list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]);
-	else
-		list_add(&page->lru, &lrugen->lists[gen][type][zone]);
-
-	return true;
-}
-
-static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
-{
-	int gen;
-	unsigned long old_flags, new_flags;
-
-	do {
-		new_flags = old_flags = READ_ONCE(page->flags);
-		if (!(new_flags & LRU_GEN_MASK))
-			return false;
-
-		VM_BUG_ON_PAGE(PageActive(page), page);
-		VM_BUG_ON_PAGE(PageUnevictable(page), page);
-
-		gen = ((new_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
-
-		new_flags &= ~LRU_GEN_MASK;
-		/* for shrink_page_list() */
-		if (reclaiming)
-			new_flags &= ~(BIT(PG_referenced) | BIT(PG_reclaim));
-		else if (lru_gen_is_active(lruvec, gen))
-			new_flags |= BIT(PG_active);
-	} while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
-
-	lru_gen_update_size(lruvec, page, gen, -1);
-	list_del(&page->lru);
-
-	return true;
-}
-
-#else
-
-static inline bool lru_gen_enabled(void)
-{
-	return false;
-}
-
-static inline bool lru_gen_in_fault(void)
-{
-	return false;
-}
-
-static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
-{
-	return false;
-}
-
-static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
-{
-	return false;
-}
-
-#endif /* CONFIG_LRU_GEN */
-
 static __always_inline void add_page_to_lru_list(struct page *page,
 						struct lruvec *lruvec)
 {
 	enum lru_list lru = page_lru(page);
 
-	if (lru_gen_add_page(lruvec, page, false))
-		return;
-
 	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
@@ -270,9 +101,6 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
 {
 	enum lru_list lru = page_lru(page);
 
-	if (lru_gen_add_page(lruvec, page, true))
-		return;
-
 	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
 	list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
@@ -280,9 +108,6 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
 static __always_inline void del_page_from_lru_list(struct page *page,
 						struct lruvec *lruvec)
 {
-	if (lru_gen_del_page(lruvec, page, false))
-		return;
-
 	list_del(&page->lru);
 	update_lru_size(lruvec, page_lru(page), page_zonenum(page),
 			-thp_nr_pages(page));
@@ -298,96 +298,6 @@ enum lruvec_flags {
 					 */
 };
 
-#endif /* !__GENERATING_BOUNDS_H */
-
-/*
- * Evictable pages are divided into multiple generations. The youngest and the
- * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
- * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
- * offset within MAX_NR_GENS, gen, indexes the LRU list of the corresponding
- * generation. The gen counter in page->flags stores gen+1 while a page is on
- * one of lrugen->lists[]. Otherwise it stores 0.
- *
- * A page is added to the youngest generation on faulting. The aging needs to
- * check the accessed bit at least twice before handing this page over to the
- * eviction. The first check takes care of the accessed bit set on the initial
- * fault; the second check makes sure this page hasn't been used since then.
- * This process, AKA second chance, requires a minimum of two generations,
- * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
- * LRU, these two generations are considered active; the rest of generations, if
- * they exist, are considered inactive. See lru_gen_is_active(). PG_active is
- * always cleared while a page is on one of lrugen->lists[] so that the aging
- * needs not to worry about it. And it's set again when a page considered active
- * is isolated for non-reclaiming purposes, e.g., migration. See
- * lru_gen_add_page() and lru_gen_del_page().
- *
- * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice of the
- * categories of the active/inactive LRU when keeping track of accesses through
- * page tables. It requires order_base_2(MAX_NR_GENS+1) bits in page->flags.
- */
-#define MIN_NR_GENS		2U
-#define MAX_NR_GENS		4U
-
-#ifndef __GENERATING_BOUNDS_H
-
-struct lruvec;
-
-#define LRU_GEN_MASK		((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
-#define LRU_REFS_MASK		((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
-
-#ifdef CONFIG_LRU_GEN
-
-enum {
-	LRU_GEN_ANON,
-	LRU_GEN_FILE,
-};
-
-/*
- * The youngest generation number is stored in max_seq for both anon and file
- * types as they are aged on an equal footing. The oldest generation numbers are
- * stored in min_seq[] separately for anon and file types as clean file pages
- * can be evicted regardless of swap constraints.
- *
- * Normally anon and file min_seq are in sync. But if swapping is constrained,
- * e.g., out of swap space, file min_seq is allowed to advance and leave anon
- * min_seq behind.
- */
-struct lru_gen_struct {
-	/* the aging increments the youngest generation number */
-	unsigned long max_seq;
-	/* the eviction increments the oldest generation numbers */
-	unsigned long min_seq[ANON_AND_FILE];
-	/* the multi-gen LRU lists */
-	struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
-	/* the sizes of the above lists */
-	unsigned long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
-};
-
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-
-#ifdef CONFIG_MEMCG
-void lru_gen_init_memcg(struct mem_cgroup *memcg);
-void lru_gen_exit_memcg(struct mem_cgroup *memcg);
-#endif
-
-#else /* !CONFIG_LRU_GEN */
-
-static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
-{
-}
-
-#ifdef CONFIG_MEMCG
-static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
-{
-}
-
-static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
-{
-}
-#endif
-
-#endif /* CONFIG_LRU_GEN */
-
 struct lruvec {
 	struct list_head		lists[NR_LRU_LISTS];
 	/* per lruvec lru_lock for memcg */
@@ -405,10 +315,6 @@ struct lruvec {
 	unsigned long			refaults[ANON_AND_FILE];
 	/* Various lruvec state flags (enum lruvec_flags) */
 	unsigned long			flags;
-#ifdef CONFIG_LRU_GEN
-	/* evictable pages divided into generations */
-	struct lru_gen_struct		lrugen;
-#endif
 #ifdef CONFIG_MEMCG
 	struct pglist_data *pgdat;
 #endif
@@ -55,8 +55,7 @@
 #define SECTIONS_WIDTH		0
 #endif
 
-#if ZONES_WIDTH + LRU_GEN_WIDTH + LRU_REFS_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
-	<= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
 #define NODES_WIDTH		NODES_SHIFT
 #elif defined(CONFIG_SPARSEMEM_VMEMMAP)
 #error "Vmemmap: No space for nodes field in page flags"
@@ -90,8 +89,8 @@
 #define LAST_CPUPID_SHIFT 0
 #endif
 
-#if ZONES_WIDTH + LRU_GEN_WIDTH + LRU_REFS_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
-	KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \
+	<= BITS_PER_LONG - NR_PAGEFLAGS
 #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
 #else
 #define LAST_CPUPID_WIDTH 0
@@ -101,8 +100,8 @@
 #define LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #endif
 
-#if ZONES_WIDTH + LRU_GEN_WIDTH + LRU_REFS_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
-	KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \
+	> BITS_PER_LONG - NR_PAGEFLAGS
 #error "Not enough bits in page flags"
 #endif
 
@@ -848,7 +848,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
 	 1UL << PG_private	| 1UL << PG_private_2	| \
 	 1UL << PG_writeback	| 1UL << PG_reserved	| \
 	 1UL << PG_slab		| 1UL << PG_active	| \
-	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)
+	 1UL << PG_unevictable	| __PG_MLOCKED)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
@@ -859,7 +859,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
  * alloc-free cycle to prevent from reusing the page.
  */
 #define PAGE_FLAGS_CHECK_AT_PREP	\
-	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
+	(PAGEFLAGS_MASK & ~__PG_HWPOISON)
 
 #define PAGE_FLAGS_PRIVATE				\
 	(1UL << PG_private | 1UL << PG_private_2)
@@ -925,10 +925,6 @@ struct task_struct {
 #ifdef CONFIG_MEMCG
 	unsigned			in_user_fault:1;
 #endif
-#ifdef CONFIG_LRU_GEN
-	/* whether the LRU algorithm may apply to this access */
-	unsigned			in_lru_fault:1;
-#endif
 #ifdef CONFIG_COMPAT_BRK
 	unsigned			brk_randomized:1;
 #endif
@@ -22,13 +22,6 @@ int main(void)
 	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
 	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
-#ifdef CONFIG_LRU_GEN
-	DEFINE(LRU_GEN_WIDTH, order_base_2(MAX_NR_GENS + 1));
-	DEFINE(LRU_REFS_WIDTH, 0);
-#else
-	DEFINE(LRU_GEN_WIDTH, 0);
-	DEFINE(LRU_REFS_WIDTH, 0);
-#endif
 	/* End of constants */
 
 	return 0;
@@ -923,14 +923,6 @@ config ANON_VMA_NAME
 	  area from being merged with adjacent virtual memory areas due to the
 	  difference in their name.
 
-config LRU_GEN
-	bool "Multi-Gen LRU"
-	depends on MMU
-	# the following options can use up the spare bits in page flags
-	depends on !MAXSMP && (64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP)
-	help
-	  A high performance LRU implementation to overcommit memory.
-
 source "mm/damon/Kconfig"
 
 config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
@@ -2364,8 +2364,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 #ifdef CONFIG_64BIT
 			 (1L << PG_arch_2) |
 #endif
-			 (1L << PG_dirty) |
-			 LRU_GEN_MASK | LRU_REFS_MASK));
+			 (1L << PG_dirty)));
 
 	/* ->mapping in first tail page is compound_mapcount */
 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
@@ -5170,7 +5170,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
-	lru_gen_exit_memcg(memcg);
 	memcg_wb_domain_exit(memcg);
 	__mem_cgroup_free(memcg);
 }
@@ -5235,7 +5234,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 	memcg->deferred_split_queue.split_queue_len = 0;
 #endif
 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
-	lru_gen_init_memcg(memcg);
 	trace_android_vh_mem_cgroup_alloc(memcg);
 	return memcg;
 fail:
mm/memory.c
@@ -5119,27 +5119,6 @@ static inline void mm_account_fault(struct pt_regs *regs,
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 }
 
-#ifdef CONFIG_LRU_GEN
-static void lru_gen_enter_fault(struct vm_area_struct *vma)
-{
-	/* the LRU algorithm doesn't apply to sequential or random reads */
-	current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
-}
-
-static void lru_gen_exit_fault(void)
-{
-	current->in_lru_fault = false;
-}
-#else
-static void lru_gen_enter_fault(struct vm_area_struct *vma)
-{
-}
-
-static void lru_gen_exit_fault(void)
-{
-}
-#endif /* CONFIG_LRU_GEN */
-
 /*
  * By the time we get here, we already hold the mm semaphore
  *
@@ -5175,8 +5154,6 @@ vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_enter_user_fault();
 
-	lru_gen_enter_fault(vma);
-
 	if (unlikely(is_vm_hugetlb_page(vma))) {
 		VM_BUG_ON(flags & FAULT_FLAG_SPECULATIVE);
 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
@@ -5184,8 +5161,6 @@ vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
 		ret = __handle_mm_fault(vma, address, flags, seq);
 	}
 
-	lru_gen_exit_fault();
-
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_exit_user_fault();
 		/*
@@ -65,16 +65,14 @@ void __init mminit_verify_pageflags_layout(void)
 
 	shift = 8 * sizeof(unsigned long);
 	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
-		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
+		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
-		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
+		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
 		SECTIONS_WIDTH,
 		NODES_WIDTH,
 		ZONES_WIDTH,
 		LAST_CPUPID_WIDTH,
 		KASAN_TAG_WIDTH,
-		LRU_GEN_WIDTH,
-		LRU_REFS_WIDTH,
 		NR_PAGEFLAGS);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
 		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
@@ -82,8 +82,6 @@ void lruvec_init(struct lruvec *lruvec)
 
 	for_each_lru(lru)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
-
-	lru_gen_init_lruvec(lruvec);
 }
 
 #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
@@ -446,11 +446,6 @@ void lru_cache_add(struct page *page)
 	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	/* see the comment in lru_gen_add_page() */
-	if (lru_gen_enabled() && !PageUnevictable(page) &&
-	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
-		SetPageActive(page);
-
 	get_page(page);
 	local_lock(&lru_pvecs.lock);
 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
@@ -552,7 +547,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
 {
-	if (!PageUnevictable(page) && (PageActive(page) || lru_gen_enabled())) {
+	if (PageActive(page) && !PageUnevictable(page)) {
 		int nr_pages = thp_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec);
@@ -666,7 +661,7 @@ void deactivate_file_page(struct page *page)
  */
 void deactivate_page(struct page *page)
 {
-	if (PageLRU(page) && !PageUnevictable(page) && (PageActive(page) || lru_gen_enabled())) {
+	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
 		struct pagevec *pvec;
 
 		local_lock(&lru_pvecs.lock);
mm/vmscan.c
@@ -2907,81 +2907,6 @@ static bool can_age_anon_pages(struct pglist_data *pgdat,
 	return can_demote(pgdat->node_id, sc);
 }
 
-#ifdef CONFIG_LRU_GEN
-
-/******************************************************************************
- *                          shorthand helpers
- ******************************************************************************/
-
-#define for_each_gen_type_zone(gen, type, zone)				\
-	for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)			\
-		for ((type) = 0; (type) < ANON_AND_FILE; (type)++)	\
-			for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
-
-static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
-{
-	struct pglist_data *pgdat = NODE_DATA(nid);
-
-#ifdef CONFIG_MEMCG
-	if (memcg) {
-		struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
-
-		/* for hotadd_new_pgdat() */
-		if (!lruvec->pgdat)
-			lruvec->pgdat = pgdat;
-
-		return lruvec;
-	}
-#endif
-	VM_BUG_ON(!mem_cgroup_disabled());
-
-	return pgdat ? &pgdat->__lruvec : NULL;
-}
-
-/******************************************************************************
- *                          initialization
- ******************************************************************************/
-
-void lru_gen_init_lruvec(struct lruvec *lruvec)
-{
-	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
-
-	lrugen->max_seq = MIN_NR_GENS + 1;
-
-	for_each_gen_type_zone(gen, type, zone)
-		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
-}
-
-#ifdef CONFIG_MEMCG
-void lru_gen_init_memcg(struct mem_cgroup *memcg)
-{
-}
-
-void lru_gen_exit_memcg(struct mem_cgroup *memcg)
-{
-	int nid;
-
-	for_each_node(nid) {
-		struct lruvec *lruvec = get_lruvec(memcg, nid);
-
-		VM_BUG_ON(memchr_inv(lruvec->lrugen.nr_pages, 0,
-				     sizeof(lruvec->lrugen.nr_pages)));
-	}
-}
-#endif
-
-static int __init init_lru_gen(void)
-{
-	BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
-	BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
-
-	return 0;
-};
-late_initcall(init_lru_gen);
-
-#endif /* CONFIG_LRU_GEN */
-
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];