Revert "FROMLIST: mm: multi-gen LRU: support page table walks"

This reverts commit 5280d76d38.

To be replaced with upstream version.

Bug: 249601646
Change-Id: I5bf4095117082b6627d182a8d987ca78a18fa392
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Author: Kalesh Singh
Date:   2022-11-07 15:59:16 -08:00
Parent: 52ed44a334
Commit: 02dc0d1dda

10 changed files with 13 additions and 1142 deletions

fs/exec.c

@@ -1014,7 +1014,6 @@ static int exec_mmap(struct mm_struct *mm)
active_mm = tsk->active_mm;
tsk->active_mm = mm;
tsk->mm = mm;
lru_gen_add_mm(mm);
/*
* This prevents preemption while active_mm is being loaded and
* it and mm are being updated, which could cause problems for
@@ -1027,7 +1026,6 @@ static int exec_mmap(struct mm_struct *mm)
activate_mm(active_mm, mm);
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
lru_gen_use_mm(mm);
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
task_unlock(tsk);
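
The two calls removed above bracketed the address-space switch in exec_mmap(): lru_gen_add_mm() put the new mm on the list that page table walkers iterate before the mm became current, and lru_gen_use_mm() flagged it as recently used once activate_mm() had installed it. A minimal userspace sketch of that register-then-mark ordering follows; the names (mm_registry_add(), mm_mark_used()) are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct fake_mm {
    struct fake_mm *next;  /* stand-in for mm->lru_gen.list */
    bool used;             /* stand-in for the "used since last walk" hint */
    const char *name;
};

static struct fake_mm *registry;

/* like lru_gen_add_mm(): make the mm visible to walkers first */
static void mm_registry_add(struct fake_mm *mm)
{
    mm->next = registry;
    registry = mm;
}

/* like lru_gen_use_mm(): note that the mm is now actively used */
static void mm_mark_used(struct fake_mm *mm)
{
    mm->used = true;
}

int main(void)
{
    struct fake_mm mm = { .name = "new exec image" };

    mm_registry_add(&mm);  /* before the switch: walkers may now see it */
    /* ...the activate_mm() equivalent would run here... */
    mm_mark_used(&mm);     /* after the switch: hint that it is worth scanning */
    printf("%s: registered, used=%d\n", mm.name, mm.used);
    return 0;
}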

include/linux/memcontrol.h

@@ -349,11 +349,6 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
#ifdef CONFIG_LRU_GEN
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
/* for dynamic low */
ANDROID_VENDOR_DATA(1);
ANDROID_OEM_DATA_ARRAY(1, 2);

include/linux/mm_types.h

@@ -3,7 +3,6 @@
#define _LINUX_MM_TYPES_H
#include <linux/mm_types_task.h>
#include <linux/sched.h>
#include <linux/auxvec.h>
#include <linux/kref.h>
@@ -17,8 +16,6 @@
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/mmdebug.h>
#include <linux/android_kabi.h>
#include <asm/mmu.h>
@@ -634,22 +631,6 @@ struct mm_struct {
#ifdef CONFIG_IOMMU_SUPPORT
u32 pasid;
#endif
#ifdef CONFIG_LRU_GEN
struct {
/* this mm_struct is on lru_gen_mm_list */
struct list_head list;
#ifdef CONFIG_MEMCG
/* points to the memcg of "owner" above */
struct mem_cgroup *memcg;
#endif
/*
* Set when switching to this mm_struct, as a hint of
* whether it has been used since the last time per-node
* page table walkers cleared the corresponding bits.
*/
nodemask_t nodes;
} lru_gen;
#endif /* CONFIG_LRU_GEN */
ANDROID_KABI_RESERVE(1);
} __randomize_layout;
@@ -678,65 +659,6 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}
#ifdef CONFIG_LRU_GEN
struct lru_gen_mm_list {
/* mm_struct list for page table walkers */
struct list_head fifo;
/* protects the list above */
spinlock_t lock;
};
void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
INIT_LIST_HEAD(&mm->lru_gen.list);
#ifdef CONFIG_MEMCG
mm->lru_gen.memcg = NULL;
#endif
nodes_clear(mm->lru_gen.nodes);
}
static inline void lru_gen_use_mm(struct mm_struct *mm)
{
/* unlikely but not a bug when racing with lru_gen_migrate_mm() */
VM_WARN_ON(list_empty(&mm->lru_gen.list));
if (!(current->flags & PF_KTHREAD) && !nodes_full(mm->lru_gen.nodes))
nodes_setall(mm->lru_gen.nodes);
}
#else /* !CONFIG_LRU_GEN */
static inline void lru_gen_add_mm(struct mm_struct *mm)
{
}
static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}
#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
}
static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_LRU_GEN */
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
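
The block removed above is the walker-facing side of the mm_struct: each mm sits on a per-memcg FIFO (struct lru_gen_mm_list), and lru_gen_use_mm() sets a node mask that per-node page table walkers later test and clear, so address spaces that have not been switched to since the previous walk can be skipped. Below is a self-contained sketch of that set-on-use / test-and-clear-on-walk hint using a plain unsigned long instead of nodemask_t; mm_hint_use() and walker_should_scan() are made-up names for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NR_FAKE_NODES 4

struct mm_hint {
    unsigned long nodes;    /* one bit per node, like mm->lru_gen.nodes */
};

/* called when a task switches to this mm: flag it for every node's walker */
static void mm_hint_use(struct mm_hint *hint)
{
    hint->nodes = (1UL << NR_FAKE_NODES) - 1;
}

/* called by the walker for one node: scan only if the mm was used, then clear */
static bool walker_should_scan(struct mm_hint *hint, int node)
{
    unsigned long bit = 1UL << node;

    if (!(hint->nodes & bit))
        return false;       /* untouched since this node's last walk */
    hint->nodes &= ~bit;    /* consume the hint */
    return true;
}

int main(void)
{
    struct mm_hint hint = { 0 };

    mm_hint_use(&hint);
    printf("node 0, first walk:  %d\n", walker_should_scan(&hint, 0)); /* 1 */
    printf("node 0, second walk: %d\n", walker_should_scan(&hint, 0)); /* 0 */
    printf("node 1, first walk:  %d\n", walker_should_scan(&hint, 1)); /* 1 */
    return 0;
}

The hint errs on the side of scanning: any switch to the mm re-sets every node's bit, while each walker clears only its own node's bit.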

include/linux/mmzone.h

@@ -405,58 +405,6 @@ struct lru_gen_struct {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
};
enum {
MM_PTE_TOTAL, /* total leaf entries */
MM_PTE_OLD, /* old leaf entries */
MM_PTE_YOUNG, /* young leaf entries */
MM_PMD_TOTAL, /* total non-leaf entries */
MM_PMD_FOUND, /* non-leaf entries found in Bloom filters */
MM_PMD_ADDED, /* non-leaf entries added to Bloom filters */
NR_MM_STATS
};
/* mnemonic codes for the mm stats above */
#define MM_STAT_CODES "toydfa"
/* double-buffering Bloom filters */
#define NR_BLOOM_FILTERS 2
struct lru_gen_mm_state {
/* set to max_seq after each iteration */
unsigned long seq;
/* where the current iteration starts (inclusive) */
struct list_head *head;
/* where the last iteration ends (exclusive) */
struct list_head *tail;
/* to wait for the last page table walker to finish */
struct wait_queue_head wait;
/* Bloom filters flip after each iteration */
unsigned long *filters[NR_BLOOM_FILTERS];
/* the mm stats for debugging */
unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
/* the number of concurrent page table walkers */
int nr_walkers;
};
struct lru_gen_mm_walk {
/* the lruvec under reclaim */
struct lruvec *lruvec;
/* unstable max_seq from lru_gen_struct */
unsigned long max_seq;
/* the next address within an mm to scan */
unsigned long next_addr;
/* to batch page table entries */
unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)];
/* to batch promoted pages */
int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
/* to batch the mm stats */
int mm_stats[NR_MM_STATS];
/* total batched items */
int batched;
bool can_swap;
bool full_scan;
};
void lru_gen_init_lruvec(struct lruvec *lruvec);
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
@@ -507,8 +455,6 @@ struct lruvec {
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
struct lru_gen_struct lrugen;
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
@@ -1101,11 +1047,6 @@ typedef struct pglist_data {
unsigned long flags;
#ifdef CONFIG_LRU_GEN
/* kswap mm walk data */
struct lru_gen_mm_walk mm_walk;
#endif
ZONE_PADDING(_pad2_)
/* Per-node vmstats */
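
Two of the structures removed here carry the walk state the rest of the series builds on. lru_gen_mm_state double-buffers a pair of Bloom filters: during one iteration, walkers test the filter built by the previous iteration to decide which non-leaf (PMD) entries are worth revisiting, add the entries they find productive to the other filter, and the filters flip when the iteration ends (see MM_PMD_FOUND/MM_PMD_ADDED and NR_BLOOM_FILTERS above). The sketch below shows only that double-buffering scheme; the hash functions and sizes are arbitrary choices for illustration, not those of the reverted code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FILTER_BITS 1024
#define NR_FILTERS  2       /* double buffering, like NR_BLOOM_FILTERS */

static uint8_t filters[NR_FILTERS][FILTER_BITS / 8];
static int current_filter;  /* index of the filter being queried */

static unsigned int hash1(uint64_t key) { return (key * 0x9E3779B97F4A7C15ULL) % FILTER_BITS; }
static unsigned int hash2(uint64_t key) { return (key * 0xC2B2AE3D27D4EB4FULL) % FILTER_BITS; }

static void set_bit_in(uint8_t *f, unsigned int bit) { f[bit / 8] |= 1u << (bit % 8); }
static bool test_bit_in(const uint8_t *f, unsigned int bit) { return f[bit / 8] & (1u << (bit % 8)); }

/* record a "worth revisiting" entry in the filter being built */
static void bloom_add(uint64_t key)
{
    uint8_t *next = filters[!current_filter];

    set_bit_in(next, hash1(key));
    set_bit_in(next, hash2(key));
}

/* query the filter built during the previous iteration (may false-positive) */
static bool bloom_test(uint64_t key)
{
    const uint8_t *cur = filters[current_filter];

    return test_bit_in(cur, hash1(key)) && test_bit_in(cur, hash2(key));
}

/* end of an iteration: the freshly built filter becomes the one queried */
static void bloom_flip(void)
{
    current_filter = !current_filter;
    memset(filters[!current_filter], 0, sizeof(filters[0]));
}

int main(void)
{
    bloom_add(0x7f0000001000ULL);          /* an entry found productive this pass */
    printf("before flip: %d\n", bloom_test(0x7f0000001000ULL)); /* 0: old filter empty */
    bloom_flip();
    printf("after flip:  %d\n", bloom_test(0x7f0000001000ULL)); /* 1 */
    printf("other key:   %d\n", bloom_test(0x7f0000002000ULL)); /* probably 0 */
    return 0;
}

A false positive only costs an unnecessary rescan of one range, which is why a compact probabilistic structure is acceptable here.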

include/linux/swap.h

@@ -139,10 +139,6 @@ union swap_header {
*/
struct reclaim_state {
unsigned long reclaimed_slab;
#ifdef CONFIG_LRU_GEN
/* per-thread mm walk data */
struct lru_gen_mm_walk *mm_walk;
#endif
};
#ifdef __KERNEL__
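
The field removed from reclaim_state was a per-thread pointer to the walk scratch buffer (struct lru_gen_mm_walk), so code deep in the scan path can find its batch buffers without threading an extra argument through every call. A small sketch of that thread-context pattern, with invented names (walk_scratch, get_walk()); it is not the reverted API.

#include <stdio.h>

struct walk_scratch {
    int batched;            /* stand-in for the batching fields of lru_gen_mm_walk */
};

/* stand-in for current->reclaim_state: one context per reclaiming thread */
static _Thread_local struct walk_scratch *current_walk;

static struct walk_scratch *get_walk(void)
{
    /* deep call sites fetch the buffer from the thread context instead of
       receiving it as a parameter at every level */
    return current_walk;
}

static void scan_one_range(void)
{
    struct walk_scratch *walk = get_walk();

    if (walk)
        walk->batched++;    /* batch work instead of flushing immediately */
}

int main(void)
{
    struct walk_scratch scratch = { 0 };

    current_walk = &scratch;    /* installed before reclaim starts */
    scan_one_range();
    scan_one_range();
    current_walk = NULL;
    printf("batched %d ranges\n", scratch.batched);
    return 0;
}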

kernel/exit.c

@@ -423,7 +423,6 @@ assign_new_owner:
goto retry;
}
WRITE_ONCE(mm->owner, c);
lru_gen_migrate_mm(mm);
task_unlock(c);
put_task_struct(c);
}

kernel/fork.c

@@ -1132,7 +1132,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
goto fail_nocontext;
mm->user_ns = get_user_ns(user_ns);
lru_gen_init_mm(mm);
return mm;
fail_nocontext:
@@ -1175,7 +1174,6 @@ static inline void __mmput(struct mm_struct *mm)
}
if (mm->binfmt)
module_put(mm->binfmt->module);
lru_gen_del_mm(mm);
mmdrop(mm);
}
@@ -2675,13 +2673,6 @@ pid_t kernel_clone(struct kernel_clone_args *args)
get_task_struct(p);
}
if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
/* lock the task to synchronize with memcg migration */
task_lock(p);
lru_gen_add_mm(p->mm);
task_unlock(p);
}
wake_up_new_task(p);
/* forking complete and child started to run, tell ptracer */
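
The kernel_clone() hunk explains its own locking: the new child's mm was added while holding task_lock(p), because lru_gen_add_mm() has to read the child's memcg to pick a list, and a concurrent cgroup migration (see the mem_cgroup_attach() removal below) could move the task between memcgs at the same time. A compact pthread sketch of that "read the memcg and register under one lock" pattern; memcg membership is reduced to a counter and all names are illustrative.

#include <pthread.h>
#include <stdio.h>

struct memcg {
    const char *name;
    int nr_mms;                 /* stand-in for the per-memcg mm list */
};

struct fake_mm {
    struct memcg *memcg;        /* which memcg's list this mm belongs to */
};

struct task {
    pthread_mutex_t lock;       /* stands in for task_lock() */
    struct memcg *memcg;
    struct fake_mm *mm;
};

/* fork path: read the task's memcg and register the mm with it, atomically */
static void add_mm(struct task *t)
{
    pthread_mutex_lock(&t->lock);
    t->mm->memcg = t->memcg;
    t->memcg->nr_mms++;
    pthread_mutex_unlock(&t->lock);
}

/* cgroup migration path: retarget the task and move the mm under the same
   lock, so it can never observe a half-registered mm */
static void migrate_task(struct task *t, struct memcg *to)
{
    pthread_mutex_lock(&t->lock);
    if (t->mm->memcg && t->mm->memcg != to) {
        t->mm->memcg->nr_mms--;
        t->mm->memcg = to;
        to->nr_mms++;
    }
    t->memcg = to;
    pthread_mutex_unlock(&t->lock);
}

int main(void)
{
    struct memcg a = { "A", 0 }, b = { "B", 0 };
    struct fake_mm mm = { 0 };
    struct task t = { PTHREAD_MUTEX_INITIALIZER, &a, &mm };

    add_mm(&t);                 /* as the !CLONE_VM fork path did */
    migrate_task(&t, &b);       /* as the cgroup attach path did */
    printf("A has %d mms, B has %d mms\n", a.nr_mms, b.nr_mms);
    return 0;
}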

kernel/sched/core.c

@@ -5088,7 +5088,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
* finish_task_switch()'s mmdrop().
*/
switch_mm_irqs_off(prev->active_mm, next->mm, next);
lru_gen_use_mm(next->mm);
if (!prev->mm) { // from kernel
/* will mmdrop() in finish_task_switch(). */

mm/memcontrol.c

@@ -6208,29 +6208,6 @@ static void mem_cgroup_move_task(void)
}
#endif
#ifdef CONFIG_LRU_GEN
static void mem_cgroup_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct task_struct *task = NULL;
cgroup_taskset_for_each_leader(task, css, tset)
break;
if (!task)
return;
task_lock(task);
if (task->mm && task->mm->owner == task)
lru_gen_migrate_mm(task->mm);
task_unlock(task);
}
#else
static void mem_cgroup_attach(struct cgroup_taskset *tset)
{
}
#endif /* CONFIG_LRU_GEN */
static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
if (value == PAGE_COUNTER_MAX)
@@ -6574,7 +6551,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
.css_reset = mem_cgroup_css_reset,
.css_rstat_flush = mem_cgroup_css_rstat_flush,
.can_attach = mem_cgroup_can_attach,
.attach = mem_cgroup_attach,
.cancel_attach = mem_cgroup_cancel_attach,
.post_attach = mem_cgroup_move_task,
.dfl_cftypes = memory_files,
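
The removed attach callback only looks at the migration leader: if the leader has an mm and owns it, lru_gen_migrate_mm() moves that mm from the old memcg's walker list to the new one. Below is a generic sketch of moving an intrusive list node between two independently locked lists, deleting under the source lock and then inserting under the destination lock so the two locks never nest; it mirrors the del-then-add shape of the helpers declared earlier in this patch but is not the reverted implementation.

#include <pthread.h>
#include <stdio.h>

struct node {
    struct node *prev, *next;   /* intrusive list links, like struct list_head */
};

struct locked_list {
    pthread_mutex_t lock;       /* stands in for lru_gen_mm_list.lock */
    struct node head;           /* circular list head, like .fifo */
};

static void list_init(struct locked_list *l)
{
    pthread_mutex_init(&l->lock, NULL);
    l->head.prev = l->head.next = &l->head;
}

static void list_del_locked(struct locked_list *l, struct node *n)
{
    pthread_mutex_lock(&l->lock);
    n->prev->next = n->next;
    n->next->prev = n->prev;
    pthread_mutex_unlock(&l->lock);
}

static void list_add_tail_locked(struct locked_list *l, struct node *n)
{
    pthread_mutex_lock(&l->lock);
    n->prev = l->head.prev;
    n->next = &l->head;
    l->head.prev->next = n;
    l->head.prev = n;
    pthread_mutex_unlock(&l->lock);
}

/* migrate: leave the old list, then join the new one; the two locks are never
   held together, so there is no ordering requirement between memcgs */
static void migrate(struct node *n, struct locked_list *from, struct locked_list *to)
{
    list_del_locked(from, n);
    list_add_tail_locked(to, n);
}

int main(void)
{
    struct locked_list old_memcg, new_memcg;
    struct node mm;

    list_init(&old_memcg);
    list_init(&new_memcg);
    list_add_tail_locked(&old_memcg, &mm);
    migrate(&mm, &old_memcg, &new_memcg);
    printf("old empty: %d, new has mm: %d\n",
           old_memcg.head.next == &old_memcg.head,
           new_memcg.head.next == &mm);
    return 0;
}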

mm/vmscan.c (diff suppressed because it is too large)