ANDROID: implement wrapper for reverse migration

Reverse migration is used to balance the occupancy of memory zones
within a node when the imbalance is caused by an operation that
migrates pages to other zones, e.g. hot-removing and then hot-adding
the same memory. In that case the newly hot-added memory has a lot of
free memory which can be filled with the pages that were previously
migrated out of it (as part of offline/hot-remove), thereby relieving
pressure on the other zones of the node.

Upstream discussion: https://lore.kernel.org/all/ee78c83d-da9b-f6d1-4f66-934b7782acfb@codeaurora.org/

Change-Id: Ib3137dab0db66ecf6858c4077dcadb9dfd0c6b1c
Bug: 201263307
Signed-off-by: Charan Teja Reddy <quic_charante@quicinc.com>

commit f47b852faa (parent c34dfc24e0)
Author:    Charan Teja Reddy
Date:      2021-02-16 13:59:45 +05:30
Committer: Suren Baghdasaryan

5 changed files with 52 additions and 0 deletions

include/linux/compaction.h

@@ -180,6 +180,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
extern unsigned long isolate_and_split_free_page(struct page *page,
                        struct list_head *list);

#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -225,6 +227,12 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat,
{
}

static inline unsigned long isolate_and_split_free_page(struct page *page,
                        struct list_head *list)
{
        return 0;
}
#endif /* CONFIG_COMPACTION */
struct node;

include/linux/mmzone.h

@@ -1096,6 +1096,7 @@ static inline struct pglist_data *NODE_DATA(int nid)
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
extern int isolate_anon_lru_page(struct page *page);
/**
* for_each_online_pgdat - helper macro to iterate over all online nodes

mm/compaction.c

@@ -758,6 +758,29 @@ isolate_freepages_range(struct compact_control *cc,
        return pfn;
}

unsigned long isolate_and_split_free_page(struct page *page,
                                struct list_head *list)
{
        unsigned long isolated;
        unsigned int order;

        if (!PageBuddy(page))
                return 0;

        order = buddy_order(page);
        isolated = __isolate_free_page(page, order);
        if (!isolated)
                return 0;

        set_page_private(page, order);
        list_add(&page->lru, list);

        split_map_pages(list);

        return isolated;
}
EXPORT_SYMBOL_GPL(isolate_and_split_free_page);

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
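
As a reading aid only (not part of this patch), here is a minimal sketch of
how a reverse-migration consumer might use isolate_and_split_free_page() to
build a pool of destination pages from a newly hot-added zone.
fill_target_pool() and its parameters are invented names for the example; a
real caller would also have to deal with zone locking and with allocations
racing against the scan.

#include <linux/compaction.h>
#include <linux/list.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Hypothetical example: walk a (e.g. newly hot-added) zone and move up to
 * @nr_wanted base pages from the buddy free lists onto @pool.
 */
static unsigned long fill_target_pool(struct zone *zone,
                                      struct list_head *pool,
                                      unsigned long nr_wanted)
{
        unsigned long pfn, nr = 0;

        for (pfn = zone->zone_start_pfn;
             pfn < zone_end_pfn(zone) && nr < nr_wanted; pfn++) {
                struct page *page = pfn_to_online_page(pfn);

                if (!page)
                        continue;

                /* Only acts on pages still in the buddy allocator. */
                nr += isolate_and_split_free_page(page, pool);
        }

        return nr;
}

The contract this illustrates: the helper succeeds only on pages that are
currently PageBuddy, returns the number of base pages isolated (0 on
failure), and leaves the split, order-0 pages on the supplied list, ready to
be used as migration targets.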

mm/migrate.c

@@ -168,6 +168,7 @@ void putback_movable_pages(struct list_head *l)
                }
        }
}
EXPORT_SYMBOL_GPL(putback_movable_pages);

/*
 * Restore a potential migration pte to a working pte entry
@@ -1601,6 +1602,7 @@ out:
        return rc;
}
EXPORT_SYMBOL_GPL(migrate_pages);

struct page *alloc_migration_target(struct page *page, unsigned long private)
{

mm/page_alloc.c

@@ -496,6 +496,24 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
{
        return __get_pfnblock_flags_mask(page, pfn, mask);
}
EXPORT_SYMBOL_GPL(get_pfnblock_flags_mask);

int isolate_anon_lru_page(struct page *page)
{
        int ret;

        if (!PageLRU(page) || !PageAnon(page))
                return -EINVAL;

        if (!get_page_unless_zero(page))
                return -EINVAL;

        ret = isolate_lru_page(page);
        put_page(page);

        return ret;
}
EXPORT_SYMBOL_GPL(isolate_anon_lru_page);

static __always_inline int get_pfnblock_migratetype(const struct page *page,
                                        unsigned long pfn)
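
Again purely as illustration (not part of this patch, all names invented, and
assuming the migrate_pages() signature of the kernel this patch targets), the
sketch below shows how the exported helpers could be combined:
isolate_anon_lru_page() collects migratable anonymous pages from the source
zone, migrate_pages() moves them into pages handed out from a pre-filled
target pool (for example one built by the fill_target_pool() sketch above),
and putback_movable_pages() restores anything that could not be migrated.

#include <linux/list.h>
#include <linux/memory_hotplug.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* new_page_t callback: hand out one order-0 page from the target pool. */
static struct page *take_target_page(struct page *old, unsigned long private)
{
        struct list_head *pool = (struct list_head *)private;
        struct page *page;

        page = list_first_entry_or_null(pool, struct page, lru);
        if (page)
                list_del(&page->lru);

        return page;
}

static int reverse_migrate_zone(struct zone *src, struct list_head *pool)
{
        LIST_HEAD(source_pages);
        unsigned long pfn;
        int ret;

        /* Pull migratable anonymous pages off the LRU of the source zone. */
        for (pfn = src->zone_start_pfn; pfn < zone_end_pfn(src); pfn++) {
                struct page *page = pfn_to_online_page(pfn);

                if (page && !isolate_anon_lru_page(page))
                        list_add(&page->lru, &source_pages);
        }

        /* Move them into pages taken from the target pool. */
        ret = migrate_pages(&source_pages, take_target_page, NULL,
                            (unsigned long)pool, MIGRATE_SYNC,
                            MR_MEMORY_HOTPLUG);

        /* Anything that failed to migrate is still on the list. */
        if (!list_empty(&source_pages))
                putback_movable_pages(&source_pages);

        return ret < 0 ? ret : 0;
}

Passing the pool through the private argument keeps the callback stateless,
which mirrors how callers of alloc_migration_target() pass a pointer to
their migration_target_control through the same parameter.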