ANDROID: mm: cma: skip problematic pageblock
alloc_contig_range is supposed to work on a range aligned to
max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) granularity. If it fails
at some page and returns an error to the caller, the caller does not
know which page caused the failure, so it keeps retrying with a new
range that still includes the failed page and hits the same error
over and over until the retries finally move past that granularity
block. Instead, let CMA remember which pfn was troublesome in the
previous attempt and continue from the next pageblock beyond the
failed page, so it does not hit the same error repeatedly.
Currently, this option applies only to the __GFP_NORETRY case, to
stay safe for existing CMA users.
Bug: 192475091
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I0959c9df3d4b36408a68920abbb4d52d31026079
Signed-off-by: Richard Chang <richardycc@google.com>
(cherry picked from commit 0e688e972d)
parent c63e78a29a
commit 60d2dad38e
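For reference, a minimal standalone sketch (userspace C, not part of the patch) of the arithmetic described in the commit message. GRANULARITY and the failed pfn are invented example values; the helpers only mirror the rounding that pfn_max_align_down()/pfn_max_align_up() perform in the kernel.

/*
 * Standalone userspace sketch, not kernel code.
 */
#include <stdio.h>

#define GRANULARITY 1024UL	/* stand-in for max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) */

static unsigned long align_down(unsigned long pfn)
{
	return pfn & ~(GRANULARITY - 1);
}

static unsigned long align_up(unsigned long pfn)
{
	return (pfn + GRANULARITY - 1) & ~(GRANULARITY - 1);
}

int main(void)
{
	unsigned long failed_pfn = 0x84123;	/* hypothetical unmovable page */

	/* Any attempt overlapping this block trips over the same page. */
	printf("block holding the failed page: [%#lx, %#lx)\n",
	       align_down(failed_pfn), align_up(failed_pfn + 1));
	/* The patch restarts the search at the block's upper boundary. */
	printf("retry restarts at pfn:         %#lx\n", align_up(failed_pfn + 1));
	return 0;
}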
@@ -693,6 +693,8 @@ static inline bool pm_suspended_storage(void)
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_CONTIG_ALLOC
+extern unsigned long pfn_max_align_up(unsigned long pfn);
+
 #define ACR_ERR_ISOLATE	(1 << 0)
 #define ACR_ERR_MIGRATE	(1 << 1)
 #define ACR_ERR_TEST	(1 << 2)
@@ -702,6 +704,7 @@ struct acr_info {
 	unsigned long nr_migrated;
 	unsigned long nr_reclaimed;
 	unsigned int err;
+	unsigned long failed_pfn;
 };
 
 /* The below functions must be run on a range from a single zone. */
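For illustration, a small self-contained userspace sketch of how a caller might decode the error bits reported through struct acr_info. Only the fields and flag values visible in the hunks above are used; the sample error state filled into the struct is made up.

#include <stdio.h>

#define ACR_ERR_ISOLATE	(1 << 0)
#define ACR_ERR_MIGRATE	(1 << 1)
#define ACR_ERR_TEST	(1 << 2)

struct acr_info {
	unsigned long nr_migrated;
	unsigned long nr_reclaimed;
	unsigned int err;
	unsigned long failed_pfn;
};

int main(void)
{
	/* Pretend an attempt failed during migration at this pfn. */
	struct acr_info info = { .err = ACR_ERR_MIGRATE, .failed_pfn = 0x84123 };

	if (info.err & ACR_ERR_ISOLATE)
		printf("isolation failed, first bad pfn %#lx\n", info.failed_pfn);
	if (info.err & ACR_ERR_MIGRATE)
		printf("migration failed, first bad pfn %#lx\n", info.failed_pfn);
	if (info.err & ACR_ERR_TEST)
		printf("isolation re-check failed at pfn %#lx\n", info.failed_pfn);
	return 0;
}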
@@ -44,7 +44,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			 unsigned migratetype, int flags);
+			 unsigned migratetype, int flags,
+			 unsigned long *failed_pfn);
 
 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
@@ -58,7 +59,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
-			int isol_flags);
+			int isol_flags, unsigned long *failed_pfn);
 
 struct page *alloc_migrate_target(struct page *page, unsigned long private);
 
mm/cma.c
@@ -535,8 +535,16 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
 		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
 					   count, align);
-		/* try again with a bit different memory target */
-		start = bitmap_no + mask + 1;
+
+		if (info.failed_pfn && gfp_mask & __GFP_NORETRY) {
+			/* try again from following failed page */
+			start = (pfn_max_align_up(info.failed_pfn + 1) -
+				 cma->base_pfn) >> cma->order_per_bit;
+
+		} else {
+			/* try again with a bit different memory target */
+			start = bitmap_no + mask + 1;
+		}
 	}
 
 	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
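A standalone sketch (userspace C, invented numbers) of the new retry arithmetic in the hunk above: the first granularity-aligned pfn past the failed page is converted back into a CMA bitmap offset, using the same expression as the added code. base_pfn, order_per_bit, the failed pfn, and GRANULARITY are all example values.

#include <stdio.h>

#define GRANULARITY 1024UL	/* max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) */

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return (pfn + GRANULARITY - 1) & ~(GRANULARITY - 1);
}

int main(void)
{
	unsigned long base_pfn = 0x80000;	/* first pfn of the CMA area */
	unsigned int order_per_bit = 0;		/* one bitmap bit per page */
	unsigned long failed_pfn = 0x84123;	/* page that could not be migrated */

	/* Skip the whole block containing the failed page, then turn the
	 * resulting pfn into a bitmap index relative to the CMA base. */
	unsigned long start = (pfn_max_align_up(failed_pfn + 1) - base_pfn)
				>> order_per_bit;

	printf("next bitmap search starts at bit %lu (pfn %#lx)\n",
	       start, base_pfn + (start << order_per_bit));
	return 0;
}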
@@ -1967,7 +1967,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE,
-				       MEMORY_OFFLINE | REPORT_FAILURE);
+				       MEMORY_OFFLINE | REPORT_FAILURE, NULL);
 	if (ret) {
 		reason = "failure to isolate range";
 		goto failed_removal_pcplists_disabled;
@@ -2021,7 +2021,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
 			goto failed_removal_isolated;
 		}
 
-		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
+		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE, NULL);
 
 	} while (ret);
 
@@ -9111,7 +9111,7 @@ static unsigned long pfn_max_align_down(unsigned long pfn)
 				 pageblock_nr_pages) - 1);
 }
 
-static unsigned long pfn_max_align_up(unsigned long pfn)
+unsigned long pfn_max_align_up(unsigned long pfn)
 {
 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
 				pageblock_nr_pages));
@@ -9213,6 +9213,12 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 				page_pinner_failure_detect(page);
 			}
 		}
+
+		if (!list_empty(&cc->migratepages)) {
+			page = list_first_entry(&cc->migratepages, struct page , lru);
+			info->failed_pfn = page_to_pfn(page);
+		}
+
 		putback_movable_pages(&cc->migratepages);
 		info->err |= ACR_ERR_MIGRATE;
 		return ret;
@@ -9287,7 +9293,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 */
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
-				       pfn_max_align_up(end), migratetype, 0);
+				       pfn_max_align_up(end), migratetype, 0,
+				       &info->failed_pfn);
 	if (ret) {
 		info->err |= ACR_ERR_ISOLATE;
 		return ret;
@@ -9354,7 +9361,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	}
 
 	/* Make sure the range is really isolated. */
-	if (test_pages_isolated(outer_start, end, 0)) {
+	if (test_pages_isolated(outer_start, end, 0, &info->failed_pfn)) {
 		ret = -EBUSY;
 		info->err |= ACR_ERR_TEST;
 		goto done;
@@ -181,7 +181,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			     unsigned migratetype, int flags)
+			     unsigned migratetype, int flags,
+			     unsigned long *failed_pfn)
 {
 	unsigned long pfn;
 	unsigned long undo_pfn;
@@ -197,6 +198,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 		if (page) {
 			if (set_migratetype_isolate(page, migratetype, flags)) {
 				undo_pfn = pfn;
+				if (failed_pfn)
+					*failed_pfn = page_to_pfn(page);
 				goto undo;
 			}
 		}
@@ -278,7 +281,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
 
 /* Caller should ensure that requested range is in a single zone */
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
-			int isol_flags)
+			int isol_flags, unsigned long *failed_pfn)
 {
 	unsigned long pfn, flags;
 	struct page *page;
@@ -297,6 +300,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 	}
 	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
 	if ((pfn < end_pfn) || !page) {
+		if (failed_pfn)
+			*failed_pfn = pfn;
 		ret = -EBUSY;
 		goto out;
 	}