From 60d2dad38e3dce7359b318c436dd06e5a55da15a Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 29 Jun 2021 12:08:44 -0700 Subject: [PATCH] ANDROID: mm: cma: skip problematic pageblock alloc_contig_range is supposed to work on a range aligned to max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) granularity. If it fails at a page and returns an error to the user, the user doesn't know which page caused the allocation failure, and keeps retrying with a new range that still includes the failed page, hitting the same error again and again until the retried range finally moves past the granularity block containing that page. Instead, let's make CMA aware of which pfn was troublesome in the previous trial and continue the next attempt from the pageblock following the failed page, so it doesn't hit the same error repeatedly. Currently, this option works only for the __GFP_NORETRY case, to be safe for existing CMA users. Bug: 192475091 Signed-off-by: Minchan Kim Change-Id: I0959c9df3d4b36408a68920abbb4d52d31026079 Signed-off-by: Richard Chang (cherry picked from commit 0e688e972d7efc9fba8b5050d43245ab904c21a4) --- include/linux/gfp.h | 3 +++ include/linux/page-isolation.h | 5 +++-- mm/cma.c | 12 ++++++++++-- mm/memory_hotplug.c | 4 ++-- mm/page_alloc.c | 13 ++++++++++--- mm/page_isolation.c | 9 +++++++-- 6 files changed, 35 insertions(+), 11 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index ba820707464a..b86ae05b4282 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -693,6 +693,8 @@ static inline bool pm_suspended_storage(void) #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_CONTIG_ALLOC +extern unsigned long pfn_max_align_up(unsigned long pfn); + #define ACR_ERR_ISOLATE (1 << 0) #define ACR_ERR_MIGRATE (1 << 1) #define ACR_ERR_TEST (1 << 2) @@ -702,6 +704,7 @@ struct acr_info { unsigned long nr_migrated; unsigned long nr_reclaimed; unsigned int err; + unsigned long failed_pfn; }; /* The below functions must be run on a range from a single zone. 
*/ diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 572458016331..ccd3ed46434f 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -44,7 +44,8 @@ int move_freepages_block(struct zone *zone, struct page *page, */ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - unsigned migratetype, int flags); + unsigned migratetype, int flags, + unsigned long *failed_pfn); /* * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. @@ -58,7 +59,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, * Test all pages in [start_pfn, end_pfn) are isolated or not. */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, - int isol_flags); + int isol_flags, unsigned long *failed_pfn); struct page *alloc_migrate_target(struct page *page, unsigned long private); diff --git a/mm/cma.c b/mm/cma.c index 851677a7dcfa..3fddb38be598 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -535,8 +535,16 @@ struct page *cma_alloc(struct cma *cma, unsigned long count, trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), count, align); - /* try again with a bit different memory target */ - start = bitmap_no + mask + 1; + + if (info.failed_pfn && gfp_mask & __GFP_NORETRY) { + /* try again from following failed page */ + start = (pfn_max_align_up(info.failed_pfn + 1) - + cma->base_pfn) >> cma->order_per_bit; + + } else { + /* try again with a bit different memory target */ + start = bitmap_no + mask + 1; + } } trace_cma_alloc_finish(cma->name, pfn, page, count, align); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 4b9bd6ffaeed..ddb82d1082a2 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1967,7 +1967,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, /* set above range as isolated */ ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, - MEMORY_OFFLINE | REPORT_FAILURE); + MEMORY_OFFLINE | REPORT_FAILURE, NULL); if 
(ret) { reason = "failure to isolate range"; goto failed_removal_pcplists_disabled; @@ -2021,7 +2021,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, goto failed_removal_isolated; } - ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); + ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE, NULL); } while (ret); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d122a59b85fe..78b69ca44311 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -9111,7 +9111,7 @@ static unsigned long pfn_max_align_down(unsigned long pfn) pageblock_nr_pages) - 1); } -static unsigned long pfn_max_align_up(unsigned long pfn) +unsigned long pfn_max_align_up(unsigned long pfn) { return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, pageblock_nr_pages)); @@ -9213,6 +9213,12 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, page_pinner_failure_detect(page); } } + + if (!list_empty(&cc->migratepages)) { + page = list_first_entry(&cc->migratepages, struct page , lru); + info->failed_pfn = page_to_pfn(page); + } + putback_movable_pages(&cc->migratepages); info->err |= ACR_ERR_MIGRATE; return ret; @@ -9287,7 +9293,8 @@ int alloc_contig_range(unsigned long start, unsigned long end, */ ret = start_isolate_page_range(pfn_max_align_down(start), - pfn_max_align_up(end), migratetype, 0); + pfn_max_align_up(end), migratetype, 0, + &info->failed_pfn); if (ret) { info->err |= ACR_ERR_ISOLATE; return ret; @@ -9354,7 +9361,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, } /* Make sure the range is really isolated. 
*/ - if (test_pages_isolated(outer_start, end, 0)) { + if (test_pages_isolated(outer_start, end, 0, &info->failed_pfn)) { ret = -EBUSY; info->err |= ACR_ERR_TEST; goto done; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 31fe2e6e2eb1..c142c4eb327b 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -181,7 +181,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * Return: 0 on success and -EBUSY if any part of range cannot be isolated. */ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - unsigned migratetype, int flags) + unsigned migratetype, int flags, + unsigned long *failed_pfn) { unsigned long pfn; unsigned long undo_pfn; @@ -197,6 +198,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, if (page) { if (set_migratetype_isolate(page, migratetype, flags)) { undo_pfn = pfn; + if (failed_pfn) + *failed_pfn = page_to_pfn(page); goto undo; } } @@ -278,7 +281,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, /* Caller should ensure that requested range is in a single zone */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, - int isol_flags) + int isol_flags, unsigned long *failed_pfn) { unsigned long pfn, flags; struct page *page; @@ -297,6 +300,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, } page = __first_valid_page(start_pfn, end_pfn - start_pfn); if ((pfn < end_pfn) || !page) { + if (failed_pfn) + *failed_pfn = pfn; ret = -EBUSY; goto out; }