BACKPORT: mm: khugepaged: recalculate min_free_kbytes after stopping khugepaged

When transparent huge pages are initialized, min_free_kbytes is
recalculated to the higher value that khugepaged requires.

So when transparent huge pages get disabled, min_free_kbytes should be
recalculated rather than left at the higher value set by khugepaged.

Link: https://lkml.kernel.org/r/1633937809-16558-1-git-send-email-liangcaifan19@gmail.com
Signed-off-by: Liangcai Fan <liangcaifan19@gmail.com>
Signed-off-by: Chunyan Zhang <zhang.lyra@gmail.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

(cherry picked from commit bd3400ea173fb611cdf2030d03620185ff6c0b0e)

Bug: 235523176
Signed-off-by: Chinwen Chang <chinwen.chang@mediatek.com>
Change-Id: I815893d25186847933db2a0872528fb15a00b3c8
This commit is contained in:
Liangcai Fan
2021-11-05 13:41:36 -07:00
committed by Carlos Llamas
parent 62bb81afc5
commit bf46e6f5db
3 changed files with 15 additions and 3 deletions

View File

@@ -2520,6 +2520,7 @@ extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context, unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype); struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void); extern void setup_per_zone_wmarks(void);
extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void); extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void); extern void mem_init(void);
extern void __init mmap_init(void); extern void __init mmap_init(void);

View File

@@ -2299,6 +2299,11 @@ static void set_recommended_min_free_kbytes(void)
int nr_zones = 0; int nr_zones = 0;
unsigned long recommended_min; unsigned long recommended_min;
if (!khugepaged_enabled()) {
calculate_min_free_kbytes();
goto update_wmarks;
}
for_each_populated_zone(zone) { for_each_populated_zone(zone) {
/* /*
* We don't need to worry about fragmentation of * We don't need to worry about fragmentation of
@@ -2334,6 +2339,8 @@ static void set_recommended_min_free_kbytes(void)
min_free_kbytes = recommended_min; min_free_kbytes = recommended_min;
} }
update_wmarks:
setup_per_zone_wmarks(); setup_per_zone_wmarks();
} }
@@ -2355,12 +2362,11 @@ int start_stop_khugepaged(void)
if (!list_empty(&khugepaged_scan.mm_head)) if (!list_empty(&khugepaged_scan.mm_head))
wake_up_interruptible(&khugepaged_wait); wake_up_interruptible(&khugepaged_wait);
set_recommended_min_free_kbytes();
} else if (khugepaged_thread) { } else if (khugepaged_thread) {
kthread_stop(khugepaged_thread); kthread_stop(khugepaged_thread);
khugepaged_thread = NULL; khugepaged_thread = NULL;
} }
set_recommended_min_free_kbytes();
fail: fail:
mutex_unlock(&khugepaged_mutex); mutex_unlock(&khugepaged_mutex);
return err; return err;

View File

@@ -8594,7 +8594,7 @@ void setup_per_zone_wmarks(void)
* 8192MB: 11584k * 8192MB: 11584k
* 16384MB: 16384k * 16384MB: 16384k
*/ */
int __meminit init_per_zone_wmark_min(void) void calculate_min_free_kbytes(void)
{ {
unsigned long lowmem_kbytes; unsigned long lowmem_kbytes;
int new_min_free_kbytes; int new_min_free_kbytes;
@@ -8612,6 +8612,11 @@ int __meminit init_per_zone_wmark_min(void)
pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
new_min_free_kbytes, user_min_free_kbytes); new_min_free_kbytes, user_min_free_kbytes);
} }
}
int __meminit init_per_zone_wmark_min(void)
{
calculate_min_free_kbytes();
setup_per_zone_wmarks(); setup_per_zone_wmarks();
refresh_zone_stat_thresholds(); refresh_zone_stat_thresholds();
setup_per_zone_lowmem_reserve(); setup_per_zone_lowmem_reserve();