FROMLIST: mm: fs: Invalidate BH LRU during page migration

Pages containing buffer_heads that sit in one of the per-CPU
buffer_head LRU caches are pinned and thus cannot be migrated.
This can prevent CMA allocations from succeeding; such allocations
are often used on platforms with co-processors (such as a DSP) that
can only use physically contiguous memory. It can also prevent memory
hot-unplug from succeeding, since hot-unplug must migrate at least
MIN_MEMORY_BLOCK_SIZE bytes of memory (8 MiB to 1 GiB, depending on
the architecture).
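
For context on why an LRU reference pins the page: drop_buffers()
refuses to detach buffers from a page while any buffer_head on it is
busy, and the busy test includes the b_count reference that
bh_lru_install() holds. This is the pre-existing check in fs/buffer.c,
quoted here for reference (it is not part of this diff):

static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}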

Correspondingly, invalidate the BH LRU caches before a migration
starts, and keep any buffer_head from being cached in the LRU caches
until migration has finished.
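
The lru_cache_disable()/lru_cache_enable() pair (and the
lru_cache_disabled() predicate used below) come from the companion
patch in this series ("mm: disable LRU pagevec during the migration
temporarily"). A migration path is expected to bracket itself with the
pair, roughly as in the sketch below; the source list and the
migration-target control argument (mtc) are illustrative stand-ins,
not code from this commit:

	lru_cache_disable();	/* drain per-CPU LRUs and block refills */
	ret = migrate_pages(&source, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG);
	lru_cache_enable();	/* allow per-CPU caching again */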

Bug: 180018981
Link: https://lore.kernel.org/linux-mm/20210319175127.886124-3-minchan@kernel.org/
Tested-by: Oliver Sang <oliver.sang@intel.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: Idb8279cb561812f5f1b43ddbb742c1808700754e
Author:    Minchan Kim
Date:      2021-03-19 12:49:41 -07:00
Committer: Suren Baghdasaryan
Commit:    a0a0b3f42e (parent: c6bc1396ce)

3 changed files with 38 additions and 7 deletions

diff --git a/fs/buffer.c b/fs/buffer.c
--- a/fs/buffer.c
+++ b/fs/buffer.c

@@ -1265,6 +1265,15 @@ static void bh_lru_install(struct buffer_head *bh)
 	int i;
 
 	check_irqs_on();
+	/*
+	 * the refcount of buffer_head in bh_lru prevents dropping the
+	 * attached page(i.e., try_to_free_buffers) so it could cause
+	 * failing page migration.
+	 * Skip putting upcoming bh into bh_lru until migration is done.
+	 */
+	if (lru_cache_disabled())
+		return;
+
 	bh_lru_lock();
 
 	b = this_cpu_ptr(&bh_lrus);
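
lru_cache_disabled() itself is introduced by the companion patch; it
is just a read of a global disable counter, along these lines (quoted
for context, not part of this diff):

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}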
@@ -1405,6 +1414,15 @@ __bread_gfp(struct block_device *bdev, sector_t block,
 }
 EXPORT_SYMBOL(__bread_gfp);
 
+static void __invalidate_bh_lrus(struct bh_lru *b)
+{
+	int i;
+
+	for (i = 0; i < BH_LRU_SIZE; i++) {
+		brelse(b->bhs[i]);
+		b->bhs[i] = NULL;
+	}
+}
 /*
  * invalidate_bh_lrus() is called rarely - but not only at unmount.
  * This doesn't race because it runs in each cpu either in irq
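
__invalidate_bh_lrus() can call brelse() on every slot unconditionally
because empty slots hold NULL and brelse() tolerates that; the
existing helper in include/linux/buffer_head.h reads (quoted for
reference):

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}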
@@ -1413,16 +1431,12 @@ EXPORT_SYMBOL(__bread_gfp);
 static void invalidate_bh_lru(void *arg)
 {
 	struct bh_lru *b = &get_cpu_var(bh_lrus);
-	int i;
 
-	for (i = 0; i < BH_LRU_SIZE; i++) {
-		brelse(b->bhs[i]);
-		b->bhs[i] = NULL;
-	}
+	__invalidate_bh_lrus(b);
 	put_cpu_var(bh_lrus);
 }
 
-static bool has_bh_in_lru(int cpu, void *dummy)
+bool has_bh_in_lru(int cpu, void *dummy)
 {
 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
 	int i;
@@ -1484,6 +1498,16 @@ static void evict_bh_lrus(struct xarray *busy_bhs)
 			busy_bhs, 1);
 }
 
+void invalidate_bh_lrus_cpu(int cpu)
+{
+	struct bh_lru *b;
+
+	bh_lru_lock();
+	b = per_cpu_ptr(&bh_lrus, cpu);
+	__invalidate_bh_lrus(b);
+	bh_lru_unlock();
+}
+
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
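
invalidate_bh_lrus_cpu() reuses the existing bh_lru_lock()/
bh_lru_unlock() pair, which protects the per-CPU lists by disabling
interrupts on SMP and preemption otherwise; the pre-existing
definition in fs/buffer.c is (quoted for reference):

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

Note that it is called from lru_add_drain_cpu(cpu) below, which the
drain machinery runs on the target CPU itself, so the per_cpu_ptr()
access is effectively local.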

diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h

@@ -194,6 +194,8 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(int cpu);
+bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
@@ -406,6 +408,8 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
 #define buffer_heads_over_limit 0
 
 #endif /* CONFIG_BLOCK */

diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c

@@ -36,6 +36,7 @@
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 #include <linux/local_lock.h>
+#include <linux/buffer_head.h>
 
 #include "internal.h"
 
@@ -665,6 +666,7 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
 	activate_page_drain(cpu);
+	invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -852,7 +854,8 @@ inline void __lru_add_drain_all(bool force_all_cpus)
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
-		    need_activate_page_drain(cpu)) {
+		    need_activate_page_drain(cpu) ||
+		    has_bh_in_lru(cpu, NULL)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
 			__cpumask_set_cpu(cpu, &has_work);
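
For the full path: when has_bh_in_lru() (or any of the pagevec
checks) is true for a CPU, __lru_add_drain_all() queues
lru_add_drain_per_cpu() on that CPU, which calls lru_add_drain() and
thus lru_add_drain_cpu() on the CPU that owns the caches, reaching the
new invalidate_bh_lrus_cpu() hook. The unchanged mm/swap.c plumbing,
quoted for reference:

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}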