mm: memcontrol: rename lruvec_holds_page_lru_lock to page_matches_lruvec
lruvec_holds_page_lru_lock() doesn't check anything about locking and is used
to check whether the page belongs to the lruvec.  So rename it to
page_matches_lruvec().

Link: https://lkml.kernel.org/r/20210417043538.9793-6-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
committed by
Linus Torvalds
parent
f2e4d28dd9
commit
7467c39128
@@ -1492,8 +1492,8 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
-					      struct lruvec *lruvec)
+/* Test requires a stable page->memcg binding, see page_memcg() */
+static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
 {
 	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
 	       lruvec_memcg(lruvec) == page_memcg(page);
@@ -1504,7 +1504,7 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 		struct lruvec *locked_lruvec)
 {
 	if (locked_lruvec) {
-		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+		if (page_matches_lruvec(page, locked_lruvec))
 			return locked_lruvec;
 
 		unlock_page_lruvec_irq(locked_lruvec);
@@ -1518,7 +1518,7 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
 		struct lruvec *locked_lruvec, unsigned long *flags)
 {
 	if (locked_lruvec) {
-		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+		if (page_matches_lruvec(page, locked_lruvec))
 			return locked_lruvec;
 
 		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
@@ -2063,7 +2063,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		 * All pages were isolated from the same lruvec (and isolation
 		 * inhibits memcg migration).
 		 */
-		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
Reference in New Issue
Block a user