mm/huge_memory.c: remove dedicated macro HPAGE_CACHE_INDEX_MASK
[ Upstream commit b2bd53f18b ]
Patch series "Cleanup and fixup for huge_memory", v3.
This series contains cleanups to remove a dedicated macro and an
unnecessary tlb_remove_page_size() call for the huge zero pmd. It also
adds the missing read-only THP check to transparent_hugepage_enabled()
and avoids discarding a hugepage while other processes are mapping it.
More details can be found in the respective changelogs.
This patch (of 5):
Rewrite the pgoff checking logic to remove the macro
HPAGE_CACHE_INDEX_MASK, which is only used here, and thereby simplify
the code.
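For illustration only (not part of this patch): a minimal userspace sketch of
why the rewrite is equivalent. For a power-of-two N, comparing the low bits of
two values is the same as testing whether their difference is N-aligned.
HPAGE_PMD_NR is hard-coded here to the common x86-64 value and IS_ALIGNED()
is reproduced in simplified form; both are stand-ins for the kernel
definitions, not the kernel code itself.

/*
 * Userspace sketch: the old masked comparison and the new IS_ALIGNED()
 * form agree for every pair of (virtual page index, file page offset),
 * because HPAGE_PMD_NR is a power of two.
 */
#include <assert.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)	/* simplified */

#define HPAGE_PMD_NR		512UL	/* 2M huge page / 4K base pages */
#define HPAGE_CACHE_INDEX_MASK	(HPAGE_PMD_NR - 1)	/* the removed macro */

int main(void)
{
	for (unsigned long start_idx = 0; start_idx < 4 * HPAGE_PMD_NR; start_idx += 7) {
		for (unsigned long pgoff = 0; pgoff < 4 * HPAGE_PMD_NR; pgoff += 13) {
			int old = (start_idx & HPAGE_CACHE_INDEX_MASK) !=
				  (pgoff & HPAGE_CACHE_INDEX_MASK);
			int new = !IS_ALIGNED(start_idx - pgoff, HPAGE_PMD_NR);

			assert(old == new);
		}
	}
	printf("old mask check and IS_ALIGNED() rewrite agree\n");
	return 0;
}

The equivalence relies on HPAGE_PMD_NR being a power of two, which holds
because it is defined as 1 << HPAGE_PMD_ORDER.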
Link: https://lkml.kernel.org/r/20210511134857.1581273-1-linmiaohe@huawei.com
Link: https://lkml.kernel.org/r/20210511134857.1581273-2-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Commit aa41f7a2a6 (parent 9b0b9edea1), committed by Greg Kroah-Hartman.
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -161,15 +161,13 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 
 bool transparent_hugepage_enabled(struct vm_area_struct *vma);
 
-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
-
 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 		unsigned long haddr)
 {
 	/* Don't have to check pgoff for anonymous vma */
 	if (!vma_is_anonymous(vma)) {
-		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
-			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+				HPAGE_PMD_NR))
 			return false;
 	}
 
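Also illustrative only: a hedged userspace model of what the file-backed check
guards. A VMA can map page-cache huge pages with a single PMD only when its
start address and file offset are congruent modulo HPAGE_PMD_NR pages, so that
a naturally aligned huge page in the file lands on a PMD-aligned virtual
address. The struct and constants below are simplified stand-ins for the
kernel's definitions, not kernel code.

/*
 * Simplified model of the pgoff check transhuge_vma_suitable() applies to
 * file-backed VMAs, using the IS_ALIGNED() form this patch introduces.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_PMD_NR	512UL			/* pages per PMD mapping */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

struct vma_model {
	unsigned long vm_start;		/* virtual start address */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
};

static bool pgoff_suits_pmd(const struct vma_model *vma)
{
	return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
			  HPAGE_PMD_NR);
}

int main(void)
{
	struct vma_model ok  = { .vm_start = 0x200000, .vm_pgoff = 512 };
	struct vma_model bad = { .vm_start = 0x200000, .vm_pgoff = 513 };

	printf("congruent mapping:   %s\n", pgoff_suits_pmd(&ok) ? "suitable" : "unsuitable");
	printf("incongruent mapping: %s\n", pgoff_suits_pmd(&bad) ? "suitable" : "unsuitable");
	return 0;
}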