ANDROID: mm: avoid using vmacache in lockless vma search
When searching for a VMA under RCU protection, the vmacache should be avoided: a race with munmap() might result in finding a vma and placing it into the vmacache after munmap() has already removed that vma and called vmacache_invalidate(). Once that vma is freed, the vmacache is left holding a dangling vma pointer.

Bug: 257443051
Change-Id: I62438305fcf5139974f4f7d3bae5b22c74084a59
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
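Spelled out as an interleaving, the race described above looks roughly like this; this is a sketch reconstructed from the commit message, and the exact call paths vary by kernel branch:

/*
 * Lockless reader (RCU only)              munmap() under mmap_lock
 * --------------------------              ------------------------
 * rcu_read_lock();
 * __find_vma(mm, addr)
 *   vmacache_find(mm, addr)   -> miss
 *   walk the VMA tree         -> finds vma
 *                                          detach vma from the tree
 *                                          vmacache_invalidate(mm);
 *   vmacache_update(addr, vma);            (stale vma re-cached)
 *                                          free vma after the grace period
 * rcu_read_unlock();
 *
 * The next vmacache_find() can now return a pointer to freed memory,
 * which is why the lockless path below is switched to a lookup that
 * never touches the vmacache.
 */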
include/linux/mm.h (2 changed lines)

@@ -2734,6 +2734,8 @@ extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #define expand_upwards(vma, address) (0)
 #endif
 
+extern struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm,
+						 unsigned long addr);
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 extern struct vm_area_struct * __find_vma(struct mm_struct * mm, unsigned long addr);
 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
@@ -216,7 +216,7 @@ struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	rcu_read_lock();
-	vma = __find_vma(mm, addr);
+	vma = find_vma_from_tree(mm, addr);
 	if (vma) {
 		if (vma->vm_start > addr ||
 		    !atomic_inc_unless_negative(&vma->file_ref_count))
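For reference, the lockless caller after this change would read roughly as follows. This is a sketch condensed from the hunk above; the diff ends at the refcount check, so the failure handling and return path shown here are assumptions, not quoted code.

struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	rcu_read_lock();
	vma = find_vma_from_tree(mm, addr);	/* tree walk only, never touches the vmacache */
	if (vma) {
		/* Pin the VMA through its file refcount before dropping RCU. */
		if (vma->vm_start > addr ||
		    !atomic_inc_unless_negative(&vma->file_ref_count))
			vma = NULL;	/* assumed failure handling, not in the hunk */
	}
	rcu_read_unlock();

	return vma;
}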
mm/mmap.c (25 changed lines)
@@ -2279,16 +2279,10 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 }
 EXPORT_SYMBOL(get_unmapped_area);
 
-/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
-struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm, unsigned long addr)
 {
 	struct rb_node *rb_node;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = NULL;
 
-	/* Check the cache first. */
-	vma = vmacache_find(mm, addr);
-	if (likely(vma))
-		return vma;
-
 	rb_node = mm->mm_rb.rb_node;
 
@@ -2306,6 +2300,21 @@ struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
 			rb_node = rb_node->rb_right;
 	}
 
+	return vma;
+}
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma;
+
+	/* Check the cache first. */
+	vma = vmacache_find(mm, addr);
+	if (likely(vma))
+		return vma;
+
+	vma = find_vma_from_tree(mm, addr);
+
 	if (vma)
 		vmacache_update(addr, vma);
 	return vma;
mm/nommu.c (33 changed lines)
@@ -659,6 +659,22 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 	vm_area_free(vma);
 }
 
+struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma;
+
+	/* trawl the list (there may be multiple mappings in which addr
+	 * resides) */
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma->vm_start > addr)
+			return NULL;
+		if (vma->vm_end > addr)
+			return vma;
+	}
+
+	return NULL;
+}
+
 /*
  * look up the first VMA in which addr resides, NULL if none
  * - should be called with mm->mmap_lock at least held readlocked
@@ -667,23 +683,16 @@ struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
 {
 	struct vm_area_struct *vma;
 
-	/* check the cache first */
+	/* Check the cache first. */
 	vma = vmacache_find(mm, addr);
 	if (likely(vma))
 		return vma;
 
-	/* trawl the list (there may be multiple mappings in which addr
-	 * resides) */
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		if (vma->vm_start > addr)
-			return NULL;
-		if (vma->vm_end > addr) {
-			vmacache_update(addr, vma);
-			return vma;
-		}
-	}
+	vma = find_vma_from_tree(mm, addr);
 
-	return NULL;
+	if (vma)
+		vmacache_update(addr, vma);
+	return vma;
 }
 EXPORT_SYMBOL(__find_vma);
 