ANDROID: add vma->file_ref_count to synchronize vma->vm_file destruction

In order to prevent destruction of vma->vm_file while it's being used
during speculative page fault handling, introduce an atomic refcounter.

Bug: 234527424
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I0e971156f3e76feb45136bac1582a7eaab8c75df
This commit is contained in:
Committed by: Carlos Llamas
Parent commit: 0864756fb0
Commit: 4daa3c254e
@@ -679,6 +679,9 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
|
|||||||
memset(vma, 0, sizeof(*vma));
|
memset(vma, 0, sizeof(*vma));
|
||||||
vma->vm_mm = mm;
|
vma->vm_mm = mm;
|
||||||
vma->vm_ops = &dummy_vm_ops;
|
vma->vm_ops = &dummy_vm_ops;
|
||||||
|
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||||
|
atomic_set(&vma->file_ref_count, 1);
|
||||||
|
#endif
|
||||||
INIT_LIST_HEAD(&vma->anon_vma_chain);
|
INIT_LIST_HEAD(&vma->anon_vma_chain);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3377,6 +3380,18 @@ static inline bool pte_spinlock(struct vm_fault *vmf)
|
|||||||
return __pte_map_lock(vmf);
|
return __pte_map_lock(vmf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool vma_get_file_ref(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
return atomic_inc_not_zero(&vma->file_ref_count);
|
||||||
|
}
|
||||||
|
|
||||||
|
extern void fput(struct file *);
|
||||||
|
static inline void vma_put_file_ref(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
if (vma && atomic_dec_and_test(&vma->file_ref_count))
|
||||||
|
fput(vma->vm_file);
|
||||||
|
}
|
||||||
|
|
||||||
#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
|
#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
|
||||||
|
|
||||||
#define pte_map_lock(___vmf) \
|
#define pte_map_lock(___vmf) \
|
||||||
|
|||||||
@@ -417,6 +417,9 @@ struct vm_area_struct {
|
|||||||
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
|
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
|
||||||
#endif
|
#endif
|
||||||
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
|
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
|
||||||
|
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||||
|
atomic_t file_ref_count;
|
||||||
|
#endif
|
||||||
} __randomize_layout;
|
} __randomize_layout;
|
||||||
|
|
||||||
struct core_thread {
|
struct core_thread {
|
||||||
|
|||||||
@@ -383,8 +383,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
|
|||||||
|
|
||||||
static inline void ____vm_area_free(struct vm_area_struct *vma)
|
static inline void ____vm_area_free(struct vm_area_struct *vma)
|
||||||
{
|
{
|
||||||
if (vma->vm_file)
|
|
||||||
fput(vma->vm_file);
|
|
||||||
kmem_cache_free(vm_area_cachep, vma);
|
kmem_cache_free(vm_area_cachep, vma);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -402,10 +400,15 @@ void vm_area_free(struct vm_area_struct *vma)
|
|||||||
free_anon_vma_name(vma);
|
free_anon_vma_name(vma);
|
||||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||||
if (atomic_read(&vma->vm_mm->mm_users) > 1) {
|
if (atomic_read(&vma->vm_mm->mm_users) > 1) {
|
||||||
|
if (vma->vm_file)
|
||||||
|
vma_put_file_ref(vma);
|
||||||
|
|
||||||
call_rcu(&vma->vm_rcu, __vm_area_free);
|
call_rcu(&vma->vm_rcu, __vm_area_free);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
if (vma->vm_file)
|
||||||
|
fput(vma->vm_file);
|
||||||
____vm_area_free(vma);
|
____vm_area_free(vma);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user