Revert "ANDROID: mm: add a field to store names for private anonymous memory"
This reverts commit 60500a4228.
Replacing the out-of-tree implementation with the upstream one.
Bug: 120441514
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: Ic34c8e16d51ccf9f00cb59d2de341e911bcb2828
@@ -431,8 +431,6 @@ is not associated with a file:
 [stack]                  the stack of the main process
 [vdso]                   the "virtual dynamic shared object",
                          the kernel system call handler
 [anon:<name>]            an anonymous mapping that has been
                          named by userspace
 =======                  ====================================

 or if empty, the mapping is anonymous.
@@ -466,7 +464,6 @@ Memory Area, or VMA) there is a series of lines such as the following::
 Locked:                0 kB
 THPeligible:           0
 VmFlags: rd ex mr mw me dw
 Name:                  name from userspace

The first of these lines shows the same information as is displayed for the
mapping in /proc/PID/maps. Following lines show the size of the mapping
@@ -564,9 +561,6 @@ be vanished or the reverse -- new added. Interpretation of their meaning
might change in future as well. So each consumer of these flags has to
follow each specific kernel version for the exact semantic.

The "Name" field will only be present on a mapping that has been named by
userspace, and will show the name passed in by userspace.

This file is only present if the CONFIG_MMU kernel configuration option is
enabled.
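As a concrete illustration of what the reverted interface produced (purely illustrative: the address range, permissions, and the name "example-buffer" below are invented), a mapping named through this mechanism used to appear in /proc/<pid>/maps roughly as::

    7f53b8c9d000-7f53b8ca1000 rw-p 00000000 00:00 0          [anon:example-buffer]

and the same name was echoed on the Name: line of that mapping's /proc/<pid>/smaps entry, as described above.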
@@ -123,56 +123,6 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
}
#endif

static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages_remote(current, mm,
				page_start_vaddr, 1, 0, &page, NULL, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
@@ -369,15 +319,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
			goto done;
		}

		if (is_stack(vma)) {
		if (is_stack(vma))
			name = "[stack]";
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			seq_pad(m, ' ');
			seq_print_vma_name(m, vma);
		}
	}

done:
@@ -875,11 +818,6 @@ static int show_smap(struct seq_file *m, void *v)
	smap_gather_stats(vma, &mss, 0);

	show_map_vma(m, vma);
	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name: ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
@@ -877,8 +877,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX,
				 vma_get_anon_name(vma));
				 NULL_VM_UFFD_CTX);
		if (prev)
			vma = prev;
		else
@@ -1437,8 +1436,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 ((struct vm_userfaultfd_ctx){ ctx }),
				 vma_get_anon_name(vma));
				 ((struct vm_userfaultfd_ctx){ ctx }));
		if (prev) {
			vma = prev;
			goto next;
@@ -1615,8 +1613,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX,
				 vma_get_anon_name(vma));
				 NULL_VM_UFFD_CTX);
		if (prev) {
			vma = prev;
			goto next;

@@ -2548,7 +2548,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
@@ -350,18 +350,11 @@ struct vm_area_struct {
	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 *
	 * For private anonymous mappings, a pointer to a null terminated string
	 * in the user process containing the name given to the vma, or NULL
	 * if unnamed.
	 */
	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} shared;
		const char __user *anon_name;
	};
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -816,13 +809,4 @@ typedef struct {
	unsigned long val;
} swp_entry_t;

/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		return NULL;

	return vma->anon_name;
}

#endif /* _LINUX_MM_TYPES_H */
@@ -269,7 +269,4 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SHARE_FROM	3 /* pull core_sched cookie to pid */
# define PR_SCHED_CORE_MAX		4

#define PR_SET_VMA		0x53564d41
# define PR_SET_VMA_ANON_NAME	0

#endif /* _LINUX_PRCTL_H */
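For context, the interface removed by this commit was driven from userspace through prctl(2) using the PR_SET_VMA constants shown above. The sketch below is illustrative only and not part of the patch: the mapping size and the name "example-buffer" are invented, and the fallback #defines simply mirror the values being deleted from prctl.h.

/* Illustrative userspace sketch: naming a private anonymous mapping via the
 * out-of-tree PR_SET_VMA prctl that this commit removes. Values are examples.
 */
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 4 * 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* The kernel stored this user pointer in vma->anon_name and read the
	 * string back page by page when printing /proc/<pid>/maps, so it had
	 * to remain valid for the lifetime of the mapping.
	 */
	static const char name[] = "example-buffer";

	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, (unsigned long)name))
		perror("prctl(PR_SET_VMA)");

	/* The region then showed up as [anon:example-buffer] in maps. */
	return 0;
}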
kernel/sys.c
@@ -42,8 +42,6 @@
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/mm.h>
#include <linux/mempolicy.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -2263,153 +2261,6 @@ int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
	return -EINVAL;
}

#ifdef CONFIG_MMU
static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
		struct vm_area_struct **prev,
		unsigned long start, unsigned long end,
		const char __user *name_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;

	if (name_addr == vma_get_anon_name(vma)) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, name_addr);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	if (!vma->vm_file)
		vma->anon_name = name_addr;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
		unsigned long arg)
{
	unsigned long tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this matches the handling in madvise.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			return error;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				return error;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
				(const char __user *)arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			return error;
		if (prev)
			vma = prev->vm_next;
		else /* madvise_remove dropped mmap_lock */
			vma = find_vma(current->mm, start);
	}
}

static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	int error;
	unsigned long len;
	unsigned long end;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	mmap_write_lock(mm);

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		error = prctl_set_vma_anon_name(start, end, arg);
		break;
	default:
		error = -EINVAL;
	}

	mmap_write_unlock(mm);

	return error;
}
#else /* CONFIG_MMU */
static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	return -EINVAL;
}
#endif

#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2624,9 +2475,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;

@@ -138,7 +138,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;

@@ -810,8 +810,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 vma_get_anon_name(vma));
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;

@@ -511,7 +511,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
mm/mmap.c
@@ -1029,8 +1029,7 @@ again:
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
				struct file *file, unsigned long vm_flags,
				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
				const char __user *anon_name)
				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1048,8 +1047,6 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
		return 0;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return 0;
	if (vma_get_anon_name(vma) != anon_name)
		return 0;
	return 1;
}

@@ -1082,10 +1079,9 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		     struct anon_vma *anon_vma, struct file *file,
		     pgoff_t vm_pgoff,
		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		     const char __user *anon_name)
		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
@@ -1104,10 +1100,9 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		    struct anon_vma *anon_vma, struct file *file,
		    pgoff_t vm_pgoff,
		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		    const char __user *anon_name)
		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
@@ -1118,9 +1113,9 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor. Or both (it neatly fills a hole).
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
@@ -1165,8 +1160,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
			const char __user *anon_name)
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
@@ -1196,8 +1190,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
					    anon_vma, file, pgoff,
					    vm_userfaultfd_ctx,
					    anon_name)) {
					    vm_userfaultfd_ctx)) {
		/*
		 * OK, it can. Can we now merge in the successor as well?
		 */
@@ -1206,8 +1199,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
				can_vma_merge_before(next, vm_flags,
						     anon_vma, file,
						     pgoff+pglen,
						     vm_userfaultfd_ctx,
						     anon_name) &&
						     vm_userfaultfd_ctx) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
			/* cases 1, 6 */
@@ -1230,8 +1222,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					     anon_vma, file, pgoff+pglen,
					     vm_userfaultfd_ctx,
					     anon_name)) {
					     vm_userfaultfd_ctx)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = __vma_adjust(prev, prev->vm_start,
					   addr, prev->vm_pgoff, NULL, next);
@@ -1764,7 +1755,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

@@ -3066,7 +3057,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

@@ -3259,7 +3250,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		return NULL;	/* should never get here */
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
			    vma->vm_userfaultfd_ctx);
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma

@@ -464,7 +464,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);