Merge commit c288d9cd71 ("Merge tag 'for-5.14/io_uring-2021-06-30' of git://git.kernel.dk/linux-block") into android-mainline
Another small step en route to v5.14-rc1

Change-Id: I24899ab78da7d367574ed69ceaa82ab0837d9556
Signed-off-by: Lee Jones <lee.jones@linaro.org>
@@ -46,7 +46,7 @@ extern int sysctl_page_lock_unfairness;
 
 void init_mm_internals(void);
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
+#ifndef CONFIG_NUMA	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 
 static inline void set_max_mapnr(unsigned long limit)
@@ -124,16 +124,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #define lm_alias(x)	__va(__pa_symbol(x))
 #endif
 
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
- * instrumented C code with jump table addresses. Architectures that
- * support CFI can define this macro to return the actual function address
- * when needed.
- */
-#ifndef function_nocfi
-#define function_nocfi(x) (x)
-#endif
-
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
@@ -234,7 +224,11 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp, void **shadowp);
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#else
+#define nth_page(page,n) ((page) + (n))
+#endif
 
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
@@ -1341,7 +1335,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
	if (!is_cow_mapping(vma->vm_flags))
		return false;
 
-	if (!atomic_read(&vma->vm_mm->has_pinned))
+	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
		return false;
 
	return page_maybe_dma_pinned(page);
@@ -1668,10 +1662,11 @@ struct address_space *page_mapping(struct page *page);
 static inline bool page_is_pfmemalloc(const struct page *page)
 {
	/*
-	 * Page index cannot be this large so this must be
-	 * a pfmemalloc page.
+	 * lru.next has bit 1 set if the page is allocated from the
+	 * pfmemalloc reserves. Callers may simply overwrite it if
+	 * they do not need to preserve that information.
	 */
-	return page->index == -1UL;
+	return (uintptr_t)page->lru.next & BIT(1);
 }
 
 /*
@@ -1680,12 +1675,12 @@ static inline bool page_is_pfmemalloc(const struct page *page)
  */
 static inline void set_page_pfmemalloc(struct page *page)
 {
-	page->index = -1UL;
+	page->lru.next = (void *)BIT(1);
 }
 
 static inline void clear_page_pfmemalloc(struct page *page)
 {
-	page->index = 0;
+	page->lru.next = NULL;
 }
 
 /*
@@ -1709,8 +1704,8 @@ extern bool can_do_mlock(void);
 #else
 static inline bool can_do_mlock(void) { return false; }
 #endif
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
+extern int user_shm_lock(size_t, struct ucounts *);
+extern void user_shm_unlock(size_t, struct ucounts *);
 
 /*
  * Parameter block passed down to zap_pte_range in exceptional cases.
@@ -1850,12 +1845,8 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);
 
-void __set_page_dirty(struct page *, struct address_space *, int warn);
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
 void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
 int set_page_dirty(struct page *page);
@@ -2421,7 +2412,7 @@ static inline unsigned long free_initmem_default(int poison)
	extern char __init_begin[], __init_end[];
 
	return free_reserved_area(&__init_begin, &__init_end,
-				  poison, "unused kernel");
+				  poison, "unused kernel image (initmem)");
 }
 
 static inline unsigned long get_num_physpages(void)
@@ -2461,7 +2452,7 @@ extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
 static inline int early_pfn_to_nid(unsigned long pfn)
 {
	return 0;
@@ -2475,7 +2466,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_range(unsigned long, int, unsigned long,
		unsigned long, unsigned long, enum meminit_context,
		struct vmem_altmap *, int migratetype);
-extern void memmap_init_zone(struct zone *zone);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
@@ -2682,17 +2672,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);
 
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
-   NULL if none.  Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+/**
+ * find_vma_intersection() - Look up the first VMA which intersects the interval
+ * @mm: The process address space.
+ * @start_addr: The inclusive start user address.
+ * @end_addr: The exclusive end user address.
+ *
+ * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
+ * start_addr < end_addr.
+ */
+static inline
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
 {
-	struct vm_area_struct * vma = find_vma(mm,start_addr);
+	struct vm_area_struct *vma = find_vma(mm, start_addr);
 
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
 }
 
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma = find_vma(mm, addr);
+
+	if (vma && addr < vma->vm_start)
+		vma = NULL;
+
+	return vma;
+}
+
 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
	unsigned long vm_start = vma->vm_start;