Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "191 patches.

  Subsystems affected by this patch series: kthread, ia64, scripts,
  ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
  slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
  mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
  pagealloc, and memory-failure)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
  mm,hwpoison: make get_hwpoison_page() call get_any_page()
  mm,hwpoison: send SIGBUS with error virutal address
  mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
  mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
  mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
  mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
  docs: remove description of DISCONTIGMEM
  arch, mm: remove stale mentions of DISCONIGMEM
  mm: remove CONFIG_DISCONTIGMEM
  m68k: remove support for DISCONTIGMEM
  arc: remove support for DISCONTIGMEM
  arc: update comment about HIGHMEM implementation
  alpha: remove DISCONTIGMEM and NUMA
  mm/page_alloc: move free_the_page
  mm/page_alloc: fix counting of managed_pages
  mm/page_alloc: improve memmap_pages dbg msg
  mm: drop SECTION_SHIFT in code comments
  mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
  mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
  mm/page_alloc: scale the number of pages that are batch freed
  ...
@@ -46,7 +46,7 @@ extern int sysctl_page_lock_unfairness;
 
 void init_mm_internals(void);
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
+#ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 
 static inline void set_max_mapnr(unsigned long limit)
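The CONFIG_NEED_MULTIPLE_NODES -> CONFIG_NUMA rename above is mechanical: with DISCONTIGMEM removed earlier in this series, NUMA is the only remaining reason a kernel needs more than one node. A hedged sketch of what the rename means for config-dependent code (pick_node() is an illustrative wrapper, not part of this commit; early_pfn_to_nid() is the real mm.h API visible further down in this diff):

#include <linux/mm.h>

#ifdef CONFIG_NUMA	/* was: #ifdef CONFIG_NEED_MULTIPLE_NODES */
static int pick_node(unsigned long pfn)
{
	return early_pfn_to_nid(pfn);	/* real per-node lookup */
}
#else
static int pick_node(unsigned long pfn)
{
	return 0;	/* single-node system: everything lives on node 0 */
}
#endif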
@@ -234,7 +234,11 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp, void **shadowp);
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#else
+#define nth_page(page,n) ((page) + (n))
+#endif
 
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
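The nth_page() split above matters because, with classic SPARSEMEM (no VMEMMAP), the struct page arrays of different memory sections are separate allocations, so plain pointer arithmetic can walk off the end of one section's memmap; on FLATMEM and SPARSEMEM_VMEMMAP the memmap is virtually contiguous, and `(page) + (n)` is both correct and cheaper than the pfn round-trip. A minimal caller sketch (last_tail_page() is a hypothetical helper, not from this commit):

#include <linux/mm.h>

/* Return the last tail page of a higher-order allocation.
 * nth_page() is safe on every memory model; after this change it
 * compiles down to plain (head + n) whenever the memmap is
 * virtually contiguous. */
static struct page *last_tail_page(struct page *head, unsigned int order)
{
	return nth_page(head, (1U << order) - 1);
}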
@@ -1341,7 +1345,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
 	if (!is_cow_mapping(vma->vm_flags))
 		return false;
 
-	if (!atomic_read(&vma->vm_mm->has_pinned))
+	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
 		return false;
 
 	return page_maybe_dma_pinned(page);
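The hunk above replaces the dedicated atomic_t mm->has_pinned with the MMF_HAS_PINNED bit in mm->flags: the counter was only ever used as a set-once boolean, so packing it into the existing flags word shrinks mm_struct. A sketch of the setting side, modeled on this series' GUP change (the helper name here is illustrative):

#include <linux/mm_types.h>
#include <linux/sched/coredump.h>	/* MMF_HAS_PINNED */

/* Set once before the first page pin and never cleared for the
 * lifetime of the mm. Testing before setting keeps repeated pins
 * from dirtying the shared cacheline. */
static void mm_note_has_pinned(struct mm_struct *mm)
{
	if (!test_bit(MMF_HAS_PINNED, &mm->flags))
		set_bit(MMF_HAS_PINNED, &mm->flags);
}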
@@ -1850,12 +1854,8 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);
 
-void __set_page_dirty(struct page *, struct address_space *, int warn);
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
 void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
 int set_page_dirty(struct page *page);
@@ -2420,7 +2420,7 @@ static inline unsigned long free_initmem_default(int poison)
 	extern char __init_begin[], __init_end[];
 
 	return free_reserved_area(&__init_begin, &__init_end,
-				  poison, "unused kernel");
+				  poison, "unused kernel image (initmem)");
 }
 
 static inline unsigned long get_num_physpages(void)
@@ -2460,7 +2460,7 @@ extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
 
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
 static inline int early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
@@ -2474,7 +2474,6 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_range(unsigned long, int, unsigned long,
		unsigned long, unsigned long, enum meminit_context,
		struct vmem_altmap *, int migratetype);
-extern void memmap_init_zone(struct zone *zone);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
@@ -2681,17 +2680,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);
 
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
-   NULL if none.  Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+/**
+ * find_vma_intersection() - Look up the first VMA which intersects the interval
+ * @mm: The process address space.
+ * @start_addr: The inclusive start user address.
+ * @end_addr: The exclusive end user address.
+ *
+ * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
+ * start_addr < end_addr.
+ */
+static inline
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
 {
-	struct vm_area_struct * vma = find_vma(mm,start_addr);
+	struct vm_area_struct *vma = find_vma(mm, start_addr);
 
 	if (vma && end_addr <= vma->vm_start)
 		vma = NULL;
 	return vma;
 }
 
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
+{
+	struct vm_area_struct *vma = find_vma(mm, addr);
+
+	if (vma && addr < vma->vm_start)
+		vma = NULL;
+
+	return vma;
+}
+
 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_start = vma->vm_start;
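The new vma_lookup() above exists because find_vma() returns the first VMA that ends above the address, which may also *start* above it, so callers historically had to re-check vma->vm_start themselves; vma_lookup() folds that check in and returns NULL unless the address actually falls inside a mapping. A hedged usage sketch (addr_is_mapped() is a hypothetical caller, not from this commit):

#include <linux/mm.h>

static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);
	/* find_vma(mm, addr) could hand back a VMA that starts above
	 * addr; vma_lookup() returns NULL in that case instead. */
	vma = vma_lookup(mm, addr);
	mapped = vma != NULL;
	mmap_read_unlock(mm);

	return mapped;
}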