FROMLIST: mm: cma: introduce gfp flag in cma_alloc instead of no_warn
An upcoming patch will introduce the __GFP_NORETRY semantic in
alloc_contig_range, a failfast mode of the API. Instead of adding
an additional gfp parameter to cma_alloc, replace the existing
no_warn boolean with a gfp flag.
To keep the old behavior, existing callers are converted according to
the rule below (illustrated in the sketch after the tags):

    no_warn                 gfp_flags
    false                   GFP_KERNEL
    true                    GFP_KERNEL | __GFP_NOWARN
    gfp & __GFP_NOWARN      GFP_KERNEL | (gfp & __GFP_NOWARN)
Bug: 170340257
Bug: 120293424
Link: https://lore.kernel.org/linux-mm/YAnM5PbNJZlk%2F%2FiX@google.com/T/#m36b144ff81fe0a8f0ecaf6813de4819ecc41f8fe
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I1ce020ab5d5fff34eb6464be4632ddef72fb43eb
Signed-off-by: Richard Chang <richardycc@google.com>
(cherry picked from commit 23ba990a3e)
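For illustration, a minimal before/after sketch of the conversion rule
at a generic call site (the variable names are placeholders, not taken
from any one caller):

	/* Before: the fourth argument was a bool no_warn. */
	page = cma_alloc(cma, nr_pages, align, false);	/* warn on failure */
	page = cma_alloc(cma, nr_pages, align, true);	/* silent on failure */

	/* After: the fourth argument is a gfp_t with the same two behaviors. */
	page = cma_alloc(cma, nr_pages, align, GFP_KERNEL);
	page = cma_alloc(cma, nr_pages, align, GFP_KERNEL | __GFP_NOWARN);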
@@ -302,7 +302,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
 	if (!cma_pages)
 		goto free_buffer;
@@ -70,7 +70,7 @@ static void vmcp_response_alloc(struct vmcp_session *session)
 	 * anymore the system won't work anyway.
 	 */
 	if (order > 2)
-		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
+		page = cma_alloc(vmcp_cma, nr_pages, 0, GFP_KERNEL);
 	if (page) {
 		session->response = (char *)page_to_phys(page);
 		session->cma_alloc = 1;
@@ -45,7 +45,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					const char *name,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
-			      bool no_warn);
+			      gfp_t gfp_mask);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
@@ -261,7 +261,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
+	return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL |
+			(no_warn ? __GFP_NOWARN : 0));
 }
 
 /**
@@ -284,7 +285,8 @@ static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
 {
 	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);
 
-	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
+	return cma_alloc(cma, size >> PAGE_SHIFT, align,
+			GFP_KERNEL | (gfp & __GFP_NOWARN));
 }
 
 /**
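The dma_alloc_from_contiguous() and cma_alloc_aligned() hunks above
deliberately forward only the caller's __GFP_NOWARN bit rather than the
whole incoming mask. A minimal sketch of that filtering, assuming the
standard gfp definitions; the helper name is hypothetical, the
expression is the one from the hunks:

	/* Hypothetical helper mirroring the open-coded expression above:
	 * every bit in the caller's mask except __GFP_NOWARN is dropped,
	 * so cma_alloc() always runs with plain GFP_KERNEL semantics
	 * regardless of what the outer allocation context requested.
	 */
	static inline gfp_t cma_gfp_from_caller(gfp_t gfp)
	{
		return GFP_KERNEL | (gfp & __GFP_NOWARN);
	}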
mm/cma.c
@@ -423,13 +423,13 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
  * @cma:   Contiguous memory region for which the allocation is performed.
  * @count: Requested number of pages.
  * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation
+ * @gfp_mask: GFP mask to use during the cma allocation.
  *
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
 struct page *cma_alloc(struct cma *cma, unsigned long count,
-		       unsigned int align, bool no_warn)
+		       unsigned int align, gfp_t gfp_mask)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -444,8 +444,8 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	if (!cma || !cma->count || !cma->bitmap)
 		goto out;
 
-	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
-		 count, align);
+	pr_debug("%s(cma %p, count %lu, align %d gfp_mask 0x%x)\n", __func__,
+		 (void *)cma, count, align, gfp_mask);
 
 	if (!count)
 		goto out;
@@ -499,8 +499,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
 	pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 	mutex_lock(&cma_mutex);
-	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-				 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
 	mutex_unlock(&cma_mutex);
 	if (ret == 0) {
 		page = pfn_to_page(pfn);
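With gfp_mask handed straight to alloc_contig_range(), the failfast
mode announced at the top of this message becomes a one-flag change for
callers. A hypothetical sketch of such a future call site; the
__GFP_NORETRY handling itself lands in the follow-up patch, not here:

	/* Hypothetical future caller: give up quickly rather than keep
	 * retrying page migration, and stay quiet about the failure. */
	page = cma_alloc(cma, nr_pages, align,
			 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);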
@@ -532,7 +531,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		page_kasan_tag_reset(page + i);
 	}
 
-	if (ret && !no_warn) {
+	if (ret && !(gfp_mask & __GFP_NOWARN)) {
 		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
 				   __func__, cma->name, count, ret);
 		cma_debug_show_areas(cma);
@@ -137,7 +137,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
 	if (!mem)
 		return -ENOMEM;
 
-	p = cma_alloc(cma, count, 0, false);
+	p = cma_alloc(cma, count, 0, GFP_KERNEL);
 	if (!p) {
 		kfree(mem);
 		return -ENOMEM;
@@ -1310,7 +1310,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 
 		if (hugetlb_cma[nid]) {
 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
-					huge_page_order(h), true);
+					huge_page_order(h),
+					GFP_KERNEL | __GFP_NOWARN);
 			if (page)
 				return page;
 		}
@@ -1321,7 +1322,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 				continue;
 
 			page = cma_alloc(hugetlb_cma[node], nr_pages,
-					huge_page_order(h), true);
+					huge_page_order(h),
+					GFP_KERNEL | __GFP_NOWARN);
 			if (page)
 				return page;
 		}