ANDROID: dma-buf: heaps: replace mutex lock with spinlock

Use a spinlock instead of a mutex to protect the page pool's critical
sections, because:
1. The critical sections are short, so a spinlock is more efficient.
2. A spinlock avoids priority inversion: a spinlock holder runs with
   preemption disabled, so it cannot be scheduled out mid-section.
   With a mutex, a low-priority thread (dmabuf-deferred) can hold the
   pool lock but get scheduled out under heavy load; other,
   higher-priority threads then have to wait for dmabuf-deferred to
   release the lock, causing long allocation latency and possible UI
   jank.

Also, move the NR_KERNEL_MISC_RECLAIMABLE stat update out of the
critical section to shorten it further, since mod_node_page_state()
handles concurrent access on its own (a condensed sketch of the
resulting add path follows the commit metadata below).

Bug: 245454030
Change-Id: I15f349f9e893621f71ca79f1de037de184c33edf
Signed-off-by: Martin Liu <liumartin@google.com>
Author: Martin Liu <liumartin@google.com>
Date: 2022-11-03 03:51:14 +00:00
Committed-by: Treehugger Robot
Commit: 060e38dce1 (parent a229a30d99)
2 changed files with 10 additions and 8 deletions
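
For orientation, here is a minimal sketch of the locking pattern the
add path adopts after this change, condensed from the first hunk
below rather than copied verbatim: the list and counter updates stay
inside the new, short spinlock section, and the vmstat update runs
after the lock is dropped, which is safe because mod_node_page_state()
tolerates concurrent callers. The PageHighMem() index selection is
folded into a ternary here; the driver's actual control flow is in
the hunk.

/* Condensed sketch of the post-change dmabuf_page_pool_add(). */
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmstat.h>

static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool,
				 struct page *page)
{
	int index = PageHighMem(page) ? POOL_HIGHPAGE : POOL_LOWPAGE;

	spin_lock(&pool->lock);		/* short critical section */
	list_add_tail(&page->lru, &pool->items[index]);
	pool->count[index]++;
	spin_unlock(&pool->lock);

	/* Outside the lock: keeps the critical section minimal. */
	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
			    1 << pool->order);
}

Note the plain spin_lock() rather than spin_lock_irqsave(): the diff
implies the pool is only touched from process context, where masking
interrupts is unnecessary.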

--- a/drivers/dma-buf/heaps/page_pool.c
+++ b/drivers/dma-buf/heaps/page_pool.c

@@ -41,28 +41,30 @@ static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
 	else
 		index = POOL_LOWPAGE;
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&pool->lock);
 	list_add_tail(&page->lru, &pool->items[index]);
 	pool->count[index]++;
+	spin_unlock(&pool->lock);
 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 			    1 << pool->order);
-	mutex_unlock(&pool->mutex);
 }
 
 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
 {
 	struct page *page;
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&pool->lock);
 	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
 	if (page) {
 		pool->count[index]--;
 		list_del(&page->lru);
+		spin_unlock(&pool->lock);
 		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 				    -(1 << pool->order));
+		goto out;
 	}
-	mutex_unlock(&pool->mutex);
+	spin_unlock(&pool->lock);
 
+out:
 	return page;
 }
@@ -125,7 +127,7 @@ struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
 	}
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
 	pool->order = order;
-	mutex_init(&pool->mutex);
+	spin_lock_init(&pool->lock);
 
 	mutex_lock(&pool_list_lock);
 	list_add(&pool->list, &pool_list);

--- a/drivers/dma-buf/heaps/page_pool.h
+++ b/drivers/dma-buf/heaps/page_pool.h

@@ -13,7 +13,7 @@
 #include <linux/device.h>
 #include <linux/kref.h>
 #include <linux/mm_types.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
@@ -40,7 +40,7 @@ enum {
 struct dmabuf_page_pool {
 	int count[POOL_TYPE_SIZE];
 	struct list_head items[POOL_TYPE_SIZE];
-	struct mutex mutex;
+	struct spinlock lock;
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct list_head list;
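
Finally, a hedged usage sketch from a caller's point of view. Only
dmabuf_page_pool_create() is visible in the hunks above;
dmabuf_page_pool_alloc() and dmabuf_page_pool_free() are assumed entry
points of the same driver and should be checked against page_pool.h.
Nothing changes for callers: the switch from mutex to spinlock is
internal to the pool.

/* Hypothetical heap-side caller; alloc/free names are assumptions. */
#include <linux/gfp.h>

static struct dmabuf_page_pool *heap_pool;

static int heap_pool_init(void)
{
	/* order-0 pool of GFP_KERNEL pages; choose per heap policy */
	heap_pool = dmabuf_page_pool_create(GFP_KERNEL, 0);
	return heap_pool ? 0 : -ENOMEM;
}

static struct page *heap_alloc_page(void)
{
	/*
	 * The pool lock is now a spinlock held only for the brief
	 * list/counter update, so a preempted low-priority holder can
	 * no longer stall this path (the jank case described above).
	 */
	return dmabuf_page_pool_alloc(heap_pool);
}

static void heap_free_page(struct page *page)
{
	dmabuf_page_pool_free(heap_pool, page);
}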