ANDROID: binder: fix race in priority restore
During a reply, the target is woken up first and only then is the replier's priority restored. This ordering lets the target start processing the reply as soon as possible; otherwise the sender risks being scheduled out before the wakeup happens, which would add transaction latency.

However, a subsequent transaction from the same target can start before the replier's priority has been restored. In that window we save the wrong priority, and it gets reinstated at the end of the transaction. This patch lets the incoming transaction detect the race and save the correct next priority. In addition, the replier aborts its pending priority restore, so the new transaction always runs at the desired priority.

Bug: 148101660
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Change-Id: I6fec41ae1a1342023f78212ab1f984e26f068221
(cherry picked from commit cac827f2619b280d418e546a09f25da600dafe5a)
[cmllamas: fixed trivial merge conflict]
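For orientation before the hunks, here is a minimal userspace sketch of the handshake this patch implements; it is not the driver code. A pthread mutex stands in for thread->prio_lock, a plain int stands in for the task's priority, and the helper names (restore_priority, save_priority_for_transaction) are invented for illustration. The idea mirrors the patch: when finishing a nested reply, the replier records the value it intends to restore (prio_next) and marks the restore BINDER_PRIO_PENDING before waking the target; an incoming transaction that observes the pending state saves prio_next instead of the task's stale current priority and flips the state to BINDER_PRIO_ABORT; the late restore then sees the abort and backs off.

    /* Illustrative sketch only; names and values are stand-ins, not driver code. */
    #include <pthread.h>
    #include <stdio.h>

    enum prio_state { PRIO_SET, PRIO_PENDING, PRIO_ABORT };

    static pthread_mutex_t prio_lock = PTHREAD_MUTEX_INITIALIZER;
    static enum prio_state prio_state = PRIO_SET;
    static int prio_next;           /* priority a pending restore would set */
    static int task_prio = 120;     /* stand-in for the replier's current priority */

    /* Replier path: after waking the target, try to restore the saved priority. */
    static void restore_priority(int saved_prio)
    {
            pthread_mutex_lock(&prio_lock);
            if (prio_state == PRIO_ABORT) {
                    /* A nested transaction already chose the next priority. */
                    pthread_mutex_unlock(&prio_lock);
                    printf("restore aborted, keeping priority %d\n", task_prio);
                    return;
            }
            task_prio = saved_prio;         /* the actual restore */
            prio_state = PRIO_SET;
            pthread_mutex_unlock(&prio_lock);
    }

    /* Incoming-transaction path: decide which priority to save for later restore. */
    static int save_priority_for_transaction(void)
    {
            int saved;

            pthread_mutex_lock(&prio_lock);
            if (prio_state == PRIO_PENDING) {
                    /* A restore is in flight, so the current priority is stale:
                     * save the pending value and tell the restorer to abort. */
                    saved = prio_next;
                    prio_state = PRIO_ABORT;
            } else {
                    saved = task_prio;      /* no race: current priority is valid */
            }
            pthread_mutex_unlock(&prio_lock);
            return saved;
    }

    int main(void)
    {
            /* Reply path: mark the restore as pending before actually doing it. */
            pthread_mutex_lock(&prio_lock);
            prio_state = PRIO_PENDING;
            prio_next = 120;
            pthread_mutex_unlock(&prio_lock);

            /* A nested transaction races in and saves the correct value... */
            int saved = save_priority_for_transaction();

            /* ...so the late restore backs off instead of clobbering it. */
            restore_priority(120);
            printf("priority saved for the new transaction: %d\n", saved);
            return 0;
    }

This preserves the existing latency optimization of waking the target before restoring the replier's priority, while making it harmless for the restore to lose the race.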
@@ -698,6 +698,20 @@ static void binder_do_set_priority(struct binder_thread *thread,
                                   to_kernel_prio(policy, priority),
                                   desired->prio);
 
+        spin_lock(&thread->prio_lock);
+        if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
+                /*
+                 * A new priority has been set by an incoming nested
+                 * transaction. Abort this priority restore and allow
+                 * the transaction to run at the new desired priority.
+                 */
+                spin_unlock(&thread->prio_lock);
+                binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+                             "%d: %s: aborting priority restore\n",
+                             thread->pid, __func__);
+                return;
+        }
+
         /* Set the actual priority */
         if (task->policy != policy || is_rt_policy(policy)) {
                 struct sched_param params;
@@ -710,6 +724,9 @@ static void binder_do_set_priority(struct binder_thread *thread,
         }
         if (is_fair_policy(policy))
                 set_user_nice(task, priority);
+
+        thread->prio_state = BINDER_PRIO_SET;
+        spin_unlock(&thread->prio_lock);
 }
 
 static void binder_set_priority(struct binder_thread *thread,
@@ -739,8 +756,6 @@ static void binder_transaction_priority(struct binder_thread *thread,
                 return;
 
         t->set_priority_called = true;
-        t->saved_priority.sched_policy = task->policy;
-        t->saved_priority.prio = task->normal_prio;
 
         if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
                 desired.prio = NICE_TO_PRIO(0);
@@ -760,6 +775,25 @@ static void binder_transaction_priority(struct binder_thread *thread,
                 desired = node_prio;
         }
 
+        spin_lock(&thread->prio_lock);
+        if (thread->prio_state == BINDER_PRIO_PENDING) {
+                /*
+                 * Task is in the process of changing priorities
+                 * saving its current values would be incorrect.
+                 * Instead, save the pending priority and signal
+                 * the task to abort the priority restore.
+                 */
+                t->saved_priority = thread->prio_next;
+                thread->prio_state = BINDER_PRIO_ABORT;
+                binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+                             "%d: saved pending priority %d\n",
+                             current->pid, thread->prio_next.prio);
+        } else {
+                t->saved_priority.sched_policy = task->policy;
+                t->saved_priority.prio = task->normal_prio;
+        }
+        spin_unlock(&thread->prio_lock);
+
         binder_set_priority(thread, &desired);
         trace_android_vh_binder_set_priority(t, task);
 }
@@ -2627,6 +2661,7 @@ static void binder_transaction(struct binder_proc *proc,
         u32 secctx_sz = 0;
         const void __user *user_buffer = (const void __user *)
                                 (uintptr_t)tr->data.ptr.buffer;
+        bool is_nested = false;
 
         e = binder_transaction_log_add(&binder_transaction_log);
         e->debug_id = t_debug_id;
@@ -2809,6 +2844,7 @@ static void binder_transaction(struct binder_proc *proc,
                         atomic_inc(&from->tmp_ref);
                         target_thread = from;
                         spin_unlock(&tmp->lock);
+                        is_nested = true;
                         break;
                 }
                 spin_unlock(&tmp->lock);
@@ -2873,6 +2909,7 @@ static void binder_transaction(struct binder_proc *proc,
         t->to_thread = target_thread;
         t->code = tr->code;
         t->flags = tr->flags;
+        t->is_nested = is_nested;
         if (!(t->flags & TF_ONE_WAY) &&
             binder_supported_policy(current->policy)) {
                 /* Inherit supported policies for synchronous transactions */
@@ -3243,6 +3280,12 @@ static void binder_transaction(struct binder_proc *proc,
                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
                 target_proc->outstanding_txns++;
                 binder_inner_proc_unlock(target_proc);
+                if (in_reply_to->is_nested) {
+                        spin_lock(&thread->prio_lock);
+                        thread->prio_state = BINDER_PRIO_PENDING;
+                        thread->prio_next = in_reply_to->saved_priority;
+                        spin_unlock(&thread->prio_lock);
+                }
                 wake_up_interruptible_sync(&target_thread->wait);
                 trace_android_vh_binder_restore_priority(in_reply_to, current);
                 binder_restore_priority(thread, &in_reply_to->saved_priority);
@@ -4502,6 +4545,8 @@ static struct binder_thread *binder_get_thread_ilocked(
         thread->return_error.cmd = BR_OK;
         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
         thread->reply_error.cmd = BR_OK;
+        spin_lock_init(&thread->prio_lock);
+        thread->prio_state = BINDER_PRIO_SET;
         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
         return thread;
 }
@@ -367,6 +367,12 @@ struct binder_priority {
         int prio;
 };
 
+enum binder_prio_state {
+        BINDER_PRIO_SET,        /* desired priority set */
+        BINDER_PRIO_PENDING,    /* initiated a saved priority restore */
+        BINDER_PRIO_ABORT,      /* abort the pending priority restore */
+};
+
 /**
  * struct binder_proc - binder process bookkeeping
  * @proc_node: element for binder_procs list
@@ -511,6 +517,12 @@ struct binder_proc {
  * when outstanding transactions are cleaned up
  * (protected by @proc->inner_lock)
  * @task: struct task_struct for this thread
+ * @prio_lock: protects thread priority fields
+ * @prio_next: saved priority to be restored next
+ *             (protected by @prio_lock)
+ * @prio_state: state of the priority restore process as
+ *              defined by enum binder_prio_state
+ *              (protected by @prio_lock)
  *
  * Bookkeeping structure for binder threads.
  */
@@ -531,6 +543,9 @@ struct binder_thread {
         atomic_t tmp_ref;
         bool is_dead;
         struct task_struct *task;
+        spinlock_t prio_lock;
+        struct binder_priority prio_next;
+        enum binder_prio_state prio_state;
 };
 
 /**
@@ -567,6 +582,7 @@ struct binder_transaction {
         struct binder_priority priority;
         struct binder_priority saved_priority;
         bool set_priority_called;
+        bool is_nested;
         kuid_t sender_euid;
         struct list_head fd_fixups;
         binder_uintptr_t security_ctx;