Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes in this cycle were:
- refcount conversions
- Solve the rq->leaf_cfs_rq_list can of worms for real.
- improve power-aware scheduling
- add sysctl knob for Energy Aware Scheduling
- documentation updates
- misc other changes"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
kthread: Do not use TIMER_IRQSAFE
kthread: Convert worker lock to raw spinlock
sched/fair: Use non-atomic cpumask_{set,clear}_cpu()
sched/fair: Remove unused 'sd' parameter from select_idle_smt()
sched/wait: Use freezable_schedule() when possible
sched/fair: Prune, fix and simplify the nohz_balancer_kick() comment block
sched/fair: Explain LLC nohz kick condition
sched/fair: Simplify nohz_balancer_kick()
sched/topology: Fix percpu data types in struct sd_data & struct s_data
sched/fair: Simplify post_init_entity_util_avg() by calling it with a task_struct pointer argument
sched/fair: Fix O(nr_cgroups) in the load balancing path
sched/fair: Optimize update_blocked_averages()
sched/fair: Fix insertion in rq->leaf_cfs_rq_list
sched/fair: Add tmp_alone_branch assertion
sched/core: Use READ_ONCE()/WRITE_ONCE() in move_queued_task()/task_rq_lock()
sched/debug: Initialize sd_sysctl_cpus if !CONFIG_CPUMASK_OFFSTACK
sched/pelt: Skip updating util_est when utilization is higher than CPU's capacity
sched/fair: Update scale invariance of PELT
sched/fair: Move the rq_of() helper function
sched/core: Convert task_struct.stack_refcount to refcount_t
...
@@ -86,7 +86,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int		flags;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct list_head	work_list;
 	struct list_head	delayed_work_list;
 	struct task_struct	*task;
@@ -107,7 +107,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
@@ -165,9 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 #define kthread_init_delayed_work(dwork, fn)				\
 	do {								\
 		kthread_init_work(&(dwork)->work, (fn));		\
-		__init_timer(&(dwork)->timer,				\
-			     kthread_delayed_work_timer_fn,		\
-			     TIMER_IRQSAFE);				\
+		timer_setup(&(dwork)->timer,				\
+			    kthread_delayed_work_timer_fn, 0);		\
 	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
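For context, a minimal sketch of how the kthread delayed-work API touched by the hunks above is typically used. The names my_worker, my_dwork, my_work_fn and my_setup are illustrative only and are not part of this patch; the API calls (kthread_create_worker, kthread_init_delayed_work, kthread_queue_delayed_work) are the existing in-tree interfaces.

/*
 * Hypothetical usage sketch; "my_*" identifiers are made up for
 * illustration and do not appear in the patch above.
 */
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static struct kthread_delayed_work my_dwork;

static void my_work_fn(struct kthread_work *work)
{
	/* Runs in the worker kthread's context, not in hard-IRQ context. */
	pr_info("delayed kthread work ran\n");
}

static int my_setup(void)
{
	struct kthread_worker *worker;

	/* Spawn the worker thread; its internal lock is now a raw spinlock. */
	worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	/* Initialize the work and its timer (plain timer, no TIMER_IRQSAFE). */
	kthread_init_delayed_work(&my_dwork, my_work_fn);
	kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(100));
	return 0;
}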