Merge 625acffd7a ("Merge tag 's390-5.13-5' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux") into android-mainline

Steps on the way to 5.13-final

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I792b77ece0440ec9d609829dac75d5ca4a5b1671
Greg Kroah-Hartman
2021-06-28 07:27:29 +02:00
78 changed files with 797 additions and 423 deletions


@@ -212,6 +212,8 @@ Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
 Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
+Marek Behún <kabel@kernel.org> <marek.behun@nic.cz>
+Marek Behún <kabel@kernel.org> Marek Behun <marek.behun@nic.cz>
 Mark Brown <broonie@sirena.org.uk>
 Mark Starovoytov <mstarovo@pm.me> <mstarovoitov@marvell.com>
 Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>


@@ -1816,7 +1816,7 @@ F: drivers/pinctrl/pinctrl-gemini.c
 F: drivers/rtc/rtc-ftrtc010.c

 ARM/CZ.NIC TURRIS SUPPORT
-M: Marek Behun <kabel@kernel.org>
+M: Marek Behún <kabel@kernel.org>
 S: Maintained
 W: https://www.turris.cz/
 F: Documentation/ABI/testing/debugfs-moxtet
@@ -7354,7 +7354,6 @@ F: drivers/net/ethernet/freescale/fs_enet/
 F: include/linux/fs_enet_pd.h

 FREESCALE SOC SOUND DRIVERS
-M: Timur Tabi <timur@kernel.org>
 M: Nicolin Chen <nicoleotsuka@gmail.com>
 M: Xiubo Li <Xiubo.Lee@gmail.com>
 R: Fabio Estevam <festevam@gmail.com>
@@ -10953,7 +10952,7 @@ F: include/linux/mv643xx.h
 MARVELL MV88X3310 PHY DRIVER
 M: Russell King <linux@armlinux.org.uk>
-M: Marek Behun <marek.behun@nic.cz>
+M: Marek Behún <kabel@kernel.org>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/phy/marvell10g.c


@@ -545,9 +545,11 @@ void notrace cpu_init(void)
          * In Thumb-2, msr with an immediate value is not allowed.
          */
 #ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
 #else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
 #endif

 	/*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
            "msr cpsr_c, %9"
            :
            : "r" (stk),
-             PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+             PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
-             PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+             PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
 #endif
 }
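
For context on this hunk: in Thumb-2, msr cannot encode an immediate operand, so the PSR values must be supplied in registers, while ARM mode can use the "I" (immediate) constraint — hence the split of the single PLC constraint into PLC_l and PLC_r. A minimal, ARM-only sketch of the constraint mechanics (illustrative names, not the kernel's code; builds only with an ARM target compiler):

    /* "l" = low register r0-r7; "I" = data-processing immediate. */
    #ifdef __thumb2__
    #define PSR_CONSTRAINT "l"   /* operand must sit in a low register */
    #else
    #define PSR_CONSTRAINT "I"   /* operand may be encoded as an immediate */
    #endif

    #define EXAMPLE_PSR 0xd3     /* hypothetical: IRQ/FIQ masked, SVC mode */

    static inline void write_cpsr_c(void)
    {
            /* The compiler picks a register or immediate form to satisfy
             * the constraint, exactly as in cpu_init() above. */
            asm volatile("msr cpsr_c, %0" : : PSR_CONSTRAINT (EXAMPLE_PSR));
    }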


@@ -91,12 +91,16 @@ struct stack_frame {
 	CALL_ARGS_4(arg1, arg2, arg3, arg4); \
 	register unsigned long r4 asm("6") = (unsigned long)(arg5)

-#define CALL_FMT_0 "=&d" (r2) :
-#define CALL_FMT_1 "+&d" (r2) :
-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+/*
+ * To keep this simple mark register 2-6 as being changed (volatile)
+ * by the called function, even though register 6 is saved/nonvolatile.
+ */
+#define CALL_FMT_0 "=&d" (r2)
+#define CALL_FMT_1 "+&d" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)

 #define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
 #define CALL_CLOBBER_4 CALL_CLOBBER_5
@@ -118,7 +122,7 @@ struct stack_frame {
 		" brasl 14,%[_fn]\n" \
 		" la 15,0(%[_prev])\n" \
 		: [_prev] "=&a" (prev), CALL_FMT_##nr \
-		  [_stack] "R" (stack), \
+		: [_stack] "R" (stack), \
 		  [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
 		  [_frame] "d" (frame), \
 		  [_fn] "X" (fn) : CALL_CLOBBER_##nr); \


@@ -418,6 +418,7 @@ ENTRY(\name)
 	xgr	%r6,%r6
 	xgr	%r7,%r7
 	xgr	%r10,%r10
+	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	tm	%r8,0x0001		# coming from user space?


@@ -512,7 +512,6 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
 	/* No handlers present - check for system call restart */
 	clear_pt_regs_flag(regs, PIF_SYSCALL);
-	clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
 	if (current->thread.system_call) {
 		regs->int_code = current->thread.system_call;
 		switch (regs->gprs[2]) {


@@ -66,7 +66,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
 {
 	static cpumask_t mask;

-	cpumask_copy(&mask, cpumask_of(cpu));
+	cpumask_clear(&mask);
+	if (!cpu_online(cpu))
+		goto out;
+	cpumask_set_cpu(cpu, &mask);
 	switch (topology_mode) {
 	case TOPOLOGY_MODE_HW:
 		while (info) {
@@ -83,10 +86,10 @@
 	default:
 		fallthrough;
 	case TOPOLOGY_MODE_SINGLE:
+		cpumask_copy(&mask, cpumask_of(cpu));
 		break;
 	}
 	cpumask_and(&mask, &mask, cpu_online_mask);
+out:
 	cpumask_copy(dst, &mask);
 }
@@ -95,7 +98,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
 	static cpumask_t mask;
 	int i;

-	cpumask_copy(&mask, cpumask_of(cpu));
+	cpumask_clear(&mask);
+	if (!cpu_online(cpu))
+		goto out;
+	cpumask_set_cpu(cpu, &mask);
 	if (topology_mode != TOPOLOGY_MODE_HW)
 		goto out;
 	cpu -= cpu % (smp_cpu_mtid + 1);


@@ -140,7 +140,12 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
 	/* Allocate variable storage */
 	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
 	vlen += uv_info.guest_virt_base_stor_len;
-	kvm->arch.pv.stor_var = vzalloc(vlen);
+	/*
+	 * The Create Secure Configuration Ultravisor Call does not support
+	 * using large pages for the virtual memory area.
+	 * This is a hardware limitation.
+	 */
+	kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
 	if (!kvm->arch.pv.stor_var)
 		goto out_err;
 	return 0;


@@ -130,8 +130,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 		/* User code screwed up. */
 		regs->ax = -EFAULT;

-		instrumentation_end();
 		local_irq_disable();
+		instrumentation_end();
 		irqentry_exit_to_user_mode(regs);
 		return false;
 	}
@@ -269,15 +269,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	irqentry_state_t state = irqentry_enter(regs);
 	bool inhcall;

+	instrumentation_begin();
 	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);

 	inhcall = get_and_clear_inhcall();
 	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
-		instrumentation_begin();
 		irqentry_exit_cond_resched();
 		instrumentation_end();
 		restore_inhcall(inhcall);
 	} else {
+		instrumentation_end();
 		irqentry_exit(regs, state);
 	}
 }


@@ -731,7 +731,8 @@ void reserve_lbr_buffers(void)
 		if (!kmem_cache || cpuc->lbr_xsave)
 			continue;

-		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+							GFP_KERNEL | __GFP_ZERO,
 							cpu_to_node(cpu));
 	}
 }


@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
 }

+static inline void fxsave(struct fxregs_state *fx)
+{
+	if (IS_ENABLED(CONFIG_X86_32))
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+	else
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
 /* These macros all use (%edi)/(%rdi) as the single memory argument. */
 #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
 #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
@@ -268,28 +276,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
 		     : "memory")

-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
-	u64 mask = xfeatures_mask_all;
-	u32 lmask = mask;
-	u32 hmask = mask >> 32;
-	int err;
-
-	WARN_ON(system_state != SYSTEM_BOOTING);
-
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
-	else
-		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
-	/* We should never fault when copying to a kernel buffer: */
-	WARN_ON_FPU(err);
-}
-
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.


@@ -75,7 +75,7 @@ void copy_page(void *to, void *from);
  *
  * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
-static inline unsigned long task_size_max(void)
+static __always_inline unsigned long task_size_max(void)
 {
 	unsigned long ret;


@@ -221,28 +221,18 @@ sanitize_restored_user_xstate(union fpregs_state *state,
 	if (use_xsave()) {
 		/*
-		 * Note: we don't need to zero the reserved bits in the
-		 * xstate_header here because we either didn't copy them at all,
-		 * or we checked earlier that they aren't set.
+		 * Clear all feature bits which are not set in
+		 * user_xfeatures and clear all extended features
+		 * for fx_only mode.
 		 */
+		u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;

 		/*
-		 * 'user_xfeatures' might have bits clear which are
-		 * set in header->xfeatures. This represents features that
-		 * were in init state prior to a signal delivery, and need
-		 * to be reset back to the init state. Clear any user
-		 * feature bits which are set in the kernel buffer to get
-		 * them back to the init state.
-		 *
-		 * Supervisor state is unchanged by input from userspace.
-		 * Ensure supervisor state bits stay set and supervisor
-		 * state is not modified.
+		 * Supervisor state has to be preserved. The sigframe
+		 * restore can only modify user features, i.e. @mask
+		 * cannot contain them.
 		 */
-		if (fx_only)
-			header->xfeatures = XFEATURE_MASK_FPSSE;
-		else
-			header->xfeatures &= user_xfeatures |
-					     xfeatures_mask_supervisor();
+		header->xfeatures &= mask | xfeatures_mask_supervisor();
 	}

 	if (use_fxsr()) {


@@ -440,6 +440,25 @@ static void __init print_xstate_offset_size(void)
 	}
 }

+/*
+ * All supported features have either init state all zeros or are
+ * handled in setup_init_fpu() individually. This is an explicit
+ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
+ * newly added supported features at build time and make people
+ * actually look at the init state for the new feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+	(XFEATURE_MASK_FP | \
+	 XFEATURE_MASK_SSE | \
+	 XFEATURE_MASK_YMM | \
+	 XFEATURE_MASK_OPMASK | \
+	 XFEATURE_MASK_ZMM_Hi256 | \
+	 XFEATURE_MASK_Hi16_ZMM | \
+	 XFEATURE_MASK_PKRU | \
+	 XFEATURE_MASK_BNDREGS | \
+	 XFEATURE_MASK_BNDCSR | \
+	 XFEATURE_MASK_PASID)
+
 /*
  * setup the xstate image representing the init state
  */
@@ -447,6 +466,10 @@ static void __init setup_init_fpu_buf(void)
 {
 	static int on_boot_cpu __initdata = 1;

+	BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
+		      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
+		     XFEATURES_INIT_FPSTATE_HANDLED);
+
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -466,10 +489,22 @@ static void __init setup_init_fpu_buf(void)
 	copy_kernel_to_xregs_booting(&init_fpstate.xsave);

 	/*
-	 * Dump the init state again. This is to identify the init state
-	 * of any feature which is not represented by all zero's.
+	 * All components are now in init state. Read the state back so
+	 * that init_fpstate contains all non-zero init state. This only
+	 * works with XSAVE, but not with XSAVEOPT and XSAVES because
+	 * those use the init optimization which skips writing data for
+	 * components in init state.
+	 *
+	 * XSAVE could be used, but that would require to reshuffle the
+	 * data when XSAVES is available because XSAVES uses xstate
+	 * compaction. But doing so is a pointless exercise because most
+	 * components have an all zeros init state except for the legacy
+	 * ones (FP and SSE). Those can be saved with FXSAVE into the
+	 * legacy area. Adding new features requires to ensure that init
+	 * state is all zeroes or if not to add the necessary handling
+	 * here.
 	 */
-	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+	fxsave(&init_fpstate.fxsave);
 }

 static int xfeature_uncompacted_offset(int xfeature_nr)


@@ -58,12 +58,16 @@ SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
 2:	.skip	5-(2b-1b), 0x90
 SYM_FUNC_END(__x86_indirect_alt_call_\reg)

+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
 SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
 	ANNOTATE_RETPOLINE_SAFE
 1:	jmp	*%\reg
 2:	.skip	5-(2b-1b), 0x90
 SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)

+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
+
 .endm
/* /*


@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
 DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
 {
 	/* This should never happen and there is no way to handle it. */
+	instrumentation_begin();
 	pr_err("Unknown trap in Xen PV mode.");
 	BUG();
+	instrumentation_end();
 }

 #ifdef CONFIG_X86_MCE
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE


@@ -1045,6 +1045,14 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
 	}

 	set_secondary_fwnode(dev, &swnode->fwnode);
-	software_node_notify(dev, KOBJ_ADD);
+
+	/*
+	 * If the device has been fully registered by the time this function is
+	 * called, software_node_notify() must be called separately so that the
+	 * symlinks get created and the reference count of the node is kept in
+	 * balance.
+	 */
+	if (device_is_registered(dev))
+		software_node_notify(dev, KOBJ_ADD);

 	return 0;
@@ -1065,6 +1073,7 @@ void device_remove_software_node(struct device *dev)
 	if (!swnode)
 		return;

-	software_node_notify(dev, KOBJ_REMOVE);
+	if (device_is_registered(dev))
+		software_node_notify(dev, KOBJ_REMOVE);
 	set_secondary_fwnode(dev, NULL);
 	kobject_put(&swnode->kobj);
@@ -1119,8 +1128,7 @@ int software_node_notify(struct device *dev, unsigned long action)
 	switch (action) {
 	case KOBJ_ADD:
-		ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
-					       "software_node");
+		ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
 		if (ret)
 			break;


@@ -1383,6 +1383,7 @@ config GPIO_TPS68470
 config GPIO_TQMX86
 	tristate "TQ-Systems QTMX86 GPIO"
 	depends on MFD_TQMX86 || COMPILE_TEST
+	depends on HAS_IOPORT_MAP
 	select GPIOLIB_IRQCHIP
 	help
 	  This driver supports GPIO on the TQMX86 IO controller.
@@ -1450,6 +1451,7 @@ menu "PCI GPIO expanders"
 config GPIO_AMD8111
 	tristate "AMD 8111 GPIO driver"
 	depends on X86 || COMPILE_TEST
+	depends on HAS_IOPORT_MAP
 	help
 	  The AMD 8111 south bridge contains 32 GPIO pins which can be used.


@@ -334,7 +334,7 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_set_irq_type;
 	ct->chip.irq_set_wake = gpio_set_wake_irq;
-	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
 	ct->regs.ack = GPIO_ISR;
 	ct->regs.mask = GPIO_IMR;


@@ -1880,6 +1880,7 @@ static void gpio_v2_line_info_changed_to_v1(
 		struct gpio_v2_line_info_changed *lic_v2,
 		struct gpioline_info_changed *lic_v1)
 {
+	memset(lic_v1, 0, sizeof(*lic_v1));
 	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
 	lic_v1->timestamp = lic_v2->timestamp_ns;
 	lic_v1->event_type = lic_v2->event_type;
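
The added memset() matters because lic_v1 is subsequently copied out to user space: without it, any padding bytes or fields the conversion never writes would carry stale kernel stack contents. A standalone illustration of the pattern (struct layout hypothetical, not the real GPIO UAPI):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the v1 event struct: the compiler may
     * insert padding, and a stack variable starts out holding whatever
     * bytes happened to be there before. */
    struct v1_event {
            unsigned long long timestamp;
            unsigned int event_type;
            unsigned int reserved[5];   /* never written by the converter */
    };

    static void to_v1(struct v1_event *out, unsigned long long ts, unsigned int type)
    {
            memset(out, 0, sizeof(*out));   /* the fix: no byte left uninitialized */
            out->timestamp = ts;
            out->event_type = type;
    }

    int main(void)
    {
            struct v1_event ev;

            to_v1(&ev, 1624860449ULL, 2);
            printf("ts=%llu type=%u reserved0=%u\n",
                   ev.timestamp, ev.event_type, ev.reserved[0]);
            return 0;
    }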


@@ -1047,11 +1047,12 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
 	rfb->base.obj[0] = obj;
 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
 	if (ret)
 		goto err;

-	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret)
 		goto err;
@@ -1071,9 +1072,6 @@ int amdgpu_display_gem_fb_verify_and_init(
 	rfb->base.obj[0] = obj;
 	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
-	if (ret)
-		goto err;

 	/* Verify that the modifier is supported. */
 	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
 				      mode_cmd->modifier[0])) {
@@ -1092,6 +1090,10 @@
 	if (ret)
 		goto err;

+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	if (ret)
+		goto err;
+
 	return 0;

 err:
 	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);


@@ -214,9 +214,21 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
 {
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int r;

 	/* pin buffer into GTT */
-	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+	if (r)
+		return r;
+
+	if (bo->tbo.moving) {
+		r = dma_fence_wait(bo->tbo.moving, true);
+		if (r) {
+			amdgpu_bo_unpin(bo);
+			return r;
+		}
+	}
+
+	return 0;
 }

 /**


@@ -6871,12 +6871,8 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
 	if (ring->use_doorbell) {
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
 			(adev->doorbell_index.kiq * 2) << 2);
-		/* If GC has entered CGPG, ringing doorbell > first page doesn't
-		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
-		 * this issue.
-		 */
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-			(adev->doorbell.size - 4));
+			(adev->doorbell_index.userqueue_end * 2) << 2);
 	}

 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,


@@ -3673,12 +3673,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
 	if (ring->use_doorbell) {
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
 			(adev->doorbell_index.kiq * 2) << 2);
-		/* If GC has entered CGPG, ringing doorbell > first page doesn't
-		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
-		 * this issue.
-		 */
 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-			(adev->doorbell.size - 4));
+			(adev->doorbell_index.userqueue_end * 2) << 2);
 	}

 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,


@@ -232,7 +232,6 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
 	pm_runtime_put_sync(dev->dev);

-	drm_crtc_vblank_on(c);
 }

 #define ATMEL_HLCDC_RGB444_OUTPUT	BIT(0)
@@ -343,8 +342,17 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
 					  struct drm_atomic_state *state)
+{
+	drm_crtc_vblank_on(c);
+}
+
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
+					  struct drm_atomic_state *state)
 {
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->dev->event_lock, flags);

 	if (c->state->event) {
 		c->state->event->pipe = drm_crtc_index(c);
@@ -354,12 +362,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
 		crtc->event = c->state->event;
 		c->state->event = NULL;
 	}
-}
-
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
-					  struct drm_atomic_state *state)
-{
-	/* TODO: write common plane control register if available */
+	spin_unlock_irqrestore(&c->dev->event_lock, flags);
 }

 static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {


@@ -593,6 +593,7 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
 	dev->mode_config.max_width = dc->desc->max_width;
 	dev->mode_config.max_height = dc->desc->max_height;
 	dev->mode_config.funcs = &mode_config_funcs;
+	dev->mode_config.async_page_flip = true;

 	return 0;
 }


@@ -137,6 +137,7 @@ static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
 	/* Allocate LCD interrupt resources */
 	irq_lcd = platform_get_irq(pdev, 0);
 	if (irq_lcd < 0) {
+		ret = irq_lcd;
 		drm_err(&kmb->drm, "irq_lcd not found");
 		goto setup_fail;
 	}


@@ -546,7 +546,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
 	int i, j;

-	if (!ttm_dma)
+	if (!ttm_dma || !ttm_dma->dma_address)
 		return;
 	if (!ttm_dma->pages) {
 		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
@@ -582,7 +582,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
 	int i, j;

-	if (!ttm_dma)
+	if (!ttm_dma || !ttm_dma->dma_address)
 		return;
 	if (!ttm_dma->pages) {
 		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);


@@ -93,7 +93,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 	if (ret)
 		return -EINVAL;

-	return 0;
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+	if (ret)
+		goto error;
+
+	if (nvbo->bo.moving)
+		ret = dma_fence_wait(nvbo->bo.moving, true);
+
+	ttm_bo_unreserve(&nvbo->bo);
+	if (ret)
+		goto error;
+
+	return ret;
+
+error:
+	nouveau_bo_unpin(nvbo);
+	return ret;
 }

 void nouveau_gem_prime_unpin(struct drm_gem_object *obj)


@@ -383,6 +383,7 @@ MODULE_DEVICE_TABLE(spi, ld9040_ids);
 static struct spi_driver ld9040_driver = {
 	.probe = ld9040_probe,
 	.remove = ld9040_remove,
+	.id_table = ld9040_ids,
 	.driver = {
 		.name = "panel-samsung-ld9040",
 		.of_match_table = ld9040_of_match,


@@ -77,9 +77,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
 	/* pin buffer into GTT */
 	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-	if (likely(ret == 0))
-		bo->prime_shared_count++;
+	if (unlikely(ret))
+		goto error;
+
+	if (bo->tbo.moving) {
+		ret = dma_fence_wait(bo->tbo.moving, false);
+		if (unlikely(ret)) {
+			radeon_bo_unpin(bo);
+			goto error;
+		}
+	}
+
+	bo->prime_shared_count++;

+error:
 	radeon_bo_unreserve(bo);
 	return ret;
 }


@@ -159,6 +159,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
 	struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
 	bool connected = false;

+	WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
 	if (vc4_hdmi->hpd_gpio) {
 		if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
 		    vc4_hdmi->hpd_active_low)
@@ -180,10 +182,12 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
 			}
 		}

+		pm_runtime_put(&vc4_hdmi->pdev->dev);
 		return connector_status_connected;
 	}

 	cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+	pm_runtime_put(&vc4_hdmi->pdev->dev);
 	return connector_status_disconnected;
 }
@@ -473,7 +477,6 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
 		   HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);

 	clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
-	clk_disable_unprepare(vc4_hdmi->hsm_clock);
 	clk_disable_unprepare(vc4_hdmi->pixel_clock);

 	ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -784,13 +787,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
 		return;
 	}

-	ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-	if (ret) {
-		DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
-		clk_disable_unprepare(vc4_hdmi->pixel_clock);
-		return;
-	}
-
 	vc4_hdmi_cec_update_clk_div(vc4_hdmi);

 	/*
@@ -801,7 +797,6 @@
 			   (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
 	if (ret) {
 		DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
-		clk_disable_unprepare(vc4_hdmi->hsm_clock);
 		clk_disable_unprepare(vc4_hdmi->pixel_clock);
 		return;
 	}
@@ -809,7 +804,6 @@
 	ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
 	if (ret) {
 		DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
-		clk_disable_unprepare(vc4_hdmi->hsm_clock);
 		clk_disable_unprepare(vc4_hdmi->pixel_clock);
 		return;
 	}
@@ -1929,6 +1923,29 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
 	return 0;
 }

+#ifdef CONFIG_PM
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+	return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#endif
+
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2165,11 +2182,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
 	{}
 };

+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+	SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+			   vc4_hdmi_runtime_resume,
+			   NULL)
+};
+
 struct platform_driver vc4_hdmi_driver = {
 	.probe = vc4_hdmi_dev_probe,
 	.remove = vc4_hdmi_dev_remove,
 	.driver = {
 		.name = "vc4_hdmi",
 		.of_match_table = vc4_hdmi_dt_match,
+		.pm = &vc4_hdmi_pm_ops,
 	},
 };


@@ -138,17 +138,23 @@ cp2615_i2c_send(struct usb_interface *usbif, struct cp2615_i2c_transfer *i2c_w)
 static int
 cp2615_i2c_recv(struct usb_interface *usbif, unsigned char tag, void *buf)
 {
-	struct cp2615_iop_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
-	struct cp2615_i2c_transfer_result *i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
 	struct usb_device *usbdev = interface_to_usbdev(usbif);
-	int res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN),
-			       msg, sizeof(struct cp2615_iop_msg), NULL, 0);
+	struct cp2615_iop_msg *msg;
+	struct cp2615_i2c_transfer_result *i2c_r;
+	int res;
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	res = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, IOP_EP_IN), msg,
+			   sizeof(struct cp2615_iop_msg), NULL, 0);
 	if (res < 0) {
 		kfree(msg);
 		return res;
 	}

+	i2c_r = (struct cp2615_i2c_transfer_result *)&msg->data;
 	if (msg->msg != htons(iop_I2cTransferResult) || i2c_r->tag != tag) {
 		kfree(msg);
 		return -EIO;


@@ -978,6 +978,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 	}

 out:
+	/* Unlock the SMBus device for use by BIOS/ACPI */
+	outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv));
+
 	pm_runtime_mark_last_busy(&priv->pci_dev->dev);
 	pm_runtime_put_autosuspend(&priv->pci_dev->dev);
 	mutex_unlock(&priv->acpi_lock);


@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 		}
 	}

-	ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+	ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
 	if (ret) {
 		dev_err(&adapter->dev, "failure sending STOP\n");
 		return -EREMOTEIO;
@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interface *interface,
 	 * Set bus frequency. The frequency is:
 	 * 120,000,000 / ( 16 + 2 * div * 4^prescale).
 	 * Using dev = 52, prescale = 0 give 100KHz */
-	ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+	ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
 			    NULL, 0);
 	if (ret) {
 		dev_err(&interface->dev, "failure sending bit rate");


@@ -526,7 +526,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
 		return put_user(funcs, (compat_ulong_t __user *)arg);
 	case I2C_RDWR: {
 		struct i2c_rdwr_ioctl_data32 rdwr_arg;
-		struct i2c_msg32 *p;
+		struct i2c_msg32 __user *p;
 		struct i2c_msg *rdwr_pa;
 		int i;


@@ -165,6 +165,7 @@ struct meson_host {
 	unsigned int bounce_buf_size;
 	void *bounce_buf;
+	void __iomem *bounce_iomem_buf;
 	dma_addr_t bounce_dma_addr;
 	struct sd_emmc_desc *descs;
 	dma_addr_t descs_dma_addr;
@@ -745,6 +746,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
 	writel(start, host->regs + SD_EMMC_START);
 }

+/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+				  size_t buflen, bool to_buffer)
+{
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	struct scatterlist *sgl = data->sg;
+	unsigned int nents = data->sg_len;
+	struct sg_mapping_iter miter;
+	unsigned int offset = 0;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	while ((offset < buflen) && sg_miter_next(&miter)) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		/* When dram_access_quirk, the bounce buffer is a iomem mapping */
+		if (host->dram_access_quirk) {
+			if (to_buffer)
+				memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+			else
+				memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+		} else {
+			if (to_buffer)
+				memcpy(host->bounce_buf + offset, miter.addr, len);
+			else
+				memcpy(miter.addr, host->bounce_buf + offset, len);
+		}
+
+		offset += len;
+	}
+
+	sg_miter_stop(&miter);
+}
+
 static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
 {
 	struct meson_host *host = mmc_priv(mmc);
@@ -788,8 +830,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
 		if (data->flags & MMC_DATA_WRITE) {
 			cmd_cfg |= CMD_CFG_DATA_WR;
 			WARN_ON(xfer_bytes > host->bounce_buf_size);
-			sg_copy_to_buffer(data->sg, data->sg_len,
-					  host->bounce_buf, xfer_bytes);
+			meson_mmc_copy_buffer(host, data, xfer_bytes, true);
 			dma_wmb();
 		}
@@ -958,8 +999,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
 	if (meson_mmc_bounce_buf_read(data)) {
 		xfer_bytes = data->blksz * data->blocks;
 		WARN_ON(xfer_bytes > host->bounce_buf_size);
-		sg_copy_from_buffer(data->sg, data->sg_len,
-				    host->bounce_buf, xfer_bytes);
+		meson_mmc_copy_buffer(host, data, xfer_bytes, false);
 	}

 	next_cmd = meson_mmc_get_next_command(cmd);
@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
 		 * instead of the DDR memory
 		 */
 		host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
-		host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+		host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
 		host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
 	} else {
 		/* data bounce buffer */

View File

@@ -1900,11 +1900,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
 	int err;
 	int i, bars = 0;

-	if (atomic_inc_return(&dev->enable_cnt) > 1) {
-		pci_update_current_state(dev, dev->current_state);
-		return 0;		/* already enabled */
+	/*
+	 * Power state could be unknown at this point, either due to a fresh
+	 * boot or a device removal call. So get the current power state
+	 * so that things like MSI message writing will behave as expected
+	 * (e.g. if the device really is in D0 at enable time).
+	 */
+	if (dev->pm_cap) {
+		u16 pmcsr;
+		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 	}

+	if (atomic_inc_return(&dev->enable_cnt) > 1)
+		return 0;		/* already enabled */
+
 	bridge = pci_upstream_bridge(dev);
 	if (bridge)
 		pci_enable_bridge(bridge);
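
The power-state field read in this hunk is the low two bits of the PMCSR register, so the decode is a single mask. A standalone sketch (the register value is made up; the mask value matches the PCI PM specification):

    #include <stdio.h>

    /* Low two bits of PMCSR encode the device power state, D0..D3hot. */
    #define PCI_PM_CTRL_STATE_MASK 0x0003

    int main(void)
    {
            unsigned short pmcsr = 0x0003;   /* hypothetical read: device in D3hot */
            unsigned int state = pmcsr & PCI_PM_CTRL_STATE_MASK;

            printf("current_state = D%u\n", state);
            return 0;
    }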


@@ -845,9 +845,11 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
 	i = 0;
 	device_for_each_child_node(dev, fwnode) {
 		ret = microchip_sgpio_register_bank(dev, priv, fwnode, i++);
-		if (ret)
+		if (ret) {
+			fwnode_handle_put(fwnode);
 			return ret;
+		}
 	}

 	if (priv->in.gpio.ngpio != priv->out.gpio.ngpio) {
 		dev_err(dev, "Banks must have same GPIO count\n");


@@ -1224,7 +1224,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
 	struct device *dev = pctl->dev;
 	struct resource res;
 	int npins = STM32_GPIO_PINS_PER_BANK;
-	int bank_nr, err;
+	int bank_nr, err, i = 0;

 	if (!IS_ERR(bank->rstc))
 		reset_control_deassert(bank->rstc);
@@ -1246,9 +1246,14 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
 	of_property_read_string(np, "st,bank-name", &bank->gpio_chip.label);

-	if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args)) {
+	if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i, &args)) {
 		bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
 		bank->gpio_chip.base = args.args[1];
+
+		npins = args.args[2];
+		while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+							 ++i, &args))
+			npins += args.args[2];
 	} else {
 		bank_nr = pctl->nbanks;
 		bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;


@@ -366,16 +366,6 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

 	mutex_lock(&matrix_dev->lock);
-
-	/*
-	 * If the KVM pointer is in flux or the guest is running, disallow
-	 * un-assignment of control domain.
-	 */
-	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
-		mutex_unlock(&matrix_dev->lock);
-		return -EBUSY;
-	}
-
 	vfio_ap_mdev_reset_queues(mdev);
 	list_del(&matrix_mdev->node);
 	kfree(matrix_mdev);


@@ -1387,6 +1387,22 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 	}
 }

+static bool sd_need_revalidate(struct block_device *bdev,
+			       struct scsi_disk *sdkp)
+{
+	if (sdkp->device->removable || sdkp->write_prot) {
+		if (bdev_check_media_change(bdev))
+			return true;
+	}
+
+	/*
+	 * Force a full rescan after ioctl(BLKRRPART). While the disk state has
+	 * nothing to do with partitions, BLKRRPART is used to force a full
+	 * revalidate after things like a format for historical reasons.
+	 */
+	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+}
+
 /**
  *	sd_open - open a scsi disk device
  *	@bdev: Block device of the scsi disk to open
@@ -1423,10 +1439,8 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
 	if (!scsi_block_when_processing_errors(sdev))
 		goto error_out;

-	if (sdev->removable || sdkp->write_prot) {
-		if (bdev_check_media_change(bdev))
-			sd_revalidate_disk(bdev->bd_disk);
-	}
+	if (sd_need_revalidate(bdev, sdkp))
+		sd_revalidate_disk(bdev->bd_disk);

 	/*
 	 * If the drive is empty, just let the open fail.


@@ -220,6 +220,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
 		return DISK_EVENT_EJECT_REQUEST;
 	else if (med->media_event_code == 2)
 		return DISK_EVENT_MEDIA_CHANGE;
+	else if (med->media_event_code == 3)
+		return DISK_EVENT_EJECT_REQUEST;
 	return 0;
 }


@@ -1124,12 +1124,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 		goto err_put_ctrl;
 	}

-	/* Clear potential interrupts */
-	reg = fspi_readl(f, f->iobase + FSPI_INTR);
-	if (reg)
-		fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
 	/* find the resources - controller memory mapped space */
 	if (is_acpi_node(f->dev->fwnode))
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1167,6 +1161,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 		}
 	}

+	/* Clear potential interrupts */
+	reg = fspi_readl(f, f->iobase + FSPI_INTR);
+	if (reg)
+		fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
 	/* find the irq */
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)

View File

@@ -1118,6 +1118,11 @@ static int tegra_slink_probe(struct platform_device *pdev)
 		pm_runtime_put_noidle(&pdev->dev);
 		goto exit_pm_disable;
 	}
+
+	reset_control_assert(tspi->rst);
+	udelay(2);
+	reset_control_deassert(tspi->rst);
+
 	tspi->def_command_reg = SLINK_M_S;
 	tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
 	tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);


@@ -642,6 +642,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 	}

 	info->eoi_time = 0;
+
+	/* is_active hasn't been reset yet, do it now. */
+	smp_store_release(&info->is_active, 0);
 	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
 }
@@ -811,6 +814,7 @@ static void xen_evtchn_close(evtchn_port_t port)
 		BUG();
 }

+/* Not called for lateeoi events. */
 static void event_handler_exit(struct irq_info *info)
 {
 	smp_store_release(&info->is_active, 0);
@@ -1883,7 +1887,12 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
 	if (VALID_EVTCHN(evtchn)) {
 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
-		event_handler_exit(info);
+		/*
+		 * Don't call event_handler_exit().
+		 * Need to keep is_active non-zero in order to ignore re-raised
+		 * events after cpu affinity changes while a lateeoi is pending.
+		 */
+		clear_evtchn(evtchn);
 	}
 }


@@ -118,6 +118,15 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 	_enter("{%llx:%llu},{%lx}",
 	       vnode->fid.vid, vnode->fid.vnode, page->index);

+	if (!PageUptodate(page)) {
+		if (copied < len) {
+			copied = 0;
+			goto out;
+		}
+
+		SetPageUptodate(page);
+	}
+
 	if (copied == 0)
 		goto out;
@@ -132,8 +141,6 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 		write_sequnlock(&vnode->cb_lock);
 	}

-	ASSERT(PageUptodate(page));
-
 	if (PagePrivate(page)) {
 		priv = page_private(page);
 		f = afs_page_dirty_from(page, priv);


@@ -668,14 +668,13 @@ out:
  * Handle lookups for the hidden .snap directory.
  */
 struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
-				   struct dentry *dentry, int err)
+				   struct dentry *dentry)
 {
 	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
 	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

 	/* .snap dir? */
-	if (err == -ENOENT &&
-	    ceph_snap(parent) == CEPH_NOSNAP &&
+	if (ceph_snap(parent) == CEPH_NOSNAP &&
 	    strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) {
 		struct dentry *res;
 		struct inode *inode = ceph_get_snapdir(parent);
@@ -742,7 +741,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
 	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
 	struct ceph_mds_request *req;
-	struct dentry *res;
 	int op;
 	int mask;
 	int err;
@@ -793,13 +791,17 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
 	req->r_parent = dir;
 	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
 	err = ceph_mdsc_do_request(mdsc, NULL, req);
-	res = ceph_handle_snapdir(req, dentry, err);
-	if (IS_ERR(res)) {
-		err = PTR_ERR(res);
-	} else {
-		dentry = res;
-		err = 0;
+	if (err == -ENOENT) {
+		struct dentry *res;
+
+		res = ceph_handle_snapdir(req, dentry);
+		if (IS_ERR(res)) {
+			err = PTR_ERR(res);
+		} else {
+			dentry = res;
+			err = 0;
+		}
 	}
 	dentry = ceph_finish_lookup(req, dentry, err);
 	ceph_mdsc_put_request(req);  /* will dput(dentry) */
 	dout("lookup result=%p\n", dentry);


@@ -578,6 +578,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
 	struct ceph_inode_info *ci = ceph_inode(dir);
 	struct inode *inode;
 	struct timespec64 now;
+	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
 	struct ceph_vino vino = { .ino = req->r_deleg_ino,
 				  .snap = CEPH_NOSNAP };
@@ -615,8 +616,10 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
 	ceph_file_layout_to_legacy(lo, &in.layout);

+	down_read(&mdsc->snap_rwsem);
 	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
 			      req->r_fmode, NULL);
+	up_read(&mdsc->snap_rwsem);
 	if (ret) {
 		dout("%s failed to fill inode: %d\n", __func__, ret);
 		ceph_dir_clear_complete(dir);
@@ -739,14 +742,16 @@ retry:
 	err = ceph_mdsc_do_request(mdsc,
 				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
 				   req);
-	dentry = ceph_handle_snapdir(req, dentry, err);
-	if (IS_ERR(dentry)) {
-		err = PTR_ERR(dentry);
-		goto out_req;
+	if (err == -ENOENT) {
+		dentry = ceph_handle_snapdir(req, dentry);
+		if (IS_ERR(dentry)) {
+			err = PTR_ERR(dentry);
+			goto out_req;
+		}
+		err = 0;
 	}
-	err = 0;

-	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
 		err = ceph_handle_notrace_create(dir, dentry);

 	if (d_in_lookup(dentry)) {


@@ -777,6 +777,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
 	umode_t mode = le32_to_cpu(info->mode);
 	dev_t rdev = le32_to_cpu(info->rdev);

+	lockdep_assert_held(&mdsc->snap_rwsem);
+
 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
 	     ci->i_version);


@@ -1218,7 +1218,7 @@ extern const struct dentry_operations ceph_dentry_ops;
 extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
 extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
 extern struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
-					  struct dentry *dentry, int err);
+					  struct dentry *dentry);
 extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
 					 struct dentry *dentry, int err);


@@ -1011,12 +1011,42 @@ out:
 }
 EXPORT_SYMBOL(netfs_readpage);

-static void netfs_clear_thp(struct page *page)
+/**
+ * netfs_skip_page_read - prep a page for writing without reading first
+ * @page: page being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full page write
+ * - write that lies in a page that is completely beyond EOF
+ * - write that covers the page from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the page and return true. Otherwise, return false.
+ */
+static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
 {
-	unsigned int i;
+	struct inode *inode = page->mapping->host;
+	loff_t i_size = i_size_read(inode);
+	size_t offset = offset_in_thp(page, pos);

-	for (i = 0; i < thp_nr_pages(page); i++)
-		clear_highpage(page + i);
+	/* Full page write */
+	if (offset == 0 && len >= thp_size(page))
+		return true;
+
+	/* pos beyond last page in the file */
+	if (pos - offset >= i_size)
+		goto zero_out;
+
+	/* Write that covers from the start of the page to EOF or beyond */
+	if (offset == 0 && (pos + len) >= i_size)
+		goto zero_out;
+
+	return false;
+zero_out:
+	zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+	return true;
 }

 /**
@@ -1024,7 +1054,7 @@ static void netfs_clear_thp(struct page *page)
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
- * @len: The length of the write in this page
+ * @len: The length of the write (may extend beyond the end of the page chosen)
 * @flags: AOP_* flags
 * @_page: Where to put the resultant page
 * @_fsdata: Place for the netfs to store a cookie
@@ -1061,8 +1091,6 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = file_inode(file);
 	unsigned int debug_index = 0;
 	pgoff_t index = pos >> PAGE_SHIFT;
-	int pos_in_page = pos & ~PAGE_MASK;
-	loff_t size;
 	int ret;
 	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
@@ -1090,13 +1118,8 @@ retry:
 	 * within the cache granule containing the EOF, in which case we need
 	 * to preload the granule.
 	 */
-	size = i_size_read(inode);
 	if (!ops->is_cache_enabled(inode) &&
-	    ((pos_in_page == 0 && len == thp_size(page)) ||
-	     (pos >= size) ||
-	     (pos_in_page == 0 && (pos + len) >= size))) {
-		netfs_clear_thp(page);
-		SetPageUptodate(page);
+	    netfs_skip_page_read(page, pos, len)) {
 		netfs_stat(&netfs_n_rh_write_zskip);
 		goto have_page_no_wait;
 	}
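
The three short-circuit cases documented in netfs_skip_page_read() can be modelled in a few lines. A standalone sketch of the predicate only (4 KiB pages assumed; the zeroing of unwritten parts that the real function performs is omitted):

    #include <stdbool.h>
    #include <stdio.h>

    static bool skip_read(long long pos, size_t len, long long i_size, size_t page_size)
    {
            size_t offset = (size_t)(pos % page_size);

            if (offset == 0 && len >= page_size)               /* full page write */
                    return true;
            if (pos - (long long)offset >= i_size)             /* page wholly beyond EOF */
                    return true;
            if (offset == 0 && pos + (long long)len >= i_size) /* start-to-EOF write */
                    return true;
            return false;
    }

    int main(void)
    {
            printf("%d\n", skip_read(8192, 4096, 100000, 4096));  /* 1: full page */
            printf("%d\n", skip_read(200000, 10, 100000, 4096));  /* 1: beyond EOF */
            printf("%d\n", skip_read(100, 10, 100000, 4096));     /* 0: must read */
            return 0;
    }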


@@ -1053,6 +1053,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
 	nilfs_sysfs_delete_superblock_group(nilfs);
 	nilfs_sysfs_delete_segctor_group(nilfs);
 	kobject_del(&nilfs->ns_dev_kobj);
+	kobject_put(&nilfs->ns_dev_kobj);
 	kfree(nilfs->ns_dev_subgroups);
 }


@@ -50,7 +50,7 @@ struct ceph_auth_client_ops {
 	 * another request.
 	 */
 	int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
-	int (*handle_reply)(struct ceph_auth_client *ac, int result,
+	int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
 			    void *buf, void *end, u8 *session_key,
 			    int *session_key_len, u8 *con_secret,
 			    int *con_secret_len);
@@ -104,6 +104,8 @@ struct ceph_auth_client {
 	struct mutex mutex;
 };

+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
 struct ceph_auth_client *ceph_auth_init(const char *name,
 					const struct ceph_crypto_key *key,
 					const int *con_modes);

View File

@@ -27,8 +27,10 @@ extern int debug_locks_off(void);
 	int __ret = 0;						\
								\
 	if (!oops_in_progress && unlikely(c)) {			\
+		instrumentation_begin();			\
 		if (debug_locks_off() && !debug_locks_silent)	\
 			WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c);	\
+		instrumentation_end();				\
 		__ret = 1;					\
 	}							\
 	__ret;							\

View File

@@ -741,17 +741,6 @@ static inline int hstate_index(struct hstate *h)
 	return h - hstates;
 }
 
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
-	if (!PageCompound(page))
-		return page->index;
-
-	return __basepage_index(page);
-}
-
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
@@ -988,11 +977,6 @@ static inline int hstate_index(struct hstate *h)
 	return 0;
 }
 
-static inline pgoff_t basepage_index(struct page *page)
-{
-	return page->index;
-}
-
 static inline int dissolve_free_huge_page(struct page *page)
 {
 	return 0;

View File

@@ -514,7 +514,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 }
 
 /*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
  */
 static inline pgoff_t page_to_index(struct page *page)
@@ -533,15 +533,16 @@ static inline pgoff_t page_to_index(struct page *page)
 	return pgoff;
 }
 
+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
 /*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
 static inline pgoff_t page_to_pgoff(struct page *page)
 {
-	if (unlikely(PageHeadHuge(page)))
-		return page->index << compound_order(page);
+	if (unlikely(PageHuge(page)))
+		return hugetlb_basepage_index(page);
 	return page_to_index(page);
 }
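The unit mismatch this fixes is plain arithmetic: hugetlb keeps page->index in huge-page units while the rest of the kernel counts in PAGE_SIZE units. A user-space sketch with invented numbers (2 MiB huge pages, 4 KiB base pages):

#include <stdio.h>

int main(void)
{
	unsigned long head_index = 3; /* huge page's ->index, in 2 MiB units */
	unsigned int order = 9;       /* 2 MiB / 4 KiB = 512 = 1 << 9 */
	unsigned long tail_nr = 17;   /* base-page offset inside the huge page */

	/* what hugetlb_basepage_index() computes: scale, then add the tail */
	unsigned long pgoff = (head_index << order) + tail_nr;

	printf("pgoff = %lu\n", pgoff); /* 3 * 512 + 17 = 1553 base pages */
	return 0;
}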

View File

@@ -135,6 +135,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller);
+void *vmalloc_no_huge(unsigned long size);
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);

View File

@@ -80,7 +80,7 @@
 					 struct uffdio_zeropage)
 #define UFFDIO_WRITEPROTECT	_IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
 				      struct uffdio_writeprotect)
-#define UFFDIO_CONTINUE		_IOR(UFFDIO, _UFFDIO_CONTINUE,	\
+#define UFFDIO_CONTINUE		_IOWR(UFFDIO, _UFFDIO_CONTINUE,	\
 				     struct uffdio_continue)
 
 /* read() structure */
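The direction change is not cosmetic: UFFDIO_CONTINUE both passes the range in and gets uffdio_continue.mapped written back by the kernel, which is exactly what _IOWR encodes and _IOR did not. A minimal, hypothetical caller (the helper name is invented; a userfaultfd registered for minor faults is assumed to exist already):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* uffd: userfaultfd descriptor; addr/len: faulting range (set up elsewhere) */
static int resolve_minor_fault(int uffd, unsigned long addr, unsigned long len)
{
	struct uffdio_continue cont = {
		.range = { .start = addr, .len = len },
		.mode  = 0,
	};

	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) < 0)
		return -1;
	/* the kernel wrote back how much it mapped: the "W" in _IOWR */
	return cont.mapped == (long long)len ? 0 : -1;
}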

View File

@@ -334,6 +334,14 @@ void __init swiotlb_exit(void)
 	io_tlb_default_mem = NULL;
 }
 
+/*
+ * Return the offset into an iotlb slot required to keep the device happy.
+ */
+static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+{
+	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+}
+
 /*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
@@ -346,10 +354,17 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	size_t alloc_size = mem->slots[index].alloc_size;
 	unsigned long pfn = PFN_DOWN(orig_addr);
 	unsigned char *vaddr = phys_to_virt(tlb_addr);
+	unsigned int tlb_offset;
 
 	if (orig_addr == INVALID_PHYS_ADDR)
 		return;
 
+	tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+		     swiotlb_align_offset(dev, orig_addr);
+
+	orig_addr += tlb_offset;
+	alloc_size -= tlb_offset;
+
 	if (size > alloc_size) {
 		dev_WARN_ONCE(dev, 1,
 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
@@ -390,14 +405,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 
 #define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
 
-/*
- * Return the offset into an iotlb slot required to keep the device happy.
- */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
-{
-	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
-}
-
 /*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
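The bounce fix is easiest to see with concrete numbers; the mask and addresses below are invented purely for illustration:

#include <stdio.h>

#define IO_TLB_SIZE 2048u /* swiotlb slot size */

int main(void)
{
	unsigned long long min_align_mask = 0xfff; /* device keeps low 12 bits */
	unsigned long long orig_addr = 0x80001a30; /* original DMA buffer */
	unsigned long long tlb_addr  = 0x40000c30; /* address inside bounce slot */

	/* in-slot offset the device expects preserved (swiotlb_align_offset) */
	unsigned int align_off = orig_addr & min_align_mask & (IO_TLB_SIZE - 1);
	/* how far past that point this particular bounce starts */
	unsigned int tlb_off = (tlb_addr & (IO_TLB_SIZE - 1)) - align_off;

	/* the copy must start at orig_addr + tlb_off, not orig_addr itself */
	printf("align_off=%#x tlb_off=%#x\n", align_off, tlb_off); /* 0x230 0x200 */
	return 0;
}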

View File

@@ -35,7 +35,6 @@
 #include <linux/jhash.h>
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
-#include <linux/hugetlb.h>
 #include <linux/freezer.h>
 #include <linux/memblock.h>
 #include <linux/fault-inject.h>
@@ -651,7 +650,7 @@ again:
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 		key->shared.i_seq = get_inode_sequence_number(inode);
-		key->shared.pgoff = basepage_index(tail);
+		key->shared.pgoff = page_to_pgoff(tail);
 		rcu_read_unlock();
 	}

View File

@@ -1094,20 +1094,15 @@ void kthread_flush_work(struct kthread_work *work)
 EXPORT_SYMBOL_GPL(kthread_flush_work);
 
 /*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and could
+ * not manipulate the work list_head any longer.
 *
- * The work might still be in use when this function finishes. See the
- * current_work proceed by the worker.
- *
- * Return: %true if @work was pending and successfully canceled,
- *	%false if @work was not pending
+ * The function is called under worker->lock. The lock is temporary
+ * released but the timer can't be set again in the meantime.
 */
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
-				  unsigned long *flags)
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+					      unsigned long *flags)
 {
-	/* Try to cancel the timer if exists. */
-	if (is_dwork) {
 	struct kthread_delayed_work *dwork =
 		container_of(work, struct kthread_delayed_work, work);
 	struct kthread_worker *worker = work->worker;
@@ -1123,8 +1118,23 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 	del_timer_sync(&dwork->timer);
 	raw_spin_lock_irqsave(&worker->lock, *flags);
 	work->canceling--;
-	}
+}
 
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work proceed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *	%false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work)
+{
 	/*
 	 * Try to remove the work from a worker list. It might either
 	 * be from worker->work_list or from worker->delayed_work_list.
@@ -1177,11 +1187,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	/* Work must not be used with >1 worker, see kthread_queue_work() */
 	WARN_ON_ONCE(work->worker != worker);
 
-	/* Do not fight with another command that is canceling this work. */
+	/*
+	 * Temporary cancel the work but do not fight with another command
+	 * that is canceling the work as well.
+	 *
+	 * It is a bit tricky because of possible races with another
+	 * mod_delayed_work() and cancel_delayed_work() callers.
+	 *
+	 * The timer must be canceled first because worker->lock is released
+	 * when doing so. But the work can be removed from the queue (list)
+	 * only when it can be queued again so that the return value can
+	 * be used for reference counting.
+	 */
+	kthread_cancel_delayed_work_timer(work, &flags);
 	if (work->canceling)
 		goto out;
+	ret = __kthread_cancel_work(work);
 
-	ret = __kthread_cancel_work(work, true, &flags);
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
@@ -1203,7 +1225,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
-	ret = __kthread_cancel_work(work, is_dwork, &flags);
+	if (is_dwork)
+		kthread_cancel_delayed_work_timer(work, &flags);
+
+	ret = __kthread_cancel_work(work);
 
 	if (worker->current_work != work)
 		goto out_fast;
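The ordering contract above (disarm the timer first, only then unlink the work) can be modelled in a few lines of plain C. This toy shares no code with kthread_worker and exists only to show why the other order loses:

#include <stdbool.h>
#include <stdio.h>

static bool timer_armed, queued;

static void timer_fires(void)
{
	if (timer_armed) {
		timer_armed = false;
		queued = true;       /* a live timer re-queues the work */
	}
}

static void cancel(bool timer_first)
{
	if (timer_first)
		timer_armed = false; /* step 1: no re-queueing possible */
	queued = false;              /* step 2: take work off the list */
	if (!timer_first)
		timer_fires();       /* timer still live: work sneaks back */
}

int main(void)
{
	timer_armed = true; queued = true;
	cancel(false);
	printf("wrong order, still queued: %d\n", queued); /* 1 */

	timer_armed = true; queued = true;
	cancel(true);
	timer_fires();
	printf("right order, still queued: %d\n", queued); /* 0 */
	return 0;
}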

View File

@@ -843,7 +843,7 @@ static int count_matching_names(struct lock_class *new_class)
 }
 
 /* used from NMI context -- must be lockless */
-static __always_inline struct lock_class *
+static noinstr struct lock_class *
 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
@@ -851,12 +851,14 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 	struct lock_class *class;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+		instrumentation_begin();
 		debug_locks_off();
 		printk(KERN_ERR
 			"BUG: looking up invalid subclass: %u\n", subclass);
 		printk(KERN_ERR
 			"turning off the locking correctness validator.\n");
 		dump_stack();
+		instrumentation_end();
 		return NULL;
 	}

View File

@@ -266,9 +266,18 @@ static void module_assert_mutex_or_preempt(void)
 #endif
 }
 
+#ifdef CONFIG_MODULE_SIG
 static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 module_param(sig_enforce, bool_enable_only, 0644);
 
+void set_module_sig_enforced(void)
+{
+	sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
+
 /*
 * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
 * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
@@ -279,11 +288,6 @@ bool is_module_sig_enforced(void)
 }
 EXPORT_SYMBOL(is_module_sig_enforced);
 
-void set_module_sig_enforced(void)
-{
-	sig_enforce = true;
-}
-
 /* Block module loading/unloading? */
 int modules_disabled = 0;
 core_param(nomodule, modules_disabled, bint, 0);

View File

@@ -3301,6 +3301,31 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
+ * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
+ * bottom-up, we only have to test whether the cfs_rq before us on the list
+ * is our child.
+ * If cfs_rq is not on the list, test whether a child needs to be added to
+ * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
+ */
+static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
+{
+	struct cfs_rq *prev_cfs_rq;
+	struct list_head *prev;
+
+	if (cfs_rq->on_list) {
+		prev = cfs_rq->leaf_cfs_rq_list.prev;
+	} else {
+		struct rq *rq = rq_of(cfs_rq);
+
+		prev = rq->tmp_alone_branch;
+	}
+
+	prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
+
+	return (prev_cfs_rq->tg->parent == cfs_rq->tg);
+}
+
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
@@ -3316,6 +3341,9 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 	if (cfs_rq->avg.runnable_sum)
 		return false;
 
+	if (child_cfs_rq_on_list(cfs_rq))
+		return false;
+
 	return true;
 }

View File

@@ -435,6 +435,12 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
 	 * Preallocation does not hold sighand::siglock so it can't
 	 * use the cache. The lockless caching requires that only
 	 * one consumer and only one producer run at a time.
+	 *
+	 * For the regular allocation case it is sufficient to
+	 * check @q for NULL because this code can only be called
+	 * if the target task @t has not been reaped yet; which
+	 * means this code can never observe the error pointer which is
+	 * written to @t->sigqueue_cache in exit_task_sigqueue_cache().
 	 */
 	q = READ_ONCE(t->sigqueue_cache);
 	if (!q || sigqueue_flags)
@@ -463,13 +469,18 @@ void exit_task_sigqueue_cache(struct task_struct *tsk)
 	struct sigqueue *q = tsk->sigqueue_cache;
 
 	if (q) {
-		tsk->sigqueue_cache = NULL;
 		/*
 		 * Hand it back to the cache as the task might
 		 * be self reaping which would leak the object.
 		 */
 		kmem_cache_free(sigqueue_cachep, q);
 	}
+
+	/*
+	 * Set an error pointer to ensure that @tsk will not cache a
+	 * sigqueue when it is reaping its child tasks
+	 */
+	tsk->sigqueue_cache = ERR_PTR(-1);
 }
 
 static void sigqueue_cache_or_free(struct sigqueue *q)
@@ -481,6 +492,10 @@ static void sigqueue_cache_or_free(struct sigqueue *q)
 	 * is intentional when run without holding current->sighand->siglock,
 	 * which is fine as current obviously cannot run __sigqueue_free()
 	 * concurrently.
+	 *
+	 * The NULL check is safe even if current has been reaped already,
+	 * in which case exit_task_sigqueue_cache() wrote an error pointer
+	 * into current->sigqueue_cache.
 	 */
 	if (!READ_ONCE(current->sigqueue_cache))
 		WRITE_ONCE(current->sigqueue_cache, q);
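The ERR_PTR(-1) sentinel is the whole trick: a NULL slot accepts a cached object, anything else refuses it. A stand-alone model (the names and the ERR_PTR macro are simplified here, not the kernel's):

#include <stdio.h>

#define ERR_PTR(err) ((void *)(long)(err))

static void *sigqueue_cache; /* models tsk->sigqueue_cache */

static int try_cache(void *obj)
{
	if (sigqueue_cache == NULL) { /* only a NULL slot takes the object */
		sigqueue_cache = obj;
		return 1;
	}
	return 0;                     /* caller must free obj itself */
}

int main(void)
{
	int obj;

	sigqueue_cache = ERR_PTR(-1); /* exit_task_sigqueue_cache() analogue */
	printf("cached after exit? %d\n", try_cache(&obj)); /* 0: no stale cache */
	return 0;
}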

View File

@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
 /*
 * Generic 'turn off all lock debugging' function:
 */
-noinstr int debug_locks_off(void)
+int debug_locks_off(void)
 {
 	if (debug_locks && __debug_locks_off()) {
 		if (!debug_locks_silent) {

View File

@@ -1588,15 +1588,12 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
 	return NULL;
 }
 
-pgoff_t __basepage_index(struct page *page)
+pgoff_t hugetlb_basepage_index(struct page *page)
 {
 	struct page *page_head = compound_head(page);
 	pgoff_t index = page_index(page_head);
 	unsigned long compound_idx;
 
-	if (!PageHuge(page_head))
-		return page_index(page);
-
 	if (compound_order(page_head) >= MAX_ORDER)
 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
 	else

View File

@@ -658,6 +658,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
 */
 static int me_kernel(struct page *p, unsigned long pfn)
 {
+	unlock_page(p);
 	return MF_IGNORED;
 }
 
@@ -667,6 +668,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
 static int me_unknown(struct page *p, unsigned long pfn)
 {
 	pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
+	unlock_page(p);
 	return MF_FAILED;
 }
 
@@ -675,6 +677,7 @@ static int me_unknown(struct page *p, unsigned long pfn)
 */
 static int me_pagecache_clean(struct page *p, unsigned long pfn)
 {
+	int ret;
 	struct address_space *mapping;
 
 	delete_from_lru_cache(p);
@@ -683,8 +686,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
 	 * For anonymous pages we're done the only reference left
 	 * should be the one m_f() holds.
 	 */
-	if (PageAnon(p))
-		return MF_RECOVERED;
+	if (PageAnon(p)) {
+		ret = MF_RECOVERED;
+		goto out;
+	}
 
 	/*
 	 * Now truncate the page in the page cache. This is really
@@ -698,7 +703,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
 		/*
 		 * Page has been teared down in the meanwhile
 		 */
-		return MF_FAILED;
+		ret = MF_FAILED;
+		goto out;
 	}
 
 	/*
@@ -706,7 +712,10 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
 	 *
 	 * Open: to take i_mutex or not for this? Right now we don't.
 	 */
-	return truncate_error_page(p, pfn, mapping);
+	ret = truncate_error_page(p, pfn, mapping);
+out:
+	unlock_page(p);
+	return ret;
 }
 
 /*
@@ -782,24 +791,26 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
 */
 static int me_swapcache_dirty(struct page *p, unsigned long pfn)
 {
+	int ret;
+
 	ClearPageDirty(p);
 	/* Trigger EIO in shmem: */
 	ClearPageUptodate(p);
 
-	if (!delete_from_lru_cache(p))
-		return MF_DELAYED;
-	else
-		return MF_FAILED;
+	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+	unlock_page(p);
+	return ret;
 }
 
 static int me_swapcache_clean(struct page *p, unsigned long pfn)
 {
+	int ret;
+
 	delete_from_swap_cache(p);
 
-	if (!delete_from_lru_cache(p))
-		return MF_RECOVERED;
-	else
-		return MF_FAILED;
+	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+	unlock_page(p);
+	return ret;
 }
 
 /*
@@ -820,6 +831,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 	mapping = page_mapping(hpage);
 	if (mapping) {
 		res = truncate_error_page(hpage, pfn, mapping);
+		unlock_page(hpage);
 	} else {
 		res = MF_FAILED;
 		unlock_page(hpage);
@@ -834,7 +846,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 			page_ref_inc(p);
 			res = MF_RECOVERED;
 		}
-		lock_page(hpage);
 	}
 
 	return res;
@@ -866,6 +877,8 @@ static struct page_state {
 	unsigned long mask;
 	unsigned long res;
 	enum mf_action_page_type type;
+
+	/* Callback ->action() has to unlock the relevant page inside it. */
 	int (*action)(struct page *p, unsigned long pfn);
 } error_states[] = {
 	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
@@ -929,6 +942,7 @@ static int page_action(struct page_state *ps, struct page *p,
 	int result;
 	int count;
 
+	/* page p should be unlocked after returning from ps->action(). */
 	result = ps->action(p, pfn);
 
 	count = page_count(p) - 1;
@@ -1253,7 +1267,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 	if (TestSetPageHWPoison(head)) {
 		pr_err("Memory failure: %#lx: already hardware poisoned\n",
 		       pfn);
-		return 0;
+		return -EHWPOISON;
 	}
 
 	num_poisoned_pages_inc();
@@ -1313,7 +1327,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 		goto out;
 	}
 
-	res = identify_page_state(pfn, p, page_flags);
+	return identify_page_state(pfn, p, page_flags);
 out:
 	unlock_page(head);
 	return res;
@@ -1429,9 +1443,10 @@ int memory_failure(unsigned long pfn, int flags)
 	struct page *hpage;
 	struct page *orig_head;
 	struct dev_pagemap *pgmap;
-	int res;
+	int res = 0;
 	unsigned long page_flags;
 	bool retry = true;
+	static DEFINE_MUTEX(mf_mutex);
 
 	if (!sysctl_memory_failure_recovery)
 		panic("Memory failure on page %lx", pfn);
@@ -1449,13 +1464,19 @@ int memory_failure(unsigned long pfn, int flags)
 		return -ENXIO;
 	}
 
+	mutex_lock(&mf_mutex);
+
 try_again:
-	if (PageHuge(p))
-		return memory_failure_hugetlb(pfn, flags);
+	if (PageHuge(p)) {
+		res = memory_failure_hugetlb(pfn, flags);
+		goto unlock_mutex;
+	}
+
 	if (TestSetPageHWPoison(p)) {
 		pr_err("Memory failure: %#lx: already hardware poisoned\n",
 			pfn);
-		return 0;
+		res = -EHWPOISON;
+		goto unlock_mutex;
 	}
 
 	orig_head = hpage = compound_head(p);
@@ -1488,17 +1509,19 @@ try_again:
 				res = MF_FAILED;
 			}
 			action_result(pfn, MF_MSG_BUDDY, res);
-			return res == MF_RECOVERED ? 0 : -EBUSY;
+			res = res == MF_RECOVERED ? 0 : -EBUSY;
 		} else {
 			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
-			return -EBUSY;
+			res = -EBUSY;
 		}
+		goto unlock_mutex;
 	}
 
 	if (PageTransHuge(hpage)) {
 		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
 			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
-			return -EBUSY;
+			res = -EBUSY;
+			goto unlock_mutex;
 		}
 		VM_BUG_ON_PAGE(!page_count(p), p);
 	}
@@ -1522,7 +1545,7 @@ try_again:
 	if (PageCompound(p) && compound_head(p) != orig_head) {
 		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
 		res = -EBUSY;
-		goto out;
+		goto unlock_page;
 	}
 
 	/*
@@ -1542,14 +1565,14 @@ try_again:
 		num_poisoned_pages_dec();
 		unlock_page(p);
 		put_page(p);
-		return 0;
+		goto unlock_mutex;
 	}
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
 			num_poisoned_pages_dec();
 		unlock_page(p);
 		put_page(p);
-		return 0;
+		goto unlock_mutex;
 	}
 
 	/*
@@ -1573,7 +1596,7 @@ try_again:
 	if (!hwpoison_user_mappings(p, pfn, flags, &p)) {
 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 		res = -EBUSY;
-		goto out;
+		goto unlock_page;
 	}
 
 	/*
@@ -1582,13 +1605,17 @@ try_again:
 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
 		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
 		res = -EBUSY;
-		goto out;
+		goto unlock_page;
 	}
 
 identify_page_state:
 	res = identify_page_state(pfn, p, page_flags);
-out:
+	mutex_unlock(&mf_mutex);
+	return res;
+unlock_page:
 	unlock_page(p);
+unlock_mutex:
+	mutex_unlock(&mf_mutex);
 	return res;
 }
 EXPORT_SYMBOL_GPL(memory_failure);

View File

@@ -5065,9 +5065,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	 * Skip populated array elements to determine if any pages need
 	 * to be allocated before disabling IRQs.
 	 */
-	while (page_array && page_array[nr_populated] && nr_populated < nr_pages)
+	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
 		nr_populated++;
 
+	/* Already populated array? */
+	if (unlikely(page_array && nr_pages - nr_populated == 0))
+		return 0;
+
 	/* Use the single page allocator for one page. */
 	if (nr_pages - nr_populated == 1)
 		goto failed;
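The operand swap matters because && evaluates left to right: with the bounds test last, a fully populated array was indexed one element past its end before the loop noticed it was done. A small user-space reproduction with made-up sizes:

#include <stdio.h>

int main(void)
{
	void *page_array[4] = { "p0", "p1", "p2", "p3" }; /* fully populated */
	unsigned long nr_pages = 4, nr_populated = 0;

	/* fixed order: the bounds check short-circuits before the access */
	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
		nr_populated++;

	/* the old order would have read page_array[4] here: out of bounds */
	if (nr_pages - nr_populated == 0)
		printf("array already full, return 0\n");
	return 0;
}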

View File

@@ -116,6 +116,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 	return pfn_is_match(pvmw->page, pfn);
 }
 
+static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
+{
+	pvmw->address = (pvmw->address + size) & ~(size - 1);
+	if (!pvmw->address)
+		pvmw->address = ULONG_MAX;
+}
+
 /**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
@@ -144,6 +151,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 {
 	struct mm_struct *mm = pvmw->vma->vm_mm;
 	struct page *page = pvmw->page;
+	unsigned long end;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -153,10 +161,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pmd && !pvmw->pte)
 		return not_found(pvmw);
 
-	if (pvmw->pte)
-		goto next_pte;
+	if (unlikely(PageHuge(page))) {
+		/* The only possible mapping was handled on last iteration */
+		if (pvmw->pte)
+			return not_found(pvmw);
 
-	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
 		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
@@ -168,16 +177,36 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			return not_found(pvmw);
 		return true;
 	}
+
+	/*
+	 * Seek to next pte only makes sense for THP.
+	 * But more important than that optimization, is to filter out
+	 * any PageKsm page: whose page->index misleads vma_address()
+	 * and vma_address_end() to disaster.
+	 */
+	end = PageTransCompound(page) ?
+		vma_address_end(page, pvmw->vma) :
+		pvmw->address + PAGE_SIZE;
+	if (pvmw->pte)
+		goto next_pte;
 restart:
-	pgd = pgd_offset(mm, pvmw->address);
-	if (!pgd_present(*pgd))
-		return false;
-	p4d = p4d_offset(pgd, pvmw->address);
-	if (!p4d_present(*p4d))
-		return false;
-	pud = pud_offset(p4d, pvmw->address);
-	if (!pud_present(*pud))
-		return false;
-	pvmw->pmd = pmd_offset(pud, pvmw->address);
+	do {
+		pgd = pgd_offset(mm, pvmw->address);
+		if (!pgd_present(*pgd)) {
+			step_forward(pvmw, PGDIR_SIZE);
+			continue;
+		}
+		p4d = p4d_offset(pgd, pvmw->address);
+		if (!p4d_present(*p4d)) {
+			step_forward(pvmw, P4D_SIZE);
+			continue;
+		}
+		pud = pud_offset(p4d, pvmw->address);
+		if (!pud_present(*pud)) {
+			step_forward(pvmw, PUD_SIZE);
+			continue;
+		}
+
+		pvmw->pmd = pmd_offset(pud, pvmw->address);
 
 		/*
 		 * Make sure the pmd value isn't cached in a register by the
@@ -185,32 +214,32 @@ restart:
 		 * subsequent update.
 		 */
 		pmde = READ_ONCE(*pvmw->pmd);
 
 		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
 			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-			if (likely(pmd_trans_huge(*pvmw->pmd))) {
+			pmde = *pvmw->pmd;
+			if (likely(pmd_trans_huge(pmde))) {
 				if (pvmw->flags & PVMW_MIGRATION)
 					return not_found(pvmw);
-				if (pmd_page(*pvmw->pmd) != page)
+				if (pmd_page(pmde) != page)
 					return not_found(pvmw);
 				return true;
-			} else if (!pmd_present(*pvmw->pmd)) {
-				if (thp_migration_supported()) {
-					if (!(pvmw->flags & PVMW_MIGRATION))
-						return not_found(pvmw);
-					if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
-						swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
+			}
+			if (!pmd_present(pmde)) {
+				swp_entry_t entry;
 
-						if (migration_entry_to_page(entry) != page)
-							return not_found(pvmw);
-						return true;
-					}
-				}
-				return not_found(pvmw);
-			} else {
-				/* THP pmd was split under us: handle on pte level */
-				spin_unlock(pvmw->ptl);
-				pvmw->ptl = NULL;
+				if (!thp_migration_supported() ||
+				    !(pvmw->flags & PVMW_MIGRATION))
+					return not_found(pvmw);
+				entry = pmd_to_swp_entry(pmde);
+				if (!is_migration_entry(entry) ||
+				    migration_entry_to_page(entry) != page)
+					return not_found(pvmw);
+				return true;
 			}
+			/* THP pmd was split under us: handle on pte level */
+			spin_unlock(pvmw->ptl);
+			pvmw->ptl = NULL;
 		} else if (!pmd_present(pmde)) {
 			/*
 			 * If PVMW_SYNC, take and drop THP pmd lock so that we
@@ -218,39 +247,38 @@ restart:
 			 * cleared *pmd but not decremented compound_mapcount().
 			 */
 			if ((pvmw->flags & PVMW_SYNC) &&
-			    PageTransCompound(pvmw->page)) {
+			    PageTransCompound(page)) {
 				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
 				spin_unlock(ptl);
 			}
-			return false;
+			step_forward(pvmw, PMD_SIZE);
+			continue;
 		}
 		if (!map_pte(pvmw))
 			goto next_pte;
-	while (1) {
-		unsigned long end;
-
+this_pte:
 		if (check_pte(pvmw))
 			return true;
next_pte:
-		/* Seek to next pte only makes sense for THP */
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-			return not_found(pvmw);
-		end = vma_address_end(pvmw->page, pvmw->vma);
 		do {
 			pvmw->address += PAGE_SIZE;
 			if (pvmw->address >= end)
 				return not_found(pvmw);
 			/* Did we cross page table boundary? */
-			if (pvmw->address % PMD_SIZE == 0) {
-				pte_unmap(pvmw->pte);
+			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
 				if (pvmw->ptl) {
 					spin_unlock(pvmw->ptl);
 					pvmw->ptl = NULL;
 				}
+				pte_unmap(pvmw->pte);
+				pvmw->pte = NULL;
 				goto restart;
-			} else {
-				pvmw->pte++;
 			}
+			pvmw->pte++;
+			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
+				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+				spin_lock(pvmw->ptl);
+			}
 		} while (pte_none(*pvmw->pte));
@@ -258,7 +286,10 @@ next_pte:
 		if (!pvmw->ptl) {
 			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
 			spin_lock(pvmw->ptl);
 		}
-	}
+		goto this_pte;
+	} while (pvmw->address < end);
+
+	return false;
 }
 
 /**
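step_forward() above packs two ideas into two lines: advance the address to the next boundary of the given size, and saturate rather than wrap when the addition overflows at the top of the address space. A quick user-space check (boundary size illustrative):

#include <stdio.h>

static unsigned long step_forward(unsigned long address, unsigned long size)
{
	address = (address + size) & ~(size - 1);
	return address ? address : ~0UL; /* saturate instead of wrapping to 0 */
}

int main(void)
{
	unsigned long pmd_size = 1UL << 21; /* 2 MiB */

	/* a mid-PMD address advances to the next 2 MiB boundary */
	printf("%#lx\n", step_forward(0x201234UL, pmd_size)); /* 0x400000 */
	/* near ULONG_MAX the sum wraps; returning ULONG_MAX ends the walk,
	 * since the caller loops while (pvmw->address < end) */
	printf("%#lx\n", step_forward(~0UL - 5, pmd_size));
	return 0;
}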

View File

@@ -2344,15 +2344,16 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long align, unsigned long flags, unsigned long start,
-		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
+		unsigned long align, unsigned long shift, unsigned long flags,
+		unsigned long start, unsigned long end, int node,
+		gfp_t gfp_mask, const void *caller)
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
 	unsigned long requested_size = size;
 
 	BUG_ON(in_interrupt());
-	size = PAGE_ALIGN(size);
+	size = ALIGN(size, 1ul << shift);
 	if (unlikely(!size))
 		return NULL;
 
@@ -2384,8 +2385,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
 				       const void *caller)
 {
-	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
-				  GFP_KERNEL, caller);
+	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
+				  NUMA_NO_NODE, GFP_KERNEL, caller);
 }
 
 /**
@@ -2401,7 +2402,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+				  VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL,
 				  __builtin_return_address(0));
 }
@@ -2409,7 +2411,8 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				const void *caller)
 {
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
+				  VMALLOC_START, VMALLOC_END,
 				  NUMA_NO_NODE, GFP_KERNEL, caller);
 }
 
@@ -2902,9 +2905,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	}
 
 again:
-	size = PAGE_ALIGN(size);
-	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
-				  vm_flags, start, end, node, gfp_mask, caller);
+	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
+				  VM_UNINITIALIZED | vm_flags, start, end, node,
+				  gfp_mask, caller);
 	if (!area) {
 		warn_alloc(gfp_mask, NULL,
 			"vmalloc size %lu allocation failure: "
@@ -2923,6 +2926,7 @@ again:
 	 */
 	clear_vm_uninitialized_flag(area);
 
+	size = PAGE_ALIGN(size);
 	kmemleak_vmalloc(area, size, gfp_mask);
 
 	return addr;
@@ -2998,6 +3002,23 @@ void *vmalloc(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ * vmalloc_no_huge - allocate virtually contiguous memory using small pages
+ * @size:    allocation size
+ *
+ * Allocate enough non-huge pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+void *vmalloc_no_huge(unsigned long size)
+{
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+				    NUMA_NO_NODE, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(vmalloc_no_huge);
+
 /**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:    allocation size
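A hypothetical call site for the new export, for flavor: a caller that later changes protections on individual 4 KiB pages cannot tolerate a huge-page-backed area, reportedly the s390 KVM situation this helper was added for in the same release. Sketch only, not code from this merge:

#include <linux/vmalloc.h>

/* guarantee base-page PTEs so per-4K permission changes stay possible */
static void *alloc_basepage_buf(unsigned long size)
{
	return vmalloc_no_huge(size); /* free with vfree() as usual */
}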

View File

@@ -36,7 +36,7 @@ static int init_protocol(struct ceph_auth_client *ac, int proto)
 	}
 }
 
-static void set_global_id(struct ceph_auth_client *ac, u64 global_id)
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id)
 {
 	dout("%s global_id %llu\n", __func__, global_id);
 
@@ -260,19 +260,22 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
 		ac->negotiating = false;
 	}
 
-	ret = ac->ops->handle_reply(ac, result, payload, payload_end,
+	if (result) {
+		pr_err("auth protocol '%s' mauth authentication failed: %d\n",
+		       ceph_auth_proto_name(ac->protocol), result);
+		ret = result;
+		goto out;
+	}
+
+	ret = ac->ops->handle_reply(ac, global_id, payload, payload_end,
 				    NULL, NULL, NULL, NULL);
 	if (ret == -EAGAIN) {
 		ret = build_request(ac, true, reply_buf, reply_len);
 		goto out;
 	} else if (ret) {
-		pr_err("auth protocol '%s' mauth authentication failed: %d\n",
-		       ceph_auth_proto_name(ac->protocol), result);
 		goto out;
 	}
 
-	set_global_id(ac, global_id);
-
 out:
 	mutex_unlock(&ac->mutex);
 	return ret;
@@ -498,11 +501,10 @@ int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
 	int ret;
 
 	mutex_lock(&ac->mutex);
-	ret = ac->ops->handle_reply(ac, 0, reply, reply + reply_len,
+	ret = ac->ops->handle_reply(ac, global_id, reply, reply + reply_len,
 				    session_key, session_key_len,
 				    con_secret, con_secret_len);
-	if (!ret)
-		set_global_id(ac, global_id);
+	WARN_ON(ret == -EAGAIN || ret > 0);
 	mutex_unlock(&ac->mutex);
 	return ret;
 }

View File

@@ -69,7 +69,7 @@ static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
 * the generic auth code decode the global_id, and we carry no actual
 * authenticate state, so nothing happens here.
 */
-static int handle_reply(struct ceph_auth_client *ac, int result,
+static int handle_reply(struct ceph_auth_client *ac, u64 global_id,
 			void *buf, void *end, u8 *session_key,
 			int *session_key_len, u8 *con_secret,
 			int *con_secret_len)
@@ -77,7 +77,8 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
 	struct ceph_auth_none_info *xi = ac->private;
 
 	xi->starting = false;
-	return result;
+	ceph_auth_set_global_id(ac, global_id);
+	return 0;
 }
 
 static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)

View File

@@ -597,7 +597,7 @@ bad:
 	return -EINVAL;
 }
 
-static int handle_auth_session_key(struct ceph_auth_client *ac,
+static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
 				   void **p, void *end,
 				   u8 *session_key, int *session_key_len,
 				   u8 *con_secret, int *con_secret_len)
@@ -613,6 +613,7 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
 	if (ret)
 		return ret;
 
+	ceph_auth_set_global_id(ac, global_id);
 	if (*p == end) {
 		/* pre-nautilus (or didn't request service tickets!) */
 		WARN_ON(session_key || con_secret);
@@ -661,7 +662,7 @@ e_inval:
 	return -EINVAL;
 }
 
-static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
+static int ceph_x_handle_reply(struct ceph_auth_client *ac, u64 global_id,
 			       void *buf, void *end,
 			       u8 *session_key, int *session_key_len,
 			       u8 *con_secret, int *con_secret_len)
@@ -669,13 +670,11 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 	struct ceph_x_info *xi = ac->private;
 	struct ceph_x_ticket_handler *th;
 	int len = end - buf;
+	int result;
 	void *p;
 	int op;
 	int ret;
 
-	if (result)
-		return result;	/* XXX hmm? */
-
 	if (xi->starting) {
 		/* it's a hello */
 		struct ceph_x_server_challenge *sc = buf;
@@ -697,9 +696,9 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 	switch (op) {
 	case CEPHX_GET_AUTH_SESSION_KEY:
 		/* AUTH ticket + [connection secret] + service tickets */
-		ret = handle_auth_session_key(ac, &p, end, session_key,
-					      session_key_len, con_secret,
-					      con_secret_len);
+		ret = handle_auth_session_key(ac, global_id, &p, end,
+					      session_key, session_key_len,
+					      con_secret, con_secret_len);
 		break;
 
 	case CEPHX_GET_PRINCIPAL_SESSION_KEY:
View File

@@ -3388,44 +3388,30 @@ static int rt5645_probe(struct snd_soc_component *component)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
 	struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
-	int ret = 0;
 
 	rt5645->component = component;
 
 	switch (rt5645->codec_type) {
 	case CODEC_TYPE_RT5645:
-		ret = snd_soc_dapm_new_controls(dapm,
+		snd_soc_dapm_new_controls(dapm,
 			rt5645_specific_dapm_widgets,
 			ARRAY_SIZE(rt5645_specific_dapm_widgets));
-		if (ret < 0)
-			goto exit;
-
-		ret = snd_soc_dapm_add_routes(dapm,
+		snd_soc_dapm_add_routes(dapm,
 			rt5645_specific_dapm_routes,
 			ARRAY_SIZE(rt5645_specific_dapm_routes));
-		if (ret < 0)
-			goto exit;
-
 		if (rt5645->v_id < 3) {
-			ret = snd_soc_dapm_add_routes(dapm,
+			snd_soc_dapm_add_routes(dapm,
 				rt5645_old_dapm_routes,
 				ARRAY_SIZE(rt5645_old_dapm_routes));
-			if (ret < 0)
-				goto exit;
 		}
 		break;
 	case CODEC_TYPE_RT5650:
-		ret = snd_soc_dapm_new_controls(dapm,
+		snd_soc_dapm_new_controls(dapm,
 			rt5650_specific_dapm_widgets,
 			ARRAY_SIZE(rt5650_specific_dapm_widgets));
-		if (ret < 0)
-			goto exit;
-
-		ret = snd_soc_dapm_add_routes(dapm,
+		snd_soc_dapm_add_routes(dapm,
 			rt5650_specific_dapm_routes,
 			ARRAY_SIZE(rt5650_specific_dapm_routes));
-		if (ret < 0)
-			goto exit;
 		break;
 	}
 
@@ -3433,17 +3419,9 @@ static int rt5645_probe(struct snd_soc_component *component)
 
 	/* for JD function */
 	if (rt5645->pdata.jd_mode) {
-		ret = snd_soc_dapm_force_enable_pin(dapm, "JD Power");
-		if (ret < 0)
-			goto exit;
-
-		ret = snd_soc_dapm_force_enable_pin(dapm, "LDO2");
-		if (ret < 0)
-			goto exit;
-
-		ret = snd_soc_dapm_sync(dapm);
-		if (ret < 0)
-			goto exit;
+		snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	if (rt5645->pdata.long_name)
@@ -3454,14 +3432,9 @@ static int rt5645_probe(struct snd_soc_component *component)
 		GFP_KERNEL);
 
 	if (!rt5645->eq_param)
-		ret = -ENOMEM;
-exit:
-	/*
-	 * If there was an error above, everything will be cleaned up by the
-	 * caller if we return an error here. This will be done with a later
-	 * call to rt5645_remove().
-	 */
-	return ret;
+		return -ENOMEM;
+
+	return 0;
 }
 
 static void rt5645_remove(struct snd_soc_component *component)

View File

@@ -376,7 +376,7 @@ static void test_add_max_memory_regions(void)
 	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
 		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
 
-	mem = mmap(NULL, MEM_REGION_SIZE * max_mem_slots + alignment,
+	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
 		   PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
 	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
@@ -401,7 +401,7 @@ static void test_add_max_memory_regions(void)
 	TEST_ASSERT(ret == -1 && errno == EINVAL,
 		    "Adding one more memory slot should fail with EINVAL");
 
-	munmap(mem, MEM_REGION_SIZE * max_mem_slots + alignment);
+	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
 	munmap(mem_extra, MEM_REGION_SIZE);
 	kvm_vm_free(vm);
 }
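The (size_t) cast is a classic integer-promotion fix: with tens of thousands of slots and a multi-megabyte region the product overflows 32-bit arithmetic before it is widened. A user-space demonstration (values illustrative, matching no particular host):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int max_mem_slots = 32764;
	int mem_region_size = 0x200000; /* 2 MiB */

	/* int * unsigned int: computed modulo 2^32, widened too late */
	size_t wrong = mem_region_size * max_mem_slots;
	/* widen first, then multiply */
	size_t right = (size_t)max_mem_slots * mem_region_size;

	printf("wrong=%zu right=%zu\n", wrong, right);
	return 0;
}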

View File

@@ -2055,6 +2055,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
 	return true;
 }
 
+static int kvm_try_get_pfn(kvm_pfn_t pfn)
+{
+	if (kvm_is_reserved_pfn(pfn))
+		return 1;
+
+	return get_page_unless_zero(pfn_to_page(pfn));
+}
+
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 			       unsigned long addr, bool *async,
 			       bool write_fault, bool *writable,
@@ -2104,13 +2111,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 	 * Whoever called remap_pfn_range is also going to call e.g.
 	 * unmap_mapping_range before the underlying pages are freed,
 	 * causing a call to our MMU notifier.
+	 *
+	 * Certain IO or PFNMAP mappings can be backed with valid
+	 * struct pages, but be allocated without refcounting e.g.,
+	 * tail pages of non-compound higher order allocations, which
+	 * would then underflow the refcount when the caller does the
+	 * required put_page. Don't allow those pages here.
 	 */
-	kvm_get_pfn(pfn);
+	if (!kvm_try_get_pfn(pfn))
+		r = -EFAULT;
 
 out:
 	pte_unmap_unlock(ptep, ptl);
 	*p_pfn = pfn;
-	return 0;
+
+	return r;
 }
 
 /*
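kvm_try_get_pfn() is built around the get_page_unless_zero() idiom: take a reference only if the refcount is observably non-zero, since bumping a zero count would resurrect a page already being freed (and, for the never-refcounted pages named in the comment, the caller's later put_page() would underflow). A user-space model of the idiom:

#include <stdatomic.h>
#include <stdio.h>

static int get_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* bump only if the count is still non-zero right now */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return 1;
	}
	return 0; /* object already dying: treated as -EFAULT above */
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %d (refcount now 3)\n", get_unless_zero(&live));
	printf("dying: %d (no reference taken)\n", get_unless_zero(&dying));
	return 0;
}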