Merge 5.15.76 into android14-5.15
Changes in 5.15.76
    r8152: add PID for the Lenovo OneLink+ Dock
    arm64/mm: Consolidate TCR_EL1 fields
    usb: gadget: uvc: consistently use define for headerlen
    usb: gadget: uvc: use on returned header len in video_encode_isoc_sg
    usb: gadget: uvc: rework uvcg_queue_next_buffer to uvcg_complete_buffer
    usb: gadget: uvc: giveback vb2 buffer on req complete
    usb: gadget: uvc: improve sg exit condition
    arm64: errata: Remove AES hwcap for COMPAT tasks
    perf/x86/intel/pt: Relax address filter validation
    btrfs: enhance unsupported compat RO flags handling
    ocfs2: clear dinode links count in case of error
    ocfs2: fix BUG when iput after ocfs2_mknod fails
    selinux: enable use of both GFP_KERNEL and GFP_ATOMIC in convert_context()
    cpufreq: qcom: fix writes in read-only memory region
    i2c: qcom-cci: Fix ordering of pm_runtime_xx and i2c_add_adapter
    x86/microcode/AMD: Apply the patch early on every logical thread
    hwmon/coretemp: Handle large core ID value
    ata: ahci-imx: Fix MODULE_ALIAS
    ata: ahci: Match EM_MAX_SLOTS with SATA_PMP_MAX_PORTS
    x86/resctrl: Fix min_cbm_bits for AMD
    cpufreq: qcom: fix memory leak in error path
    drm/amdgpu: fix sdma doorbell init ordering on APUs
    mm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages
    kvm: Add support for arch compat vm ioctls
    KVM: arm64: vgic: Fix exit condition in scan_its_table()
    media: ipu3-imgu: Fix NULL pointer dereference in active selection access
    media: mceusb: set timeout to at least timeout provided
    media: venus: dec: Handle the case where find_format fails
    x86/topology: Fix multiple packages shown on a single-package system
    x86/topology: Fix duplicated core ID within a package
    btrfs: fix processing of delayed data refs during backref walking
    btrfs: fix processing of delayed tree block refs during backref walking
    drm/vc4: Add module dependency on hdmi-codec
    ACPI: extlog: Handle multiple records
    tipc: Fix recognition of trial period
    tipc: fix an information leak in tipc_topsrv_kern_subscr
    i40e: Fix DMA mappings leak
    HID: magicmouse: Do not set BTN_MOUSE on double report
    sfc: Change VF mac via PF as first preference if available.
    net/atm: fix proc_mpc_write incorrect return value
    net: phy: dp83867: Extend RX strap quirk for SGMII mode
    net: phylink: add mac_managed_pm in phylink_config structure
    scsi: lpfc: Fix memory leak in lpfc_create_port()
    udp: Update reuse->has_conns under reuseport_lock.
    cifs: Fix xid leak in cifs_create()
    cifs: Fix xid leak in cifs_copy_file_range()
    cifs: Fix xid leak in cifs_flock()
    cifs: Fix xid leak in cifs_ses_add_channel()
    dm: remove unnecessary assignment statement in alloc_dev()
    net: hsr: avoid possible NULL deref in skb_clone()
    ionic: catch NULL pointer issue on reconfig
    netfilter: nf_tables: relax NFTA_SET_ELEM_KEY_END set flags requirements
    nvme-hwmon: consistently ignore errors from nvme_hwmon_init
    nvme-hwmon: kmalloc the NVME SMART log buffer
    nvmet: fix workqueue MEM_RECLAIM flushing dependency
    net: sched: cake: fix null pointer access issue when cake_init() fails
    net: sched: delete duplicate cleanup of backlog and qlen
    net: sched: sfb: fix null pointer access issue when sfb_init() fails
    sfc: include vport_id in filter spec hash and equal()
    wwan_hwsim: fix possible memory leak in wwan_hwsim_dev_new()
    net: hns: fix possible memory leak in hnae_ae_register()
    net: sched: fix race condition in qdisc_graft()
    net: phy: dp83822: disable MDI crossover status change interrupt
    iommu/vt-d: Allow NVS regions in arch_rmrr_sanity_check()
    iommu/vt-d: Clean up si_domain in the init_dmars() error path
    fs: dlm: fix invalid derefence of sb_lvbptr
    arm64: mte: move register initialization to C
    ksmbd: handle smb2 query dir request for OutputBufferLength that is too small
    ksmbd: fix incorrect handling of iterate_dir
    tracing: Simplify conditional compilation code in tracing_set_tracer()
    tracing: Do not free snapshot if tracer is on cmdline
    mmc: sdhci-tegra: Use actual clock rate for SW tuning correction
    perf: Skip and warn on unknown format 'configN' attrs
    ACPI: video: Force backlight native for more TongFang devices
    x86/Kconfig: Drop check for -mabi=ms for CONFIG_EFI_STUB
    Makefile.debug: re-enable debug info for .S files
    mmc: core: Add SD card quirk for broken discard
    mm: /proc/pid/smaps_rollup: fix no vma's null-deref
    Linux 5.15.76

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ica5b3f26c36900ff31ccac63f4fb55b52bff0ec2
@@ -78,10 +78,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A57      | #1319537        | ARM64_ERRATUM_1319367       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A57      | #1742098        | ARM64_ERRATUM_1742098       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #853709         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A72      | #1319367        | ARM64_ERRATUM_1319367       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A72      | #1655431        | ARM64_ERRATUM_1742098       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1188873,1418040| ARM64_ERRATUM_1418040       |
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 75
+SUBLEVEL = 76
 EXTRAVERSION =
 NAME = Trick or Treat

@@ -892,7 +892,9 @@ else
 DEBUG_CFLAGS += -g
 endif

-ifndef CONFIG_AS_IS_LLVM
+ifdef CONFIG_AS_IS_LLVM
+KBUILD_AFLAGS += -g
+else
 KBUILD_AFLAGS += -Wa,-gdwarf-2
 endif

@@ -494,6 +494,22 @@ config ARM64_ERRATUM_834220

       If unsure, say Y.

+config ARM64_ERRATUM_1742098
+    bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
+    depends on COMPAT
+    default y
+    help
+      This option removes the AES hwcap for aarch32 user-space to
+      workaround erratum 1742098 on Cortex-A57 and Cortex-A72.
+
+      Affected parts may corrupt the AES state if an interrupt is
+      taken between a pair of AES instructions. These instructions
+      are only present if the cryptography extensions are present.
+      All software should have a fallback implementation for CPUs
+      that don't implement the cryptography extensions.
+
+      If unsure, say Y.
+
 config ARM64_ERRATUM_845719
     bool "Cortex-A53: 845719: a load might read incorrect data"
     depends on COMPAT
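The help text above assumes 32-bit userspace probes for the AES hwcap before using the instructions. A minimal, hedged sketch of that probe, assuming an aarch32 process and the standard arm hwcap headers (HWCAP2_AES is the bit this workaround hides):

    /* Sketch only: how aarch32 userspace observes the hwcap removal. */
    #include <stdio.h>
    #include <sys/auxv.h>   /* getauxval() */
    #include <asm/hwcap.h>  /* HWCAP2_AES on 32-bit arm */

    int main(void)
    {
        /* On parts affected by erratum 1742098 the kernel now clears
         * this bit, steering software onto its non-AES fallback path. */
        if (getauxval(AT_HWCAP2) & HWCAP2_AES)
            printf("AES instructions usable\n");
        else
            printf("no AES hwcap: use the software fallback\n");
        return 0;
    }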
@@ -40,7 +40,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
+void mte_cpu_setup(void);
 void mte_suspend_enter(void);
+void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -69,6 +71,9 @@ static inline void mte_thread_switch(struct task_struct *next)
 static inline void mte_suspend_enter(void)
 {
 }
+static inline void mte_suspend_exit(void)
+{
+}
 static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
     return 0;
@@ -273,6 +273,8 @@
 #define TCR_NFD1    (UL(1) << 54)
 #define TCR_E0PD0   (UL(1) << 55)
 #define TCR_E0PD1   (UL(1) << 56)
+#define TCR_TCMA0   (UL(1) << 57)
+#define TCR_TCMA1   (UL(1) << 58)

 /*
  * TTBR.
@@ -1191,10 +1191,6 @@
 #define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
 #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */

-/* TCR EL1 Bit Definitions */
-#define SYS_TCR_EL1_TCMA1   (BIT(58))
-#define SYS_TCR_EL1_TCMA0   (BIT(57))
-
 /* GCR_EL1 Definitions */
 #define SYS_GCR_EL1_RRND    (BIT(16))
 #define SYS_GCR_EL1_EXCL_MASK   0xffffUL
@@ -391,6 +391,14 @@ static struct midr_range trbe_write_out_of_range_cpus[] = {
 };
 #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+    MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+    MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+    {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
     {
@@ -628,6 +636,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
         /* Cortex-A510 r0p0-r1p1 */
         CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
     },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+    {
+        .desc = "ARM erratum 1742098",
+        .capability = ARM64_WORKAROUND_1742098,
+        CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+    },
 #endif
     {
     }
@@ -79,6 +79,7 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
+#include <asm/hwcap.h>
 #include <asm/insn.h>
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
@@ -1968,7 +1969,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
 static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 {
     sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
-    isb();
+
+    mte_cpu_setup();

     /*
      * Clear the tags in the zero page. This needs to be done via the
@@ -1981,6 +1983,14 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_MTE */

+static void elf_hwcap_fixup(void)
+{
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+    if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+        compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
+#endif /* ARM64_ERRATUM_1742098 */
+}
+
 #ifdef CONFIG_KVM
 static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
 {
@@ -3155,8 +3165,10 @@ void __init setup_cpu_features(void)
     setup_system_capabilities();
     setup_elf_hwcaps(arm64_elf_hwcaps);

-    if (system_supports_32bit_el0())
+    if (system_supports_32bit_el0()) {
         setup_elf_hwcaps(compat_elf_hwcaps);
+        elf_hwcap_fixup();
+    }

     if (system_uses_ttbr0_pan())
         pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
@@ -3209,6 +3221,7 @@ static int enable_mismatched_32bit_el0(unsigned int cpu)
                              cpu_active_mask);
     get_cpu_device(lucky_winner)->offline_disabled = true;
     setup_elf_hwcaps(compat_elf_hwcaps);
+    elf_hwcap_fixup();
     pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
         cpu, lucky_winner);
     return 0;
@@ -283,6 +283,49 @@ void mte_thread_switch(struct task_struct *next)
     mte_check_tfsr_el1();
 }

+void mte_cpu_setup(void)
+{
+    u64 rgsr;
+
+    /*
+     * CnP must be enabled only after the MAIR_EL1 register has been set
+     * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
+     * lead to the wrong memory type being used for a brief window during
+     * CPU power-up.
+     *
+     * CnP is not a boot feature so MTE gets enabled before CnP, but let's
+     * make sure that is the case.
+     */
+    BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
+    BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+
+    /* Normal Tagged memory type at the corresponding MAIR index */
+    sysreg_clear_set(mair_el1,
+             MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
+             MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
+                      MT_NORMAL_TAGGED));
+
+    write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
+
+    /*
+     * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+     * RGSR_EL1.SEED must be non-zero for IRG to produce
+     * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+     * must initialize it.
+     */
+    rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
+           SYS_RGSR_EL1_SEED_SHIFT;
+    if (rgsr == 0)
+        rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
+    write_sysreg_s(rgsr, SYS_RGSR_EL1);
+
+    /* clear any pending tag check faults in TFSR*_EL1 */
+    write_sysreg_s(0, SYS_TFSR_EL1);
+    write_sysreg_s(0, SYS_TFSRE0_EL1);
+
+    local_flush_tlb_all();
+}
+
 void mte_suspend_enter(void)
 {
     if (!system_supports_mte())
@@ -299,6 +342,14 @@ void mte_suspend_enter(void)
     mte_check_tfsr_el1();
 }

+void mte_suspend_exit(void)
+{
+    if (!system_supports_mte())
+        return;
+
+    mte_cpu_setup();
+}
+
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
     u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
@@ -43,6 +43,8 @@ void notrace __cpu_suspend_exit(void)
 {
     unsigned int cpu = smp_processor_id();

+    mte_suspend_exit();
+
     /*
      * We are resuming from reset with the idmap active in TTBR0_EL1.
      * We must uninstall the idmap and restore the expected MMU
@@ -46,18 +46,20 @@
 #endif

 #ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
-#else
+#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
+#elif defined(CONFIG_ARM64_MTE)
 /*
  * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
  * TBI being enabled at EL1.
  */
 #define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
+#else
+#define TCR_MTE_FLAGS 0
 #endif

 /*
  * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
- * changed during __cpu_setup to Normal Tagged if the system supports MTE.
+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
  */
 #define MAIR_EL1_SET \
     (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
@@ -422,46 +424,8 @@ SYM_FUNC_START(__cpu_setup)
     mov_q   mair, MAIR_EL1_SET
     mov_q   tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
             TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-            TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-
-#ifdef CONFIG_ARM64_MTE
-    /*
-     * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
-     * (ID_AA64PFR1_EL1[11:8] > 1).
-     */
-    mrs x10, ID_AA64PFR1_EL1
-    ubfx    x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
-    cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2
-    b.lt    1f
-
-    /* Normal Tagged memory type at the corresponding MAIR index */
-    mov x10, #MAIR_ATTR_NORMAL_TAGGED
-    bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
-
-    mov x10, #KERNEL_GCR_EL1
-    msr_s   SYS_GCR_EL1, x10
-
-    /*
-     * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
-     * RGSR_EL1.SEED must be non-zero for IRG to produce
-     * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
-     * must initialize it.
-     */
-    mrs x10, CNTVCT_EL0
-    ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
-    csinc   x10, x10, xzr, ne
-    lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
-    msr_s   SYS_RGSR_EL1, x10
-
-    /* clear any pending tag check faults in TFSR*_EL1 */
-    msr_s   SYS_TFSR_EL1, xzr
-    msr_s   SYS_TFSRE0_EL1, xzr
-
-    /* set the TCR_EL1 bits */
-    mov_q   x10, TCR_MTE_FLAGS
-    orr tcr, tcr, x10
-1:
-#endif
+            TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
+
     tcr_clear_errata_bits tcr, x9, x5

 #ifdef CONFIG_ARM64_VA_BITS_52
@@ -61,6 +61,7 @@ WORKAROUND_1418040
 WORKAROUND_1463225
 WORKAROUND_1508412
 WORKAROUND_1542419
+WORKAROUND_1742098
 WORKAROUND_2457168
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
@@ -1928,7 +1928,6 @@ config EFI
 config EFI_STUB
     bool "EFI stub support"
     depends on EFI && !X86_USE_3DNOW
-    depends on $(cc-option,-mabi=ms) || X86_32
     select RELOCATABLE
     help
       This kernel feature allows a bzImage to be loaded directly
@@ -13,6 +13,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/limits.h>
 #include <linux/slab.h>
 #include <linux/device.h>

@@ -1348,11 +1350,37 @@ static void pt_addr_filters_fini(struct perf_event *event)
     event->hw.addr_filters = NULL;
 }

-static inline bool valid_kernel_ip(unsigned long ip)
+#ifdef CONFIG_X86_64
+static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
 {
-    return virt_addr_valid(ip) && kernel_ip(ip);
+    return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
 }

+static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+    return canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
+/* Clamp to a canonical address greater-than-or-equal-to the address given */
+static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+    return is_canonical_address(vaddr, vaddr_bits) ?
+           vaddr :
+           -BIT_ULL(vaddr_bits - 1);
+}
+
+/* Clamp to a canonical address less-than-or-equal-to the address given */
+static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+    return is_canonical_address(vaddr, vaddr_bits) ?
+           vaddr :
+           BIT_ULL(vaddr_bits - 1) - 1;
+}
+#else
+#define clamp_to_ge_canonical_addr(x, y) (x)
+#define clamp_to_le_canonical_addr(x, y) (x)
+#endif
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
     struct perf_addr_filter *filter;
@@ -1367,14 +1395,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
             filter->action == PERF_ADDR_FILTER_ACTION_START)
             return -EOPNOTSUPP;

-        if (!filter->path.dentry) {
-            if (!valid_kernel_ip(filter->offset))
-                return -EINVAL;
-
-            if (!valid_kernel_ip(filter->offset + filter->size))
-                return -EINVAL;
-        }
-
         if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
             return -EOPNOTSUPP;
     }
@@ -1398,9 +1418,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
         if (filter->path.dentry && !fr[range].start) {
             msr_a = msr_b = 0;
         } else {
-            /* apply the offset */
-            msr_a = fr[range].start;
-            msr_b = msr_a + fr[range].size - 1;
+            unsigned long n = fr[range].size - 1;
+            unsigned long a = fr[range].start;
+            unsigned long b;
+
+            if (a > ULONG_MAX - n)
+                b = ULONG_MAX;
+            else
+                b = a + n;
+            /*
+             * Apply the offset. 64-bit addresses written to the
+             * MSRs must be canonical, but the range can encompass
+             * non-canonical addresses. Since software cannot
+             * execute at non-canonical addresses, adjusting to
+             * canonical addresses does not affect the result of the
+             * address filter.
+             */
+            msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
+            msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+            if (msr_b < msr_a)
+                msr_a = msr_b = 0;
         }

         filters->filter[range].msr_a = msr_a;
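The sign-extension trick in canonical_address() above is compact; a standalone sketch with a hypothetical 48-bit virtual address width shows the behaviour the clamp helpers rely on:

    /* Sketch: canonicalization by shifting the tag bits out and back in. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t canonical_address(uint64_t vaddr, uint8_t vaddr_bits)
    {
        /* Arithmetic right shift replicates bit (vaddr_bits - 1) upward. */
        return (uint64_t)(((int64_t)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits));
    }

    int main(void)
    {
        uint8_t bits = 48;                    /* assumed x86_virt_bits */
        uint64_t ok  = 0x00007fffffffffffULL; /* already canonical */
        uint64_t bad = 0x0000800000000000ULL; /* start of the non-canonical hole */

        /* ok maps to itself, so is_canonical_address() accepts it. */
        printf("%llx -> %llx\n", (unsigned long long)ok,
               (unsigned long long)canonical_address(ok, bits));
        /* bad sign-extends to ffff800000000000 != bad, so the kernel's
         * clamp helpers substitute the nearest canonical boundary. */
        printf("%llx -> %llx\n", (unsigned long long)bad,
               (unsigned long long)canonical_address(bad, bits));
        return 0;
    }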
@@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
 {
     u64 start = rmrr->base_address;
     u64 end = rmrr->end_address + 1;
+    int entry_type;

-    if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+    entry_type = e820__get_entry_type(start, end);
+    if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
         return 0;

     pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
@@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
         return ret;

     native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-    if (rev >= mc->hdr.patch_id)
+
+    /*
+     * Allow application of the same revision to pick up SMT-specific
+     * changes even if the revision of the other SMT thread is already
+     * up-to-date.
+     */
+    if (rev > mc->hdr.patch_id)
         return ret;

     if (!__apply_microcode_amd(mc)) {
@@ -522,8 +528,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)

     native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

-    /* Check whether we have saved a new patch already: */
-    if (*new_rev && rev < mc->hdr.patch_id) {
+    /*
+     * Check whether a new patch has been saved already. Also, allow application of
+     * the same revision in order to pick up SMT-thread-specific configuration even
+     * if the sibling SMT thread already has an up-to-date revision.
+     */
+    if (*new_rev && rev <= mc->hdr.patch_id) {
         if (!__apply_microcode_amd(mc)) {
             *new_rev = mc->hdr.patch_id;
             return;
@@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
             .rid            = RDT_RESOURCE_L3,
             .name           = "L3",
             .cache_level    = 3,
-            .cache = {
-                .min_cbm_bits   = 1,
-            },
             .domains        = domain_init(RDT_RESOURCE_L3),
             .parse_ctrlval  = parse_cbm,
             .format_str     = "%d=%0*x",
@@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
             .rid            = RDT_RESOURCE_L2,
             .name           = "L2",
             .cache_level    = 2,
-            .cache = {
-                .min_cbm_bits   = 1,
-            },
             .domains        = domain_init(RDT_RESOURCE_L2),
             .parse_ctrlval  = parse_cbm,
             .format_str     = "%d=%0*x",
@@ -877,6 +871,7 @@ static __init void rdt_init_res_defs_intel(void)
             r->cache.arch_has_sparse_bitmaps = false;
             r->cache.arch_has_empty_bitmaps = false;
             r->cache.arch_has_per_cpu_cfg = false;
+            r->cache.min_cbm_bits = 1;
         } else if (r->rid == RDT_RESOURCE_MBA) {
             hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
             hw_res->msr_update = mba_wrmsr_intel;
@@ -897,6 +892,7 @@ static __init void rdt_init_res_defs_amd(void)
             r->cache.arch_has_sparse_bitmaps = true;
             r->cache.arch_has_empty_bitmaps = true;
             r->cache.arch_has_per_cpu_cfg = true;
+            r->cache.min_cbm_bits = 0;
         } else if (r->rid == RDT_RESOURCE_MBA) {
             hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
             hw_res->msr_update = mba_wrmsr_amd;
@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
     unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
     unsigned int core_select_mask, core_level_siblings;
     unsigned int die_select_mask, die_level_siblings;
+    unsigned int pkg_mask_width;
     bool die_level_present = false;
     int leaf;

@@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
     core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
     core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
     die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
-    die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+    pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

     sub_index = 1;
-    do {
+    while (true) {
         cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);

         /*
@@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
             die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
         }

-        sub_index++;
-    } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+        if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
+            pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+        else
+            break;
+
+        sub_index++;
+    }

-    core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+    core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
     die_select_mask = (~(-1 << die_plus_mask_width)) >>
                 core_plus_mask_width;

@@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
     }

     c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-                die_plus_mask_width);
+                pkg_mask_width);
     /*
      * Reinit the apicid, now that we have extended initial_apicid.
      */
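The mask arithmetic above packs the topology into the APIC ID; a small sketch with hypothetical widths (SMT width 1, package shift 6 — both assumptions for illustration) shows how the corrected core_select_mask now spans every level below the package:

    /* Sketch: decomposing an APIC ID with the mask widths used above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int apicid = 0x2b;      /* hypothetical CPU */
        unsigned int ht_mask_width = 1;  /* 2 SMT threads per core */
        unsigned int pkg_mask_width = 6; /* package ID starts at bit 6 */

        /* Same expression as the patched kernel code. */
        unsigned int core_select_mask =
            (~(-1 << pkg_mask_width)) >> ht_mask_width;

        printf("smt id  = %u\n", apicid & ((1 << ht_mask_width) - 1));
        printf("core id = %u\n", (apicid >> ht_mask_width) & core_select_mask);
        printf("pkg id  = %u\n", apicid >> pkg_mask_width);
        return 0;
    }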
@@ -12,6 +12,7 @@
 #include <linux/ratelimit.h>
 #include <linux/edac.h>
 #include <linux/ras.h>
+#include <acpi/ghes.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>

@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
     int cpu = mce->extcpu;
     struct acpi_hest_generic_status *estatus, *tmp;
     struct acpi_hest_generic_data *gdata;
-    const guid_t *fru_id = &guid_null;
-    char *fru_text = "";
+    const guid_t *fru_id;
+    char *fru_text;
     guid_t *sec_type;
     static u32 err_seq;

@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,

     /* log event via trace */
     err_seq++;
-    gdata = (struct acpi_hest_generic_data *)(tmp + 1);
-    if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-        fru_id = (guid_t *)gdata->fru_id;
-    if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
-        fru_text = gdata->fru_text;
-    sec_type = (guid_t *)gdata->section_type;
-    if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
-        struct cper_sec_mem_err *mem = (void *)(gdata + 1);
-        if (gdata->error_data_length >= sizeof(*mem))
-            trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
-                           (u8)gdata->error_severity);
+    apei_estatus_for_each_section(tmp, gdata) {
+        if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+            fru_id = (guid_t *)gdata->fru_id;
+        else
+            fru_id = &guid_null;
+        if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+            fru_text = gdata->fru_text;
+        else
+            fru_text = "";
+        sec_type = (guid_t *)gdata->section_type;
+        if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+            struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+            if (gdata->error_data_length >= sizeof(*mem))
+                trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+                               (u8)gdata->error_severity);
+        }
     }

 out:
@@ -500,6 +500,70 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
         DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
         },
     },
+    /*
+     * More Tongfang devices with the same issue as the Clevo NL5xRU and
+     * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
+     */
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GKxNRxx",
+     .matches = {
+        DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GKxNRxx",
+     .matches = {
+        DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+        DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GKxNRxx",
+     .matches = {
+        DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+        DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GKxNRxx",
+     .matches = {
+        DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+        DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GKxNRxx",
+     .matches = {
+        DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+        DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GMxNGxx",
+     .matches = {
+        DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GMxZGxx",
+     .matches = {
+        DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
+        },
+    },
+    {
+     .callback = video_detect_force_native,
+     .ident = "TongFang GMxRGxx",
+     .matches = {
+        DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+        },
+    },
     /*
      * Desktops which falsely report a backlight and which our heuristics
      * for this do not catch.
@@ -254,7 +254,7 @@ enum {
     PCS_7           = 0x94, /* 7+ port PCS (Denverton) */

     /* em constants */
-    EM_MAX_SLOTS        = 8,
+    EM_MAX_SLOTS        = SATA_PMP_MAX_PORTS,
     EM_MAX_RETRY        = 5,

     /* em_ctl bits */
@@ -1230,4 +1230,4 @@ module_platform_driver(imx_ahci_driver);
 MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
 MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
@@ -215,6 +215,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
     int speed = 0, pvs = 0, pvs_ver = 0;
     u8 *speedbin;
     size_t len;
+    int ret = 0;

     speedbin = nvmem_cell_read(speedbin_nvmem, &len);

@@ -232,7 +233,8 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
         break;
     default:
         dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
-        return -ENODEV;
+        ret = -ENODEV;
+        goto len_error;
     }

     snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
@@ -240,8 +242,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,

     drv->versions = (1 << speed);

+len_error:
     kfree(speedbin);
-    return 0;
+    return ret;
 }

 static const struct qcom_cpufreq_match_data match_data_kryo = {
@@ -264,7 +267,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
     struct nvmem_cell *speedbin_nvmem;
     struct device_node *np;
     struct device *cpu_dev;
-    char *pvs_name = "speedXX-pvsXX-vXX";
+    char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+    char *pvs_name = pvs_name_buffer;
     unsigned cpu;
     const struct of_device_id *match;
     int ret;
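The probe fix above matters because snprintf() later writes through *pvs_name; a self-contained illustration of why the original initializer was unsafe:

    /* Sketch: why pointing pvs_name at a string literal was a bug. */
    #include <stdio.h>

    int main(void)
    {
        /* Before: char *pvs_name = "speedXX-pvsXX-vXX";
         * The literal lives in read-only memory, so writing through the
         * pointer is undefined behaviour (a segfault in practice). */

        /* After: the initializer copies the literal into a writable
         * array on the stack, so snprintf() may fill it in place. */
        char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
        char *pvs_name = pvs_name_buffer;

        snprintf(pvs_name, sizeof(pvs_name_buffer), "speed%d-pvs%d-v%d",
             1, 2, 3);
        printf("%s\n", pvs_name); /* prints: speed1-pvs2-v3 */
        return 0;
    }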
@@ -1507,11 +1507,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
         WREG32_SDMA(i, mmSDMA0_CNTL, temp);

         if (!amdgpu_sriov_vf(adev)) {
-            ring = &adev->sdma.instance[i].ring;
-            adev->nbio.funcs->sdma_doorbell_range(adev, i,
-                ring->use_doorbell, ring->doorbell_index,
-                adev->doorbell_index.sdma_doorbell_range);
-
             /* unhalt engine */
             temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
             temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1416,6 +1416,20 @@ static int soc15_common_sw_fini(void *handle)
     return 0;
 }

+static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
+{
+    int i;
+
+    /* sdma doorbell range is programed by hypervisor */
+    if (!amdgpu_sriov_vf(adev)) {
+        for (i = 0; i < adev->sdma.num_instances; i++) {
+            adev->nbio.funcs->sdma_doorbell_range(adev, i,
+                true, adev->doorbell_index.sdma_engine[i] << 1,
+                adev->doorbell_index.sdma_doorbell_range);
+        }
+    }
+}
+
 static int soc15_common_hw_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1435,6 +1449,13 @@ static int soc15_common_hw_init(void *handle)

     /* enable the doorbell aperture */
     soc15_enable_doorbell_aperture(adev, true);
+    /* HW doorbell routing policy: doorbell writing not
+     * in SDMA/IH/MM/ACV range will be routed to CP. So
+     * we need to init SDMA doorbell range prior
+     * to CP ip block init and ring test. IH already
+     * happens before CP.
+     */
+    soc15_sdma_doorbell_range_init(adev);

     return 0;
 }
@@ -397,6 +397,7 @@ module_init(vc4_drm_register);
 module_exit(vc4_drm_unregister);

 MODULE_ALIAS("platform:vc4-drm");
+MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
 MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
 MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
 MODULE_LICENSE("GPL v2");
@@ -478,7 +478,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
         magicmouse_raw_event(hdev, report, data + 2, data[1]);
         magicmouse_raw_event(hdev, report, data + 2 + data[1],
             size - 2 - data[1]);
-        break;
+        return 0;
     default:
         return 0;
     }
@@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TOTAL_ATTRS     (MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA   (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

-#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
-
 #ifdef CONFIG_SMP
 #define for_each_sibling(i, cpu) \
     for_each_cpu(i, topology_sibling_cpumask(cpu))
@@ -91,6 +88,8 @@ struct temp_data {
 struct platform_data {
     struct device       *hwmon_dev;
     u16         pkg_id;
+    u16         cpu_map[NUM_REAL_CORES];
+    struct ida      ida;
     struct cpumask      cpumask;
     struct temp_data    *core_data[MAX_CORE_DATA];
     struct device_attribute name_attr;
@@ -441,7 +440,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
                             MSR_IA32_THERM_STATUS;
     tdata->is_pkg_data = pkg_flag;
     tdata->cpu = cpu;
-    tdata->cpu_core_id = TO_CORE_ID(cpu);
+    tdata->cpu_core_id = topology_core_id(cpu);
     tdata->attr_size = MAX_CORE_ATTRS;
     mutex_init(&tdata->update_lock);
     return tdata;
@@ -454,7 +453,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
     struct platform_data *pdata = platform_get_drvdata(pdev);
     struct cpuinfo_x86 *c = &cpu_data(cpu);
     u32 eax, edx;
-    int err, attr_no;
+    int err, index, attr_no;

     /*
      * Find attr number for sysfs:
@@ -462,14 +461,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
      * The attr number is always core id + 2
      * The Pkgtemp will always show up as temp1_*, if available
      */
-    attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
+    if (pkg_flag) {
+        attr_no = PKG_SYSFS_ATTR_NO;
+    } else {
+        index = ida_alloc(&pdata->ida, GFP_KERNEL);
+        if (index < 0)
+            return index;
+        pdata->cpu_map[index] = topology_core_id(cpu);
+        attr_no = index + BASE_SYSFS_ATTR_NO;
+    }

-    if (attr_no > MAX_CORE_DATA - 1)
-        return -ERANGE;
+    if (attr_no > MAX_CORE_DATA - 1) {
+        err = -ERANGE;
+        goto ida_free;
+    }

     tdata = init_temp_data(cpu, pkg_flag);
-    if (!tdata)
-        return -ENOMEM;
+    if (!tdata) {
+        err = -ENOMEM;
+        goto ida_free;
+    }

     /* Test if we can access the status register */
     err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@@ -505,6 +516,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 exit_free:
     pdata->core_data[attr_no] = NULL;
     kfree(tdata);
+ida_free:
+    if (!pkg_flag)
+        ida_free(&pdata->ida, index);
     return err;
 }

@@ -524,6 +538,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)

     kfree(pdata->core_data[indx]);
     pdata->core_data[indx] = NULL;
+
+    if (indx >= BASE_SYSFS_ATTR_NO)
+        ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
 }

 static int coretemp_probe(struct platform_device *pdev)
@@ -537,6 +554,7 @@ static int coretemp_probe(struct platform_device *pdev)
         return -ENOMEM;

     pdata->pkg_id = pdev->id;
+    ida_init(&pdata->ida);
     platform_set_drvdata(pdev, pdata);

     pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -553,6 +571,7 @@ static int coretemp_remove(struct platform_device *pdev)
         if (pdata->core_data[i])
             coretemp_remove_core(pdata, i);

+    ida_destroy(&pdata->ida);
     return 0;
 }

@@ -647,7 +666,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
     struct platform_device *pdev = coretemp_get_pdev(cpu);
     struct platform_data *pd;
     struct temp_data *tdata;
-    int indx, target;
+    int i, indx = -1, target;

     /*
      * Don't execute this on suspend as the device remove locks
@@ -660,12 +679,19 @@ static int coretemp_cpu_offline(unsigned int cpu)
     if (!pdev)
         return 0;

-    /* The core id is too big, just return */
-    indx = TO_ATTR_NO(cpu);
-    if (indx > MAX_CORE_DATA - 1)
+    pd = platform_get_drvdata(pdev);
+
+    for (i = 0; i < NUM_REAL_CORES; i++) {
+        if (pd->cpu_map[i] == topology_core_id(cpu)) {
+            indx = i + BASE_SYSFS_ATTR_NO;
+            break;
+        }
+    }
+
+    /* Too many cores and this core is not populated, just return */
+    if (indx < 0)
         return 0;

-    pd = platform_get_drvdata(pdev);
     tdata = pd->core_data[indx];

     cpumask_clear_cpu(cpu, &pd->cpumask);
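The coretemp changes above replace a core-ID-derived sysfs index with a kernel IDA plus a cpu_map lookup. A minimal sketch of that allocate/record/free pattern, simplified and outside the driver (it bounds the slot with ida_alloc_max where the driver range-checks afterwards; kernel context assumed):

    /* Sketch of the IDA pattern used above. */
    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);
    static u16 cpu_map[16]; /* index -> core id, as in platform_data */

    static int example_add(u16 core_id)
    {
        /* Smallest free slot; never exceeds the array this way. */
        int index = ida_alloc_max(&example_ida, ARRAY_SIZE(cpu_map) - 1,
                      GFP_KERNEL);
        if (index < 0)
            return index;
        cpu_map[index] = core_id;   /* remember the mapping */
        return index;
    }

    static void example_remove(int index)
    {
        ida_free(&example_ida, index);  /* slot becomes reusable */
    }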
@@ -638,6 +638,11 @@ static int cci_probe(struct platform_device *pdev)
     if (ret < 0)
         goto error;

+    pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+    pm_runtime_use_autosuspend(dev);
+    pm_runtime_set_active(dev);
+    pm_runtime_enable(dev);
+
     for (i = 0; i < cci->data->num_masters; i++) {
         if (!cci->master[i].cci)
             continue;
@@ -649,14 +654,12 @@ static int cci_probe(struct platform_device *pdev)
         }
     }

-    pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
-    pm_runtime_use_autosuspend(dev);
-    pm_runtime_set_active(dev);
-    pm_runtime_enable(dev);
-
     return 0;

 error_i2c:
+    pm_runtime_disable(dev);
+    pm_runtime_dont_use_autosuspend(dev);
+
     for (--i ; i >= 0; i--) {
         if (cci->master[i].cci) {
             i2c_del_adapter(&cci->master[i].adap);
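The reordering above exists because a registered I2C adapter can be used, and take runtime-PM references, as soon as i2c_add_adapter() returns. A condensed, hedged sketch of the rule with a hypothetical driver:

    /* Sketch: enable runtime PM before exposing the adapter. */
    #include <linux/i2c.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct device *dev, struct i2c_adapter *adap)
    {
        int ret;

        /* 1. Runtime PM must be live first: the adapter is usable the
         *    moment it is added, including from other drivers. */
        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        /* 2. Only now publish the adapter. */
        ret = i2c_add_adapter(adap);
        if (ret) {
            /* Unwind in reverse order on failure. */
            pm_runtime_disable(dev);
            pm_runtime_dont_use_autosuspend(dev);
        }
        return ret;
    }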
@@ -2761,6 +2761,7 @@ static int __init si_domain_init(int hw)

     if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
         domain_exit(si_domain);
+        si_domain = NULL;
         return -EFAULT;
     }

@@ -3397,6 +3398,10 @@ free_iommu:
         disable_dmar_iommu(iommu);
         free_dmar_iommu(iommu);
     }
+    if (si_domain) {
+        domain_exit(si_domain);
+        si_domain = NULL;
+    }

     kfree(g_iommus);

@@ -1797,7 +1797,6 @@ static struct mapped_device *alloc_dev(int minor)
     md->disk->first_minor = minor;
     md->disk->minors = 1;
     md->disk->fops = &dm_blk_dops;
-    md->disk->queue = md->queue;
     md->disk->private_data = md;
     sprintf(md->disk->disk_name, "dm-%d", minor);

@@ -158,6 +158,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
         else
             return NULL;
         fmt = find_format(inst, pixmp->pixelformat, f->type);
+        if (!fmt)
+            return NULL;
     }

     pixmp->width = clamp(pixmp->width, frame_width_min(inst),
@@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
     struct mceusb_dev *ir = dev->priv;
     unsigned int units;

-    units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT);
+    units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT);

     cmdbuf[2] = units >> 8;
     cmdbuf[3] = units;
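The one-line change above is about rounding direction: DIV_ROUND_CLOSEST could round a requested timeout down below what the caller asked for, while DIV_ROUND_UP never shortens it. A small worked sketch (the 50000 value for MCE_TIME_UNIT is an assumption for illustration):

    /* Sketch: rounding up guarantees at least the requested timeout. */
    #include <stdio.h>

    #define MCE_TIME_UNIT 50000 /* assumed hardware time unit */

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
    #define DIV_ROUND_UP(x, d)      (((x) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int timeout = 124999; /* just under 2.5 units */

        /* CLOSEST yields 2 units = 100000, shorter than requested... */
        printf("closest: %u\n", DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT));
        /* ...UP yields 3 units = 150000, never shorter. */
        printf("up:      %u\n", DIV_ROUND_UP(timeout, MCE_TIME_UNIT));
        return 0;
    }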
@@ -762,7 +762,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
      */
     host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
     clk_set_rate(pltfm_host->clk, host_clk);
-    tegra_host->curr_clk_rate = host_clk;
+    tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
     if (tegra_host->ddr_signaling)
         host->max_clk = host_clk;
     else
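The tegra change above reflects a general clk API property: clk_set_rate() may settle on the nearest rate the clock tree actually supports, so consumers that cache the rate for later math should read it back. A hedged sketch of the pattern (kernel context assumed):

    /* Sketch: cache what the clock actually runs at, not what was asked. */
    #include <linux/clk.h>

    static unsigned long example_set_clock(struct clk *clk, unsigned long want)
    {
        clk_set_rate(clk, want); /* may round to a supported rate */

        /* Tuning corrections must use the achieved rate, hence the
         * read-back with clk_get_rate(). */
        return clk_get_rate(clk);
    }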
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
|
|||||||
hdev->cls_dev.release = hnae_release;
|
hdev->cls_dev.release = hnae_release;
|
||||||
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
|
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
|
||||||
ret = device_register(&hdev->cls_dev);
|
ret = device_register(&hdev->cls_dev);
|
||||||
if (ret)
|
if (ret) {
|
||||||
|
put_device(&hdev->cls_dev);
|
||||||
return ret;
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
__module_get(THIS_MODULE);
|
__module_get(THIS_MODULE);
|
||||||
|
|
||||||
|
|||||||
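The hnae fix follows the driver-core rule that once device_register() has been called the device is refcounted, so even a failed registration must be balanced with put_device() rather than a direct free. A toy refcounting sketch of why "put, don't free" matters (plain C stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a refcounted device: the last put runs release(). */
struct toy_dev {
	int refcount;
	void (*release)(struct toy_dev *);
};

static void toy_release(struct toy_dev *d)
{
	printf("release: freeing device\n");
	free(d);
}

static void toy_put(struct toy_dev *d)
{
	if (--d->refcount == 0)
		d->release(d);
}

static int toy_register(struct toy_dev *d)
{
	/* Imagine partial setup happened here before the failure. */
	return -1;
}

int main(void)
{
	struct toy_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->refcount = 1;	/* initial reference */
	d->release = toy_release;
	if (toy_register(d) != 0) {
		toy_put(d);	/* correct: release() runs exactly once */
		/* Calling free(d) here instead would bypass release() and,
		 * with a real struct device, leak what registration set up. */
	}
	return 0;
}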
@@ -2081,9 +2081,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			 */
 			rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
-			if (err)
-				goto rx_unwind;
-			err = i40e_alloc_rx_bi(&rx_rings[i]);
 			if (err)
 				goto rx_unwind;
 
@@ -3421,12 +3421,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	if (ring->vsi->type == I40E_VSI_MAIN)
 		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
-	kfree(ring->rx_bi);
 	ring->xsk_pool = i40e_xsk_pool(ring);
 	if (ring->xsk_pool) {
-		ret = i40e_alloc_rx_bi_zc(ring);
-		if (ret)
-			return ret;
 		ring->rx_buf_len =
 			xsk_pool_get_rx_frame_size(ring->xsk_pool);
 		/* For AF_XDP ZC, we disallow packets to span on
@@ -3444,9 +3440,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 				 ring->queue_index);
 
 	} else {
-		ret = i40e_alloc_rx_bi(ring);
-		if (ret)
-			return ret;
 		ring->rx_buf_len = vsi->rx_buf_len;
 		if (ring->vsi->type == I40E_VSI_MAIN) {
 			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -13161,6 +13154,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
 		i40e_reset_and_rebuild(pf, true, true);
 	}
 
+	if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+		if (i40e_realloc_rx_bi_zc(vsi, true))
+			return -ENOMEM;
+	} else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+		if (i40e_realloc_rx_bi_zc(vsi, false))
+			return -ENOMEM;
+	}
+
 	for (i = 0; i < vsi->num_queue_pairs; i++)
 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
 
@@ -13393,6 +13394,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
 
 	i40e_queue_pair_disable_irq(vsi, queue_pair);
 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
 	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
 	i40e_queue_pair_clean_rings(vsi, queue_pair);
 	i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -1459,14 +1459,6 @@ err:
 	return -ENOMEM;
 }
 
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
-	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
-	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
-	return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
 {
 	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1597,6 +1589,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 
 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
+	rx_ring->rx_bi =
+		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+	if (!rx_ring->rx_bi)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -466,7 +466,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		  u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
@@ -10,14 +10,6 @@
 #include "i40e_txrx_common.h"
 #include "i40e_xsk.h"
 
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
-	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
-	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
-	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
 {
 	memset(rx_ring->rx_bi_zc, 0,
@@ -29,6 +21,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
 	return &rx_ring->rx_bi_zc[idx];
 }
 
+/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try allocating memory and return ENOMEM, if failed to allocate.
+ * If allocation was successful, substitute buffer with allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+					  sizeof(*rx_ring->rx_bi);
+	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+	if (!sw_ring)
+		return -ENOMEM;
+
+	if (pool_present) {
+		kfree(rx_ring->rx_bi);
+		rx_ring->rx_bi = NULL;
+		rx_ring->rx_bi_zc = sw_ring;
+	} else {
+		kfree(rx_ring->rx_bi_zc);
+		rx_ring->rx_bi_zc = NULL;
+		rx_ring->rx_bi = sw_ring;
+	}
+	return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory, than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+	struct i40e_ring *rx_ring;
+	unsigned long q;
+
+	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+		rx_ring = vsi->rx_rings[q];
+		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
 /**
  * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
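The i40e_realloc_rx_xdp_bi() function added above allocates the replacement SW ring first and frees the old one only once the allocation has succeeded, so a failure leaves the original ring intact. A generic sketch of that swap pattern (hypothetical names, not the driver's API):

#include <stdlib.h>

struct ring {
	void *buf;	/* currently active SW ring */
	size_t count;
};

/* Allocate the new ring first; only on success free and replace the
 * old one. On failure the caller still has a usable ring. */
static int ring_realloc(struct ring *r, size_t new_elem_size)
{
	void *fresh = calloc(r->count, new_elem_size);

	if (!fresh)
		return -1;	/* old r->buf untouched */

	free(r->buf);
	r->buf = fresh;
	return 0;
}

int main(void)
{
	struct ring r = { .buf = NULL, .count = 512 };

	r.buf = calloc(r.count, 8);
	if (!r.buf)
		return 1;
	if (ring_realloc(&r, 16))	/* grow element size, e.g. for ZC */
		return 1;		/* on failure, r.buf is still valid */
	free(r.buf);
	return 0;
}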
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
 		if (err)
 			return err;
 
+		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+		if (err)
+			return err;
+
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
 	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
 
 	if (if_running) {
+		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+		if (err)
+			return err;
 		err = i40e_queue_pair_enable(vsi, qid);
 		if (err)
 			return err;
@@ -33,7 +33,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
 
 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
 int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
 
 #endif /* _I40E_XSK_H_ */
@@ -2880,11 +2880,15 @@ err_out:
 	 * than the full array, but leave the qcq shells in place
 	 */
 	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
-		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-		ionic_qcq_free(lif, lif->txqcqs[i]);
+		if (lif->txqcqs && lif->txqcqs[i]) {
+			lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+			ionic_qcq_free(lif, lif->txqcqs[i]);
+		}
 
-		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-		ionic_qcq_free(lif, lif->rxqcqs[i]);
+		if (lif->rxqcqs && lif->rxqcqs[i]) {
+			lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+			ionic_qcq_free(lif, lif->rxqcqs[i]);
+		}
 	}
 
 	if (err)
@@ -3271,6 +3271,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 	bool was_enabled = efx->port_enabled;
 	int rc;
 
+#ifdef CONFIG_SFC_SRIOV
+	/* If this function is a VF and we have access to the parent PF,
+	 * then use the PF control path to attempt to change the VF MAC address.
+	 */
+	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+		struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+		u8 mac[ETH_ALEN];
+
+		/* net_dev->dev_addr can be zeroed by efx_net_stop in
+		 * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+		 */
+		ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+		rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+		if (!rc)
+			return 0;
+
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Updating VF mac via PF failed (%d), setting directly\n",
+			  rc);
+	}
+#endif
+
 	efx_device_detach_sync(efx);
 	efx_net_stop(efx->net_dev);
 
@@ -3293,40 +3317,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 	efx_net_open(efx->net_dev);
 	efx_device_attach_if_not_resetting(efx);
 
-#ifdef CONFIG_SFC_SRIOV
-	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
-		struct efx_ef10_nic_data *nic_data = efx->nic_data;
-		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
-		if (rc == -EPERM) {
-			struct efx_nic *efx_pf;
-
-			/* Switch to PF and change MAC address on vport */
-			efx_pf = pci_get_drvdata(pci_dev_pf);
-
-			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
-						       nic_data->vf_index,
-						       efx->net_dev->dev_addr);
-		} else if (!rc) {
-			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
-			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
-			unsigned int i;
-
-			/* MAC address successfully changed by VF (with MAC
-			 * spoofing) so update the parent PF if possible.
-			 */
-			for (i = 0; i < efx_pf->vf_count; ++i) {
-				struct ef10_vf *vf = nic_data->vf + i;
-
-				if (vf->efx == efx) {
-					ether_addr_copy(vf->mac,
-							efx->net_dev->dev_addr);
-					return 0;
-				}
-			}
-		}
-	} else
-#endif
 	if (rc == -EPERM) {
 		netif_err(efx, drv, efx->net_dev,
 			  "Cannot change MAC address; use sfboot to enable"
@@ -157,7 +157,8 @@ struct efx_filter_spec {
 	u32 flags:6;
 	u32 dmaq_id:12;
 	u32 rss_context;
-	__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+	u32 vport_id;
+	__be16 outer_vid;
 	__be16 inner_vid;
 	u8 loc_mac[ETH_ALEN];
 	u8 rem_mac[ETH_ALEN];
@@ -676,17 +676,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
 		   (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
 		return false;
 
-	return memcmp(&left->outer_vid, &right->outer_vid,
+	return memcmp(&left->vport_id, &right->vport_id,
 		      sizeof(struct efx_filter_spec) -
-		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
+		      offsetof(struct efx_filter_spec, vport_id)) == 0;
 }
 
 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 {
-	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
-	return jhash2((const u32 *)&spec->outer_vid,
+	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+	return jhash2((const u32 *)&spec->vport_id,
 		      (sizeof(struct efx_filter_spec) -
-		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
+		       offsetof(struct efx_filter_spec, vport_id)) / 4,
 		      0);
 }
 
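The sfc fix works because efx_filter_spec_hash() and efx_filter_spec_equal() treat everything from one chosen member to the end of the struct as a single flat match region; moving vport_id to the head of that region puts it inside both the hash and the comparison. A userspace sketch of the offsetof-based region trick (local struct, and a plain FNV byte hash standing in for the kernel's jhash2(), which is also why that region must stay 4-byte aligned and sized):

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

struct spec {
	uint32_t flags;		/* not part of the match region */
	uint32_t vport_id;	/* region starts here ... */
	uint16_t outer_vid;
	uint16_t inner_vid;	/* ... and runs to the end of the struct */
};

#define REGION_OFF offsetof(struct spec, vport_id)

/* FNV-1a over the match region, as a stand-in for jhash2(). */
static uint32_t spec_hash(const struct spec *s)
{
	const unsigned char *p = (const unsigned char *)s + REGION_OFF;
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < sizeof(*s) - REGION_OFF; i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

static int spec_equal(const struct spec *a, const struct spec *b)
{
	return !memcmp((const char *)a + REGION_OFF,
		       (const char *)b + REGION_OFF,
		       sizeof(*a) - REGION_OFF);
}

int main(void)
{
	struct spec a, b;

	memset(&a, 0, sizeof(a));	/* zero padding before memcmp */
	memset(&b, 0, sizeof(b));
	a.vport_id = 1; a.outer_vid = 100;
	b.vport_id = 2; b.outer_vid = 100;

	/* With vport_id inside the region, these no longer collide. */
	printf("equal=%d hash_a=%08x hash_b=%08x\n",
	       spec_equal(&a, &b), spec_hash(&a), spec_hash(&b));
	return 0;
}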
@@ -253,8 +253,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
 			       DP83822_EEE_ERROR_CHANGE_INT_EN);
 
 		if (!dp83822->fx_enabled)
-			misr_status |= DP83822_MDI_XOVER_INT_EN |
-				       DP83822_ANEG_ERR_INT_EN |
+			misr_status |= DP83822_ANEG_ERR_INT_EN |
 				       DP83822_WOL_PKT_INT_EN;
 
 		err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
@@ -791,6 +791,14 @@ static int dp83867_config_init(struct phy_device *phydev)
 		else
 			val &= ~DP83867_SGMII_TYPE;
 		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+		/* This is a SW workaround for link instability if RX_CTRL is
+		 * not strapped to mode 3 or 4 in HW. This is required for SGMII
+		 * in addition to clearing bit 7, handled above.
+		 */
+		if (dp83867->rxctrl_strap_quirk)
+			phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+					 BIT(8));
 	}
 
 	val = phy_read(phydev, DP83867_CFG3);
@@ -1050,6 +1050,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
 	if (phy_interrupt_is_valid(phy))
 		phy_request_interrupt(phy);
 
+	if (pl->config->mac_managed_pm)
+		phy->mac_managed_pm = true;
+
 	return 0;
 }
 
@@ -776,6 +776,13 @@ static const struct usb_device_id products[] = {
 },
 #endif
 
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* ThinkPad USB-C Dock (based on Realtek RTL8153) */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -770,6 +770,7 @@ enum rtl8152_flags {
 	RX_EPROTO,
 };
 
+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK		0x3054
 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2	0x3082
 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2		0xa387
 
@@ -9651,6 +9652,7 @@ static int rtl8152_probe(struct usb_interface *intf,
 
 	if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
 		switch (le16_to_cpu(udev->descriptor.idProduct)) {
+		case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
 		case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
 		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
 			tp->lenovo_macpassthru = 1;
@@ -9809,6 +9811,7 @@ static const struct usb_device_id rtl8152_table[] = {
 	REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
 	REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
 	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
 	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
 	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
 	REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
@@ -310,7 +310,7 @@ err_unreg_dev:
 	return ERR_PTR(err);
 
 err_free_dev:
-	kfree(dev);
+	put_device(&dev->dev);
 
 	return ERR_PTR(err);
 }
@@ -3088,8 +3088,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
 		return ret;
 
 	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+		/*
+		 * Do not return errors unless we are in a controller reset,
+		 * the controller works perfectly fine without hwmon.
+		 */
 		ret = nvme_hwmon_init(ctrl);
-		if (ret < 0)
+		if (ret == -EINTR)
 			return ret;
 	}
 
@@ -12,7 +12,7 @@
 
 struct nvme_hwmon_data {
 	struct nvme_ctrl *ctrl;
-	struct nvme_smart_log log;
+	struct nvme_smart_log *log;
 	struct mutex read_lock;
 };
 
@@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
 	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
-			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
+			   NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
 			   u32 attr, int channel, long *val)
 {
 	struct nvme_hwmon_data *data = dev_get_drvdata(dev);
-	struct nvme_smart_log *log = &data->log;
+	struct nvme_smart_log *log = data->log;
 	int temp;
 	int err;
 
@@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 	case hwmon_temp_max:
 	case hwmon_temp_min:
 		if ((!channel && data->ctrl->wctemp) ||
-		    (channel && data->log.temp_sensor[channel - 1])) {
+		    (channel && data->log->temp_sensor[channel - 1])) {
 			if (data->ctrl->quirks &
 			    NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
 				return 0444;
@@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 		break;
 	case hwmon_temp_input:
 	case hwmon_temp_label:
-		if (!channel || data->log.temp_sensor[channel - 1])
+		if (!channel || data->log->temp_sensor[channel - 1])
 			return 0444;
 		break;
 	default:
@@ -230,7 +230,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return 0;
+		return -ENOMEM;
+
+	data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+	if (!data->log) {
+		err = -ENOMEM;
+		goto err_free_data;
+	}
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
@@ -238,8 +244,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 	err = nvme_hwmon_get_smart_log(data);
 	if (err) {
 		dev_warn(dev, "Failed to read smart log (error %d)\n", err);
-		kfree(data);
-		return err;
+		goto err_free_log;
 	}
 
 	hwmon = hwmon_device_register_with_info(dev, "nvme",
@@ -247,11 +252,17 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 						     NULL);
 	if (IS_ERR(hwmon)) {
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
-		kfree(data);
-		return PTR_ERR(hwmon);
+		err = PTR_ERR(hwmon);
+		goto err_free_log;
 	}
 	ctrl->hwmon_device = hwmon;
 	return 0;
+
+err_free_log:
+	kfree(data->log);
+err_free_data:
+	kfree(data);
+	return err;
 }
 
 void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
@@ -262,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
 
 		hwmon_device_unregister(ctrl->hwmon_device);
 		ctrl->hwmon_device = NULL;
+		kfree(data->log);
 		kfree(data);
 	}
 }
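The nvme-hwmon hunks above stop embedding the SMART log inside the driver's private struct and kmalloc it separately; a buffer handed to the controller for DMA generally needs its own allocation so it is suitably aligned and not sharing memory with unrelated fields. A reduced userspace sketch of the resulting ownership pattern and its two-step error unwind (stand-in types, not the nvme API):

#include <stdlib.h>

struct smart_log { unsigned char raw[512]; };

struct hwmon_data {
	struct smart_log *log;	/* separately allocated, DMA-safe buffer */
};

static struct hwmon_data *hwmon_data_alloc(void)
{
	struct hwmon_data *data = calloc(1, sizeof(*data));

	if (!data)
		return NULL;
	data->log = calloc(1, sizeof(*data->log));
	if (!data->log) {
		free(data);	/* unwind in reverse allocation order */
		return NULL;
	}
	return data;
}

static void hwmon_data_free(struct hwmon_data *data)
{
	free(data->log);	/* mirror of the kfree(data->log) above */
	free(data);
}

int main(void)
{
	struct hwmon_data *data = hwmon_data_alloc();

	if (!data)
		return 1;
	hwmon_data_free(data);
	return 0;
}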
@@ -1168,7 +1168,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 	 * reset the keep alive timer when the controller is enabled.
 	 */
 	if (ctrl->kato)
-		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
@@ -4666,7 +4666,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	rc = lpfc_vmid_res_alloc(phba, vport);
 
 	if (rc)
-		goto out;
+		goto out_put_shost;
 
 	/* Initialize all internally managed lists. */
 	INIT_LIST_HEAD(&vport->fc_nodes);
@@ -4684,16 +4684,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
-		goto out_put_shost;
+		goto out_free_vmid;
 
 	spin_lock_irq(&phba->port_list_lock);
 	list_add_tail(&vport->listentry, &phba->port_list);
 	spin_unlock_irq(&phba->port_list_lock);
 	return vport;
 
-out_put_shost:
+out_free_vmid:
 	kfree(vport->vmid);
 	bitmap_free(vport->vmid_priority_range);
+out_put_shost:
 	scsi_host_put(shost);
 out:
 	return NULL;
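The lpfc fix above renames and reorders the error labels so each label undoes exactly what was allocated before the failing step, with later labels falling through into earlier ones. The general shape of that unwind idiom, reduced to a standalone sketch (the names mirror the hunk but the bodies are plain malloc/free stand-ins):

#include <stdlib.h>

static void *create(int fail_last_step)
{
	void *shost, *vmid;

	shost = malloc(64);
	if (!shost)
		goto out;

	vmid = malloc(64);
	if (!vmid)
		goto out_put_shost;

	if (fail_last_step)	/* e.g. scsi_add_host() failing */
		goto out_free_vmid;

	free(vmid);	/* success path; real code keeps it in the port */
	return shost;

out_free_vmid:		/* frees only what exists at this point ... */
	free(vmid);
out_put_shost:		/* ... then falls through to the earlier step */
	free(shost);
out:
	return NULL;
}

int main(void)
{
	void *ok = create(0);

	free(ok);
	create(1);	/* exercises the full unwind chain, leak-free */
	return 0;
}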
@@ -192,33 +192,30 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd,
 				     struct v4l2_subdev_state *sd_state,
 				     struct v4l2_subdev_selection *sel)
 {
-	struct v4l2_rect *try_sel, *r;
-	struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
-							struct imgu_v4l2_subdev,
-							subdev);
+	struct imgu_v4l2_subdev *imgu_sd =
+		container_of(sd, struct imgu_v4l2_subdev, subdev);
 
 	if (sel->pad != IMGU_NODE_IN)
 		return -EINVAL;
 
 	switch (sel->target) {
 	case V4L2_SEL_TGT_CROP:
-		try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
-		r = &imgu_sd->rect.eff;
-		break;
+		if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+			sel->r = *v4l2_subdev_get_try_crop(sd, sd_state,
+							   sel->pad);
+		else
+			sel->r = imgu_sd->rect.eff;
+		return 0;
 	case V4L2_SEL_TGT_COMPOSE:
-		try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
-		r = &imgu_sd->rect.bds;
-		break;
+		if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+			sel->r = *v4l2_subdev_get_try_compose(sd, sd_state,
+							      sel->pad);
+		else
+			sel->r = imgu_sd->rect.bds;
+		return 0;
 	default:
 		return -EINVAL;
 	}
-
-	if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
-		sel->r = *try_sel;
-	else
-		sel->r = *r;
-
-	return 0;
 }
 
 static int imgu_subdev_set_selection(struct v4l2_subdev *sd,
@@ -69,6 +69,8 @@ extern unsigned int uvc_gadget_trace_param;
 #define UVC_MAX_REQUEST_SIZE	64
 #define UVC_MAX_EVENTS		4
 
+#define UVCG_REQUEST_HEADER_LEN	2
+
 /* ------------------------------------------------------------------------
  * Structures
  */
@@ -77,7 +79,8 @@ struct uvc_request {
 	u8 *req_buffer;
 	struct uvc_video *video;
 	struct sg_table sgt;
-	u8 header[2];
+	u8 header[UVCG_REQUEST_HEADER_LEN];
+	struct uvc_buffer *last_buf;
 };
 
 struct uvc_video {
@@ -335,33 +335,22 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
 }
 
 /* called with &queue_irqlock held.. */
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
-					  struct uvc_buffer *buf)
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
+			  struct uvc_buffer *buf)
 {
-	struct uvc_buffer *nextbuf;
-
 	if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
 	    buf->length != buf->bytesused) {
 		buf->state = UVC_BUF_STATE_QUEUED;
 		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
-		return buf;
+		return;
 	}
 
-	list_del(&buf->queue);
-	if (!list_empty(&queue->irqqueue))
-		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
-					   queue);
-	else
-		nextbuf = NULL;
-
 	buf->buf.field = V4L2_FIELD_NONE;
 	buf->buf.sequence = queue->sequence++;
 	buf->buf.vb2_buf.timestamp = ktime_get_ns();
 
 	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
 	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
-
-	return nextbuf;
 }
 
 struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
@@ -93,7 +93,7 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);
 
 int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);
 
-struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
-					  struct uvc_buffer *buf);
+void uvcg_complete_buffer(struct uvc_video_queue *queue,
+			  struct uvc_buffer *buf);
 
 struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
@@ -33,7 +33,7 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
 	if (buf->bytesused - video->queue.buf_used <= len - UVCG_REQUEST_HEADER_LEN)
 		data[1] |= UVC_STREAM_EOF;
 
-	return 2;
+	return UVCG_REQUEST_HEADER_LEN;
 }
 
 static int
@@ -83,7 +83,8 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
 	if (buf->bytesused == video->queue.buf_used) {
 		video->queue.buf_used = 0;
 		buf->state = UVC_BUF_STATE_DONE;
-		uvcg_queue_next_buffer(&video->queue, buf);
+		list_del(&buf->queue);
+		uvcg_complete_buffer(&video->queue, buf);
 		video->fid ^= UVC_STREAM_FID;
 
 		video->payload_size = 0;
@@ -104,28 +105,28 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
 	unsigned int len = video->req_size;
 	unsigned int sg_left, part = 0;
 	unsigned int i;
-	int ret;
+	int header_len;
 
 	sg = ureq->sgt.sgl;
 	sg_init_table(sg, ureq->sgt.nents);
 
 	/* Init the header. */
-	ret = uvc_video_encode_header(video, buf, ureq->header,
-				      video->req_size);
-	sg_set_buf(sg, ureq->header, UVCG_REQUEST_HEADER_LEN);
-	len -= ret;
+	header_len = uvc_video_encode_header(video, buf, ureq->header,
+					     video->req_size);
+	sg_set_buf(sg, ureq->header, header_len);
+	len -= header_len;
 
 	if (pending <= len)
 		len = pending;
 
 	req->length = (len == pending) ?
-		len + UVCG_REQUEST_HEADER_LEN : video->req_size;
+		len + header_len : video->req_size;
 
 	/* Init the pending sgs with payload */
 	sg = sg_next(sg);
 
 	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
-		if (!len || !buf->sg)
+		if (!len || !buf->sg || !sg_dma_len(buf->sg))
 			break;
 
 		sg_left = sg_dma_len(buf->sg) - buf->offset;
@@ -148,14 +149,15 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
 	req->num_sgs = i + 1;
 
 	req->length -= len;
-	video->queue.buf_used += req->length - UVCG_REQUEST_HEADER_LEN;
+	video->queue.buf_used += req->length - header_len;
 
 	if (buf->bytesused == video->queue.buf_used || !buf->sg) {
 		video->queue.buf_used = 0;
 		buf->state = UVC_BUF_STATE_DONE;
 		buf->offset = 0;
-		uvcg_queue_next_buffer(&video->queue, buf);
+		list_del(&buf->queue);
 		video->fid ^= UVC_STREAM_FID;
+		ureq->last_buf = buf;
 	}
 }
 
@@ -181,7 +183,8 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
 	if (buf->bytesused == video->queue.buf_used) {
 		video->queue.buf_used = 0;
 		buf->state = UVC_BUF_STATE_DONE;
-		uvcg_queue_next_buffer(&video->queue, buf);
+		list_del(&buf->queue);
+		uvcg_complete_buffer(&video->queue, buf);
 		video->fid ^= UVC_STREAM_FID;
 	}
 }
@@ -231,6 +234,11 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
 		uvcg_queue_cancel(queue, 0);
 	}
 
+	if (ureq->last_buf) {
+		uvcg_complete_buffer(&video->queue, ureq->last_buf);
+		ureq->last_buf = NULL;
+	}
+
 	spin_lock_irqsave(&video->req_lock, flags);
 	list_add_tail(&req->list, &video->req_free);
 	spin_unlock_irqrestore(&video->req_lock, flags);
@@ -298,12 +306,13 @@ uvc_video_alloc_requests(struct uvc_video *video)
 		video->ureq[i].req->complete = uvc_video_complete;
 		video->ureq[i].req->context = &video->ureq[i];
 		video->ureq[i].video = video;
+		video->ureq[i].last_buf = NULL;
 
 		list_add_tail(&video->ureq[i].req->list, &video->req_free);
 		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
 		sg_alloc_table(&video->ureq[i].sgt,
-			       DIV_ROUND_UP(req_size - 2, PAGE_SIZE) + 2,
-			       GFP_KERNEL);
+			       DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
+					    PAGE_SIZE) + 2, GFP_KERNEL);
 	}
 
 	video->req_size = req_size;
@@ -12,8 +12,6 @@
 #ifndef __UVC_VIDEO_H__
 #define __UVC_VIDEO_H__
 
-#define UVCG_REQUEST_HEADER_LEN			2
-
 struct uvc_video;
 
 int uvcg_video_enable(struct uvc_video *video, int enable);
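The sg_alloc_table() sizing in the uvc hunk above follows its own comment: the payload may straddle one extra page boundary (+1) and the 2-byte header occupies its own entry (+1). A quick standalone check of that worst-case segment count (PAGE_SIZE assumed 4096, as on most architectures):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define HEADER_LEN 2u	/* UVCG_REQUEST_HEADER_LEN in the driver */

/* Worst case: ceil(payload / PAGE_SIZE) pages, +1 for a payload that
 * starts mid-page and overruns into one more, +1 for the header. */
static unsigned int sg_entries(unsigned int req_size)
{
	unsigned int payload = req_size - HEADER_LEN;

	return (payload + PAGE_SIZE - 1) / PAGE_SIZE + 2;
}

int main(void)
{
	printf("req 1024  -> %u entries\n", sg_entries(1024));	/* 3 */
	printf("req 16384 -> %u entries\n", sg_entries(16384));	/* 6 */
	return 0;
}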
@@ -138,6 +138,7 @@ struct share_check {
 	u64 root_objectid;
 	u64 inum;
 	int share_count;
+	bool have_delayed_delete_refs;
 };
 
 static inline int extent_is_shared(struct share_check *sc)
@@ -818,16 +819,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 			    struct preftrees *preftrees, struct share_check *sc)
 {
 	struct btrfs_delayed_ref_node *node;
-	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 	struct btrfs_key key;
-	struct btrfs_key tmp_op_key;
 	struct rb_node *n;
 	int count;
 	int ret = 0;
 
-	if (extent_op && extent_op->update_key)
-		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
-
 	spin_lock(&head->lock);
 	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 		node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -853,10 +849,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 		case BTRFS_TREE_BLOCK_REF_KEY: {
 			/* NORMAL INDIRECT METADATA backref */
 			struct btrfs_delayed_tree_ref *ref;
+			struct btrfs_key *key_ptr = NULL;
+
+			if (head->extent_op && head->extent_op->update_key) {
+				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+				key_ptr = &key;
+			}
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
-					       &tmp_op_key, ref->level + 1,
+					       key_ptr, ref->level + 1,
 					       node->bytenr, count, sc,
 					       GFP_ATOMIC);
 			break;
@@ -882,13 +884,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 			key.offset = ref->offset;
 
 			/*
-			 * Found a inum that doesn't match our known inum, we
-			 * know it's shared.
+			 * If we have a share check context and a reference for
+			 * another inode, we can't exit immediately. This is
+			 * because even if this is a BTRFS_ADD_DELAYED_REF
+			 * reference we may find next a BTRFS_DROP_DELAYED_REF
+			 * which cancels out this ADD reference.
+			 *
+			 * If this is a DROP reference and there was no previous
+			 * ADD reference, then we need to signal that when we
+			 * process references from the extent tree (through
+			 * add_inline_refs() and add_keyed_refs()), we should
+			 * not exit early if we find a reference for another
+			 * inode, because one of the delayed DROP references
+			 * may cancel that reference in the extent tree.
 			 */
-			if (sc && sc->inum && ref->objectid != sc->inum) {
-				ret = BACKREF_FOUND_SHARED;
-				goto out;
-			}
+			if (sc && count < 0)
+				sc->have_delayed_delete_refs = true;
 
 			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 					       &key, 0, node->bytenr, count, sc,
@@ -918,7 +929,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 	}
 	if (!ret)
 		ret = extent_is_shared(sc);
-out:
+
 	spin_unlock(&head->lock);
 	return ret;
 }
@@ -1021,7 +1032,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
-			if (sc && sc->inum && key.objectid != sc->inum) {
+			if (sc && sc->inum && key.objectid != sc->inum &&
+			    !sc->have_delayed_delete_refs) {
 				ret = BACKREF_FOUND_SHARED;
 				break;
 			}
@@ -1031,6 +1043,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 			ret = add_indirect_ref(fs_info, preftrees, root,
 					       &key, 0, bytenr, count,
 					       sc, GFP_NOFS);
+
 			break;
 		}
 		default:
@@ -1120,7 +1133,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
 			key.type = BTRFS_EXTENT_DATA_KEY;
 			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 
-			if (sc && sc->inum && key.objectid != sc->inum) {
+			if (sc && sc->inum && key.objectid != sc->inum &&
+			    !sc->have_delayed_delete_refs) {
 				ret = BACKREF_FOUND_SHARED;
 				break;
 			}
@@ -1547,6 +1561,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
 		.root_objectid = root->root_key.objectid,
 		.inum = inum,
 		.share_count = 0,
+		.have_delayed_delete_refs = false,
 	};
 
 	ulist_init(roots);
@@ -1581,6 +1596,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
 			break;
 		bytenr = node->val;
 		shared.share_count = 0;
+		shared.have_delayed_delete_refs = false;
 		cond_resched();
 	}
 
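In the backref hunks above, a delayed reference's count is, as the surrounding code suggests, positive for an ADD and negative for a DROP, and a pending DROP is what sets have_delayed_delete_refs: it may cancel a reference that still sits in the extent tree, so an early "it's shared" exit there would be wrong. A toy model of that cancellation arithmetic (editorial sketch, not btrfs code):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* Pending delayed refs for one extent: +n for ADD, -n for DROP. */
	int delayed[] = { +1, -1 };	/* an ADD immediately cancelled */
	bool have_delayed_delete_refs = false;
	int net = 0;
	unsigned int i;

	for (i = 0; i < sizeof(delayed) / sizeof(delayed[0]); i++) {
		net += delayed[i];
		if (delayed[i] < 0)
			/* A DROP may cancel a ref recorded in the extent
			 * tree, so a later match there is inconclusive. */
			have_delayed_delete_refs = true;
	}

	printf("net=%d have_delayed_delete_refs=%d\n",
	       net, have_delayed_delete_refs);
	return 0;
}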
@@ -2139,7 +2139,16 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 	int need_clear = 0;
 	u64 cache_gen;
 
-	if (!info->extent_root)
+	/*
+	 * Either no extent root (with ibadroots rescue option) or we have
+	 * unsupported RO options. The fs can never be mounted read-write, so no
+	 * need to waste time searching block group items.
+	 *
+	 * This also allows new extent tree related changes to be RO compat,
+	 * no need for a full incompat flag.
+	 */
+	if (!info->extent_root || (btrfs_super_compat_ro_flags(info->super_copy) &
+		      ~BTRFS_FEATURE_COMPAT_RO_SUPP))
 		return fill_dummy_bgs(info);
 
 	key.objectid = 0;
@@ -2048,6 +2048,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 			ret = -EINVAL;
 			goto restore;
 		}
+		if (btrfs_super_compat_ro_flags(fs_info->super_copy) &
+		    ~BTRFS_FEATURE_COMPAT_RO_SUPP) {
+			btrfs_err(fs_info,
+		"can not remount read-write due to unsupported optional flags 0x%llx",
+				btrfs_super_compat_ro_flags(fs_info->super_copy) &
+				~BTRFS_FEATURE_COMPAT_RO_SUPP);
+			ret = -EINVAL;
+			goto restore;
+		}
 		if (fs_info->fs_devices->rw_devices == 0) {
 			ret = -EACCES;
 			goto restore;
@@ -1263,8 +1263,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
 	ssize_t rc;
 	struct cifsFileInfo *cfile = dst_file->private_data;
 
-	if (cfile->swapfile)
-		return -EOPNOTSUPP;
+	if (cfile->swapfile) {
+		rc = -EOPNOTSUPP;
+		free_xid(xid);
+		return rc;
+	}
 
 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
 					len, flags);
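All four cifs xid fixes in this series are the same shape: the function takes an xid at entry, and an early return used to slip out without releasing it. A stripped-down sketch of the acquire/release pairing (get_xid()/free_xid() replaced by counting stand-ins):

#include <stdio.h>

static int xids_live;

static int get_xid(void)	{ return ++xids_live; }
static void free_xid(int xid)	{ (void)xid; xids_live--; }

static int op(int fail_early)
{
	int rc = 0;
	int xid = get_xid();

	if (fail_early) {
		rc = -1;
		goto out_free_xid;	/* every exit path releases the xid */
	}
	/* ... real work would go here ... */
out_free_xid:
	free_xid(xid);
	return rc;
}

int main(void)
{
	op(1);
	op(0);
	printf("leaked xids: %d\n", xids_live);	/* 0 */
	return 0;
}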
@@ -538,8 +538,10 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
 	cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
 		 inode, direntry, direntry);
 
-	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
-		return -EIO;
+	if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) {
+		rc = -EIO;
+		goto out_free_xid;
+	}
 
 	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
 	rc = PTR_ERR(tlink);
@@ -1806,11 +1806,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
 	struct cifsFileInfo *cfile;
 	__u32 type;
 
-	rc = -EACCES;
 	xid = get_xid();
 
-	if (!(fl->fl_flags & FL_FLOCK))
-		return -ENOLCK;
+	if (!(fl->fl_flags & FL_FLOCK)) {
+		rc = -ENOLCK;
+		free_xid(xid);
+		return rc;
+	}
 
 	cfile = (struct cifsFileInfo *)file->private_data;
 	tcon = tlink_tcon(cfile->tlink);
@@ -1829,8 +1831,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
 		 * if no lock or unlock then nothing to do since we do not
 		 * know what it is
 		 */
+		rc = -EOPNOTSUPP;
 		free_xid(xid);
-		return -EOPNOTSUPP;
+		return rc;
 	}
 
 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
@@ -320,6 +320,7 @@ out:
 	if (rc && chan->server)
 		cifs_put_tcp_session(chan->server, 0);
 
+	free_xid(xid);
 	return rc;
 }
 
@@ -3632,7 +3632,7 @@ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
 	case DLM_MSG_REQUEST_REPLY:
 	case DLM_MSG_CONVERT_REPLY:
 	case DLM_MSG_GRANT:
-		if (!lkb->lkb_lvbptr)
+		if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
 			break;
 		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
 		break;
@@ -3803,11 +3803,6 @@ static int __query_dir(struct dir_context *ctx, const char *name, int namlen,
 	return 0;
 }
 
-static void restart_ctx(struct dir_context *ctx)
-{
-	ctx->pos = 0;
-}
-
 static int verify_info_level(int info_level)
 {
 	switch (info_level) {
@@ -3921,7 +3916,6 @@ int smb2_query_dir(struct ksmbd_work *work)
 	if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
 		ksmbd_debug(SMB, "Restart directory scan\n");
 		generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
-		restart_ctx(&dir_fp->readdir_data.ctx);
 	}
 
 	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
@@ -3962,11 +3956,15 @@ int smb2_query_dir(struct ksmbd_work *work)
 	set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
 
 	rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
-	if (rc == 0)
-		restart_ctx(&dir_fp->readdir_data.ctx);
-	if (rc == -ENOSPC)
+	/*
+	 * req->OutputBufferLength is too small to contain even one entry.
+	 * In this case, it immediately returns OutputBufferLength 0 to client.
+	 */
+	if (!d_info.out_buf_len && !d_info.num_entry)
+		goto no_buf_len;
+	if (rc > 0 || rc == -ENOSPC)
 		rc = 0;
-	if (rc)
+	else if (rc)
 		goto err_out;
 
 	d_info.wptr = d_info.rptr;
@@ -3988,10 +3986,12 @@ int smb2_query_dir(struct ksmbd_work *work)
 		rsp->Buffer[0] = 0;
 		inc_rfc1001_len(rsp_org, 9);
 	} else {
+no_buf_len:
 		((struct file_directory_info *)
 		((char *)rsp->Buffer + d_info.last_entry_offset))
 		->NextEntryOffset = 0;
-		d_info.data_count -= d_info.last_entry_off_align;
+		if (d_info.data_count >= d_info.last_entry_off_align)
+			d_info.data_count -= d_info.last_entry_off_align;
 
 		rsp->StructureSize = cpu_to_le16(9);
 		rsp->OutputBufferOffset = cpu_to_le16(72);
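The last ksmbd hunk guards the data_count subtraction with a comparison first; on an unsigned counter an unchecked subtraction would wrap to a huge value instead of going negative. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int data_count = 1, align = 8;

	if (data_count >= align)	/* guarded: stays 1 */
		data_count -= align;
	printf("guarded: %u\n", data_count);

	data_count -= align;		/* unguarded: wraps to 4294967289 */
	printf("wrapped: %u\n", data_count);
	return 0;
}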
@@ -4021,6 +4021,8 @@ err_out2:
|
|||||||
rsp->hdr.Status = STATUS_NO_MEMORY;
|
rsp->hdr.Status = STATUS_NO_MEMORY;
|
||||||
else if (rc == -EFAULT)
|
else if (rc == -EFAULT)
|
||||||
rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
|
rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
|
||||||
|
else if (rc == -EIO)
|
||||||
|
rsp->hdr.Status = STATUS_FILE_CORRUPT_ERROR;
|
||||||
if (!rsp->hdr.Status)
|
if (!rsp->hdr.Status)
|
||||||
rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
|
rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
|
||||||
|
|
||||||
|
|||||||
@@ -231,6 +231,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
|
|||||||
handle_t *handle = NULL;
|
handle_t *handle = NULL;
|
||||||
struct ocfs2_super *osb;
|
struct ocfs2_super *osb;
|
||||||
struct ocfs2_dinode *dirfe;
|
struct ocfs2_dinode *dirfe;
|
||||||
|
struct ocfs2_dinode *fe = NULL;
|
||||||
struct buffer_head *new_fe_bh = NULL;
|
struct buffer_head *new_fe_bh = NULL;
|
||||||
struct inode *inode = NULL;
|
struct inode *inode = NULL;
|
||||||
struct ocfs2_alloc_context *inode_ac = NULL;
|
struct ocfs2_alloc_context *inode_ac = NULL;
|
||||||
@@ -381,6 +382,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns,
|
|||||||
goto leave;
|
goto leave;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
|
||||||
if (S_ISDIR(mode)) {
|
if (S_ISDIR(mode)) {
|
||||||
status = ocfs2_fill_new_dir(osb, handle, dir, inode,
|
status = ocfs2_fill_new_dir(osb, handle, dir, inode,
|
||||||
new_fe_bh, data_ac, meta_ac);
|
new_fe_bh, data_ac, meta_ac);
|
||||||
@@ -453,8 +455,11 @@ roll_back:
|
|||||||
leave:
|
leave:
|
||||||
if (status < 0 && did_quota_inode)
|
if (status < 0 && did_quota_inode)
|
||||||
dquot_free_inode(inode);
|
dquot_free_inode(inode);
|
||||||
if (handle)
|
if (handle) {
|
||||||
|
if (status < 0 && fe)
|
||||||
|
ocfs2_set_links_count(fe, 0);
|
||||||
ocfs2_commit_trans(osb, handle);
|
ocfs2_commit_trans(osb, handle);
|
||||||
|
}
|
||||||
|
|
||||||
ocfs2_inode_unlock(dir, 1);
|
ocfs2_inode_unlock(dir, 1);
|
||||||
if (did_block_signals)
|
if (did_block_signals)
|
||||||
@@ -631,18 +636,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
|
return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
|
||||||
parent_fe_bh, handle, inode_ac,
|
parent_fe_bh, handle, inode_ac,
|
||||||
fe_blkno, suballoc_loc, suballoc_bit);
|
fe_blkno, suballoc_loc, suballoc_bit);
|
||||||
if (status < 0) {
|
|
||||||
u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit);
|
|
||||||
int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode,
|
|
||||||
inode_ac->ac_bh, suballoc_bit, bg_blkno, 1);
|
|
||||||
if (tmp)
|
|
||||||
mlog_errno(tmp);
|
|
||||||
}
|
|
||||||
|
|
||||||
return status;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ocfs2_mkdir(struct user_namespace *mnt_userns,
|
static int ocfs2_mkdir(struct user_namespace *mnt_userns,
|
||||||
@@ -2027,8 +2023,11 @@ bail:
 		ocfs2_clusters_to_bytes(osb->sb, 1));
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
-	if (handle)
+	if (handle) {
+		if (status < 0 && fe)
+			ocfs2_set_links_count(fe, 0);
 		ocfs2_commit_trans(osb, handle);
+	}
 
 	ocfs2_inode_unlock(dir, 1);
 	if (did_block_signals)
@@ -962,7 +962,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			vma = vma->vm_next;
 		}
 
-		show_vma_header_prefix(m, priv->mm->mmap->vm_start,
+		show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0,
 				       last_vma_end, 0, 0, 0, 0);
 		seq_pad(m, ' ');
 		seq_puts(m, "[rollup]\n");
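Note on the hunk above (the smaps_rollup fix in this batch): if the target task has dropped all of its mappings by the time the rollup line is emitted, priv->mm->mmap is NULL and the old code dereferenced it unconditionally. The ternary prints a 0 start address for the empty-mm case instead of oopsing.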
@@ -1123,6 +1123,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 			    struct kvm_enable_cap *cap);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -64,6 +64,7 @@ enum phylink_op_type {
  * @pcs_poll: MAC PCS cannot provide link change interrupt
  * @poll_fixed_state: if true, starts link_poll,
  *		      if MAC link is at %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
  * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
  * @get_fixed_state: callback to execute to determine the fixed link state,
  *		     if MAC link is at %MLO_AN_FIXED mode.
@@ -73,6 +74,7 @@ struct phylink_config {
 	enum phylink_op_type type;
 	bool pcs_poll;
 	bool poll_fixed_state;
+	bool mac_managed_pm;
 	bool ovr_an_inband;
 	void (*get_fixed_state)(struct phylink_config *config,
 				struct phylink_link_state *state);
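Note on the two phylink hunks above: the new mac_managed_pm flag lets a MAC driver declare that it handles the attached PHY's suspend/resume itself. As I read the companion phylink change, the flag is copied to phydev->mac_managed_pm when the PHY is connected, which keeps the PHY core's MDIO bus PM from also touching the device; MACs that power-cycle their PHY on resume set it to avoid double suspend/resume handling.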
@@ -1184,7 +1184,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
 static inline void qdisc_reset_queue(struct Qdisc *sch)
 {
 	__qdisc_reset_queue(&sch->q);
-	sch->qstats.backlog = 0;
 }
 
 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
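Context for this hunk and the string of sch_*.c hunks further down: the core qdisc_reset() zeroes qlen and backlog itself after invoking the qdisc's private reset hook, so every per-qdisc "sch->q.qlen = 0; sch->qstats.backlog = 0;" is redundant and the series deletes them. A paraphrase of the core helper (from memory, not a verbatim quote of sch_generic.c):

/* Paraphrase only; the exact body may differ, but the division of labour
 * is the point: qlen/backlog are zeroed once, centrally, after
 * ops->reset() runs. */
void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);		/* qdisc-private cleanup only */

	__skb_queue_purge(&qdisc->gso_skb);
	__skb_queue_purge(&qdisc->skb_bad_txq);

	qdisc->q.qlen = 0;			/* centralized bookkeeping */
	qdisc->qstats.backlog = 0;
}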
@@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
 {
 	struct sock_reuseport *reuse;
 	bool ret = false;
 
 	rcu_read_lock();
 	reuse = rcu_dereference(sk->sk_reuseport_cb);
-	if (reuse) {
-		if (set)
-			reuse->has_conns = 1;
-		ret = reuse->has_conns;
-	}
+	if (reuse && reuse->has_conns)
+		ret = true;
 	rcu_read_unlock();
 
 	return ret;
 }
 
+void reuseport_has_conns_set(struct sock *sk);
+
 #endif /* _SOCK_REUSEPORT_H */
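Why the reuseport API splits here: the old reuseport_has_conns(sk, true) wrote has_conns under only rcu_read_lock(), which can interleave with reuseport_grow() copying the group into a bigger allocation; the write lands in the copy that is about to be replaced and is lost, so later lookups skip the full-scoring fallback and can steer packets to the wrong connected socket. The setter therefore moves out of line and takes reuseport_lock (see the sock_reuseport.c hunk below). A sequential userspace re-enactment of the lost update (invented names):

/* 'cb' plays sk->sk_reuseport_cb, grow() plays reuseport_grow(). */
#include <stdio.h>
#include <string.h>

struct group { int has_conns; int capacity; };

static struct group *cb;

static struct group *grow(struct group *old)
{
	static struct group bigger;

	memcpy(&bigger, old, sizeof(bigger));	/* snapshot taken here */
	bigger.capacity = old->capacity * 2;
	return &bigger;
}

int main(void)
{
	static struct group g = { .has_conns = 0, .capacity = 4 };
	struct group *snap, *newg;

	cb = &g;
	snap = cb;		/* CPU A: rcu_dereference(cb)		*/
	newg = grow(cb);	/* CPU B: copies group, flag still 0	*/
	snap->has_conns = 1;	/* CPU A: sets flag on the OLD copy	*/
	cb = newg;		/* CPU B: publishes the copy: flag lost	*/

	printf("has_conns after swap: %d\n", cb->has_conns);	/* prints 0 */
	return 0;
}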
@@ -6411,12 +6411,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	had_max_tr = tr->current_trace->use_max_tr;
+
 	/* Current trace needs to be nop_trace before synchronize_rcu */
 	tr->current_trace = &nop_trace;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	had_max_tr = tr->allocated_snapshot;
-
 	if (had_max_tr && !t->use_max_tr) {
 		/*
 		 * We need to make sure that the update_max_tr sees that
@@ -6428,14 +6428,14 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		synchronize_rcu();
 		free_snapshot(tr);
 	}
-#endif
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (t->use_max_tr && !tr->allocated_snapshot) {
+	if (t->use_max_tr && !had_max_tr) {
 		ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			goto out;
 	}
+#else
+	tr->current_trace = &nop_trace;
 #endif
 
 	if (t->init) {
@@ -2813,11 +2813,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
 		if (!page)
 			goto out_uncharge_cgroup;
+		spin_lock_irq(&hugetlb_lock);
 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
 			SetHPageRestoreReserve(page);
 			h->resv_huge_pages--;
 		}
-		spin_lock_irq(&hugetlb_lock);
 		list_add(&page->lru, &h->hugepage_activelist);
 		/* Fall through */
 	}
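The hugetlb hunk above restores lock coverage: h->resv_huge_pages is protected by hugetlb_lock, but the decrement sat before the lock was taken, so concurrent allocators could race the read-modify-write. The generic shape of the rule, as a runnable sketch (compile with -pthread):

/* The shared counter's read-modify-write belongs inside the lock,
 * not before taking it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long resv_pages = 1000;		/* plays h->resv_huge_pages */

static void *consume(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100; i++) {
		pthread_mutex_lock(&lock);
		resv_pages--;		/* safe: under the lock */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, consume, NULL);
	pthread_create(&b, NULL, consume, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("%ld\n", resv_pages);	/* always 800 with locking */
	return 0;
}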
@@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
 	if (!page)
 		return -ENOMEM;
 
-	for (p = page, len = 0; len < nbytes; p++, len++) {
+	for (p = page, len = 0; len < nbytes; p++) {
 		if (get_user(*p, buff++)) {
 			free_page((unsigned long)page);
 			return -EFAULT;
 		}
+		len += 1;
 		if (*p == '\0' || *p == '\n')
 			break;
 	}
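The mpoa hunk above fixes the returned byte count: with the increment in the for-update clause, breaking on '\0'/'\n' skipped the final len++, so proc_mpc_write() reported one byte fewer than it consumed. Moving the increment into the body counts every byte actually copied. A standalone demonstration:

/* The old style never runs the update clause when the body breaks on the
 * terminator, so the terminating byte is not counted in 'len'. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static size_t count_old(const char *buf, size_t nbytes)
{
	const char *p;
	size_t len;

	for (p = buf, len = 0; len < nbytes; p++, len++)
		if (*p == '\0' || *p == '\n')
			break;		/* len++ in the update clause never runs */
	return len;
}

static size_t count_new(const char *buf, size_t nbytes)
{
	const char *p;
	size_t len;

	for (p = buf, len = 0; len < nbytes; p++) {
		len += 1;		/* count the byte we just consumed */
		if (*p == '\0' || *p == '\n')
			break;
	}
	return len;
}

int main(void)
{
	const char *input = "add 1.2.3.4\n";

	printf("old: %zu, new: %zu, wrote: %zu\n",
	       count_old(input, strlen(input)),
	       count_new(input, strlen(input)),
	       strlen(input));		/* old undercounts by one */
	return 0;
}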
@@ -21,6 +21,22 @@ static DEFINE_IDA(reuseport_ida);
 static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
 			       struct sock_reuseport *reuse, bool bind_inany);
 
+void reuseport_has_conns_set(struct sock *sk)
+{
+	struct sock_reuseport *reuse;
+
+	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+		return;
+
+	spin_lock_bh(&reuseport_lock);
+	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+					  lockdep_is_held(&reuseport_lock));
+	if (likely(reuse))
+		reuse->has_conns = 1;
+	spin_unlock_bh(&reuseport_lock);
+}
+EXPORT_SYMBOL(reuseport_has_conns_set);
+
 static int reuseport_sock_index(struct sock *sk,
 				const struct sock_reuseport *reuse,
 				bool closed)
@@ -108,15 +108,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
 				       struct hsr_port *port)
 {
 	if (!frame->skb_std) {
-		if (frame->skb_hsr) {
+		if (frame->skb_hsr)
 			frame->skb_std =
 				create_stripped_skb_hsr(frame->skb_hsr, frame);
-		} else {
-			/* Unexpected */
-			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
-				  __FILE__, __LINE__, port->dev->name);
+		else
+			netdev_warn_once(port->dev,
+					 "Unexpected frame received in hsr_get_untagged_frame()\n");
+
+		if (!frame->skb_std)
 			return NULL;
-		}
 	}
 
 	return skb_clone(frame->skb_std, GFP_ATOMIC);
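Note on the hsr hunk above: two things change together. The unexpected-frame path drops WARN_ONCE() (a stack trace for a condition a peer can trigger) in favor of netdev_warn_once(), and the NULL check is widened so that a failed create_stripped_skb_hsr() also returns NULL here instead of letting skb_clone() dereference frame->skb_std == NULL.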
@@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	}
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
-	reuseport_has_conns(sk, true);
+	reuseport_has_conns_set(sk);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 	inet->inet_id = prandom_u32();
@@ -447,7 +447,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 			result = lookup_reuseport(net, sk, skb,
 						  saddr, sport, daddr, hnum);
 			/* Fall back to scoring if group has connections */
-			if (result && !reuseport_has_conns(sk, false))
+			if (result && !reuseport_has_conns(sk))
 				return result;
 
 			result = result ? : sk;
@@ -256,7 +256,7 @@ ipv4_connected:
 		goto out;
 	}
 
-	reuseport_has_conns(sk, true);
+	reuseport_has_conns_set(sk);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
@@ -180,7 +180,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 			result = lookup_reuseport(net, sk, skb,
 						  saddr, sport, daddr, hnum);
 			/* Fall back to scoring if group has connections */
-			if (result && !reuseport_has_conns(sk, false))
+			if (result && !reuseport_has_conns(sk))
 				return result;
 
 			result = result ? : sk;
@@ -5720,8 +5720,9 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
 	    (NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
 		if (flags & NFT_SET_ELEM_INTERVAL_END)
 			return false;
-		if (!nla[NFTA_SET_ELEM_KEY_END] &&
-		    !(flags & NFT_SET_ELEM_CATCHALL))
+
+		if (nla[NFTA_SET_ELEM_KEY_END] &&
+		    flags & NFT_SET_ELEM_CATCHALL)
 			return false;
 	} else {
 		if (nla[NFTA_SET_ELEM_KEY_END])
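Note on the nf_tables hunk above: for sets with both NFT_SET_CONCAT and NFT_SET_INTERVAL, NFTA_SET_ELEM_KEY_END was previously mandatory unless the element was a catch-all. The relaxed check makes KEY_END optional and only rejects the genuinely contradictory case of a catch-all element that also carries an end key.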
@@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
 skip:
 		if (!ingress) {
-			notify_and_destroy(net, skb, n, classid,
-					   rtnl_dereference(dev->qdisc), new);
+			old = rtnl_dereference(dev->qdisc);
 			if (new && !new->ops->attach)
 				qdisc_refcount_inc(new);
 			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
 
+			notify_and_destroy(net, skb, n, classid, old, new);
+
 			if (new && new->ops->attach)
 				new->ops->attach(new);
 		} else {
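The qdisc_graft hunk above reorders destruction against publication: the old root was previously handed to notify_and_destroy() before the replacement was assigned to dev->qdisc, giving lockless readers a window onto a qdisc being torn down. Now the code snapshots the old pointer, publishes the new qdisc, and notifies/destroys last. A userspace analogue of the ordering (invented names; the kernel defers the actual free via refcounts/RCU, so free() here only stands in for the retire step):

#include <stdio.h>
#include <stdlib.h>

struct qd { const char *name; };

static struct qd *active;	/* plays dev->qdisc, read locklessly elsewhere */

static void graft(struct qd *newq)
{
	struct qd *old = active;	/* 1. take the old pointer	*/
	active = newq;			/* 2. publish the replacement	*/
	free(old);			/* 3. retire the old object last */
}

int main(void)
{
	struct qd *first = malloc(sizeof(*first));
	struct qd *second = malloc(sizeof(*second));

	first->name = "noop";
	second->name = "cake";
	active = first;
	graft(second);
	printf("%s\n", active->name);
	free(second);
	return 0;
}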
@@ -576,7 +576,6 @@ static void atm_tc_reset(struct Qdisc *sch)
 	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
 	list_for_each_entry(flow, &p->flows, list)
 		qdisc_reset(flow->q);
-	sch->q.qlen = 0;
 }
 
 static void atm_tc_destroy(struct Qdisc *sch)
@@ -2224,8 +2224,12 @@ retry:
 
 static void cake_reset(struct Qdisc *sch)
 {
+	struct cake_sched_data *q = qdisc_priv(sch);
 	u32 c;
 
+	if (!q->tins)
+		return;
+
 	for (c = 0; c < CAKE_MAX_TINS; c++)
 		cake_clear_tin(sch, c);
 }
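Note on the cake hunk above: unlike the neighbouring cleanups it is a NULL guard, not a redundancy removal. When cake_init() fails before q->tins is allocated, qdisc teardown still calls ops->reset(), so cake_reset() must tolerate the half-initialized state; the sfb fix listed in this batch addresses the same pattern.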
@@ -1053,7 +1053,6 @@ cbq_reset(struct Qdisc *sch)
 			cl->cpriority = cl->priority;
 		}
 	}
-	sch->q.qlen = 0;
 }
 
 
@@ -315,8 +315,6 @@ static void choke_reset(struct Qdisc *sch)
 		rtnl_qdisc_drop(skb, sch);
 	}
 
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	if (q->tab)
 		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
 	q->head = q->tail = 0;
@@ -444,8 +444,6 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void drr_destroy_qdisc(struct Qdisc *sch)
@@ -409,8 +409,6 @@ static void dsmark_reset(struct Qdisc *sch)
 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 	if (p->q)
 		qdisc_reset(p->q);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void dsmark_destroy(struct Qdisc *sch)
@@ -445,9 +445,6 @@ static void etf_reset(struct Qdisc *sch)
 	timesortedlist_clear(sch);
 	__qdisc_reset_queue(&sch->q);
 
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
-
 	q->last = 0;
 }
 
@@ -722,8 +722,6 @@ static void ets_qdisc_reset(struct Qdisc *sch)
 	}
 	for (band = 0; band < q->nbands; band++)
 		qdisc_reset(q->classes[band].qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void ets_qdisc_destroy(struct Qdisc *sch)
@@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch)
 		codel_vars_init(&flow->cvars);
 	}
 	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	q->memory_usage = 0;
 }
@@ -521,9 +521,6 @@ static void fq_pie_reset(struct Qdisc *sch)
 		INIT_LIST_HEAD(&flow->flowchain);
 		pie_vars_init(&flow->vars);
 	}
-
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 }
 
 static void fq_pie_destroy(struct Qdisc *sch)
@@ -1485,8 +1485,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 	}
 	q->eligible = RB_ROOT;
 	qdisc_watchdog_cancel(&q->watchdog);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void
@@ -1008,8 +1008,6 @@ static void htb_reset(struct Qdisc *sch)
 	}
 	qdisc_watchdog_cancel(&q->watchdog);
 	__qdisc_reset_queue(&q->direct_queue);
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	memset(q->hlevel, 0, sizeof(q->hlevel));
 	memset(q->row_mask, 0, sizeof(q->row_mask));
 }
@@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch)
 
 	for (band = 0; band < q->bands; band++)
 		qdisc_reset(q->queues[band]);
-	sch->q.qlen = 0;
 	q->curband = 0;
 }
 
Some files were not shown because too many files have changed in this diff.