Merge 5.15.79 into android14-5.15

Changes in 5.15.79
	thunderbolt: Tear down existing tunnels when resuming from hibernate
	thunderbolt: Add DP OUT resource when DP tunnel is discovered
	fuse: fix readdir cache race
	drm/amdkfd: avoid recursive lock in migrations back to RAM
	drm/amdkfd: handle CPU fault on COW mapping
	drm/amdkfd: Fix NULL pointer dereference in svm_migrate_to_ram()
	hwspinlock: qcom: correct MMIO max register for newer SoCs
	phy: stm32: fix an error code in probe
	wifi: cfg80211: silence a sparse RCU warning
	wifi: cfg80211: fix memory leak in query_regdb_file()
	soundwire: qcom: reinit broadcast completion
	soundwire: qcom: check for outanding writes before doing a read
	bpf, verifier: Fix memory leak in array reallocation for stack state
	bpf, sockmap: Fix the sk->sk_forward_alloc warning of sk_stream_kill_queues
	wifi: mac80211: Set TWT Information Frame Disabled bit as 1
	bpftool: Fix NULL pointer dereference when pin {PROG, MAP, LINK} without FILE
	HID: hyperv: fix possible memory leak in mousevsc_probe()
	bpf, sockmap: Fix sk->sk_forward_alloc warn_on in sk_stream_kill_queues
	bpf: Fix sockmap calling sleepable function in teardown path
	bpf, sock_map: Move cancel_work_sync() out of sock lock
	bpf: Add helper macro bpf_for_each_reg_in_vstate
	bpf: Fix wrong reg type conversion in release_reference()
	net: gso: fix panic on frag_list with mixed head alloc types
	macsec: delete new rxsc when offload fails
	macsec: fix secy->n_rx_sc accounting
	macsec: fix detection of RXSCs when toggling offloading
	macsec: clear encryption keys from the stack after setting up offload
	octeontx2-pf: Use hardware register for CQE count
	octeontx2-pf: NIX TX overwrites SQ_CTX_HW_S[SQ_INT]
	net: tun: Fix memory leaks of napi_get_frags
	bnxt_en: Fix possible crash in bnxt_hwrm_set_coal()
	bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer
	net: fman: Unregister ethernet device on removal
	capabilities: fix undefined behavior in bit shift for CAP_TO_MASK
	phy: ralink: mt7621-pci: add sentinel to quirks table
	KVM: s390: pv: don't allow userspace to set the clock under PV
	net: lapbether: fix issue of dev reference count leakage in lapbeth_device_event()
	hamradio: fix issue of dev reference count leakage in bpq_device_event()
	net: wwan: iosm: fix memory leak in ipc_wwan_dellink
	net: wwan: mhi: fix memory leak in mhi_mbim_dellink
	drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register()
	tcp: prohibit TCP_REPAIR_OPTIONS if data was already sent
	ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network
	can: af_can: fix NULL pointer dereference in can_rx_register()
	net: stmmac: dwmac-meson8b: fix meson8b_devm_clk_prepare_enable()
	net: broadcom: Fix BCMGENET Kconfig
	tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header
	dmaengine: pxa_dma: use platform_get_irq_optional
	dmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove()
	dmaengine: ti: k3-udma-glue: fix memory leak when register device fail
	net: lapbether: fix issue of invalid opcode in lapbeth_open()
	drivers: net: xgene: disable napi when register irq failed in xgene_enet_open()
	perf stat: Fix printing os->prefix in CSV metrics output
	perf tools: Add the include/perf/ directory to .gitignore
	netfilter: nfnetlink: fix potential dead lock in nfnetlink_rcv_msg()
	netfilter: Cleanup nft_net->module_list from nf_tables_exit_net()
	net: marvell: prestera: fix memory leak in prestera_rxtx_switch_init()
	net: nixge: disable napi when enable interrupts failed in nixge_open()
	net: wwan: iosm: fix memory leak in ipc_pcie_read_bios_cfg
	net/mlx5: Bridge, verify LAG state when adding bond to bridge
	net/mlx5: Allow async trigger completion execution on single CPU systems
	net/mlx5e: E-Switch, Fix comparing termination table instance
	net: cpsw: disable napi in cpsw_ndo_open()
	net: cxgb3_main: disable napi when bind qsets failed in cxgb_up()
	stmmac: intel: Enable 2.5Gbps for Intel AlderLake-S
	stmmac: intel: Update PCH PTP clock rate from 200MHz to 204.8MHz
	mctp: Fix an error handling path in mctp_init()
	cxgb4vf: shut down the adapter when t4vf_update_port_info() failed in cxgb4vf_open()
	stmmac: dwmac-loongson: fix missing pci_disable_msi() while module exiting
	stmmac: dwmac-loongson: fix missing pci_disable_device() in loongson_dwmac_probe()
	stmmac: dwmac-loongson: fix missing of_node_put() while module exiting
	net: phy: mscc: macsec: clear encryption keys when freeing a flow
	net: atlantic: macsec: clear encryption keys from the stack
	ethernet: s2io: disable napi when start nic failed in s2io_card_up()
	net: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open()
	ethernet: tundra: free irq when alloc ring failed in tsi108_open()
	net: macvlan: fix memory leaks of macvlan_common_newlink
	riscv: process: fix kernel info leakage
	riscv: vdso: fix build with llvm
	riscv: fix reserved memory setup
	arm64: efi: Fix handling of misaligned runtime regions and drop warning
	MIPS: jump_label: Fix compat branch range check
	mmc: cqhci: Provide helper for resetting both SDHCI and CQHCI
	mmc: sdhci-of-arasan: Fix SDHCI_RESET_ALL for CQHCI
	mmc: sdhci_am654: Fix SDHCI_RESET_ALL for CQHCI
	mmc: sdhci-tegra: Fix SDHCI_RESET_ALL for CQHCI
	mmc: sdhci-esdhc-imx: use the correct host caps for MMC_CAP_8_BIT_DATA
	ALSA: hda/hdmi - enable runtime pm for more AMD display audio
	ALSA: hda/ca0132: add quirk for EVGA Z390 DARK
	ALSA: hda: fix potential memleak in 'add_widget_node'
	ALSA: hda/realtek: Add Positivo C6300 model quirk
	ALSA: usb-audio: Yet more regression for for the delayed card registration
	ALSA: usb-audio: Add quirk entry for M-Audio Micro
	ALSA: usb-audio: Add DSD support for Accuphase DAC-60
	vmlinux.lds.h: Fix placement of '.data..decrypted' section
	ata: libata-scsi: fix SYNCHRONIZE CACHE (16) command failure
	nilfs2: fix deadlock in nilfs_count_free_blocks()
	nilfs2: fix use-after-free bug of ns_writer on remount
	drm/i915/dmabuf: fix sg_table handling in map_dma_buf
	drm/amdgpu: disable BACO on special BEIGE_GOBY card
	platform/x86: hp_wmi: Fix rfkill causing soft blocked wifi
	wifi: ath11k: avoid deadlock during regulatory update in ath11k_regd_update()
	btrfs: fix match incorrectly in dev_args_match_device
	btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
	btrfs: zoned: initialize device's zone info for seeding
	mms: sdhci-esdhc-imx: Fix SDHCI_RESET_ALL for CQHCI
	udf: Fix a slab-out-of-bounds write bug in udf_find_entry()
	mm/damon/dbgfs: check if rm_contexts input is for a real context
	mm/memremap.c: map FS_DAX device memory as decrypted
	mm/shmem: use page_mapping() to detect page cache for uffd continue
	can: j1939: j1939_send_one(): fix missing CAN header initialization
	cert host tools: Stop complaining about deprecated OpenSSL functions
	dmaengine: at_hdmac: Fix at_lli struct definition
	dmaengine: at_hdmac: Don't start transactions at tx_submit level
	dmaengine: at_hdmac: Start transfer for cyclic channels in issue_pending
	dmaengine: at_hdmac: Fix premature completion of desc in issue_pending
	dmaengine: at_hdmac: Do not call the complete callback on device_terminate_all
	dmaengine: at_hdmac: Protect atchan->status with the channel lock
	dmaengine: at_hdmac: Fix concurrency problems by removing atc_complete_all()
	dmaengine: at_hdmac: Fix concurrency over descriptor
	dmaengine: at_hdmac: Free the memset buf without holding the chan lock
	dmaengine: at_hdmac: Fix concurrency over the active list
	dmaengine: at_hdmac: Fix descriptor handling when issuing it to hardware
	dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors
	dmaengine: at_hdmac: Don't allow CPU to reorder channel enable
	dmaengine: at_hdmac: Fix impossible condition
	dmaengine: at_hdmac: Check return code of dma_async_device_register
	marvell: octeontx2: build error: unknown type name 'u64'
	drm/amdkfd: Migrate in CPU page fault use current mm
	net: tun: call napi_schedule_prep() to ensure we own a napi
	x86/cpu: Restore AMD's DE_CFG MSR after resume
	Linux 5.15.79

Change-Id: I395d5b480d2abd70e94c3505a4bd2ad728424fb3
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2022-11-16 10:04:35 +00:00
129 changed files with 1093 additions and 529 deletions

View File

@@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT).
 :Parameters: address of a buffer in user space to store the data (u8) to
 :Returns:    -EFAULT if the given address is not accessible from kernel space;
              -EINVAL if setting the TOD clock extension to != 0 is not supported
+             -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
 
 3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
 -----------------------------------
@@ -224,6 +225,7 @@ the POP (u64).
 
 :Parameters: address of a buffer in user space to store the data (u64) to
 :Returns:    -EFAULT if the given address is not accessible from kernel space
+             -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
 
 3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
 -----------------------------------
@@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0.
              (kvm_s390_vm_tod_clock) to
 :Returns:    -EFAULT if the given address is not accessible from kernel space;
              -EINVAL if setting the TOD clock extension to != 0 is not supported
+             -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)
 
 4. GROUP: KVM_S390_VM_CRYPTO
 ============================

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 78
+SUBLEVEL = 79
 EXTRAVERSION =
 NAME = Trick or Treat
 

View File

@@ -12,6 +12,14 @@
 
 #include <asm/efi.h>
 
+static bool region_is_misaligned(const efi_memory_desc_t *md)
+{
+	if (PAGE_SIZE == EFI_PAGE_SIZE)
+		return false;
+	return !PAGE_ALIGNED(md->phys_addr) ||
+	       !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
+}
+
 /*
  * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
  * executable, everything else can be mapped with the XN bits
@@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
 	if (type == EFI_MEMORY_MAPPED_IO)
 		return PROT_DEVICE_nGnRE;
 
-	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
-		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+	if (region_is_misaligned(md)) {
+		static bool __initdata code_is_misaligned;
+
 		/*
-		 * If the region is not aligned to the page size of the OS, we
-		 * can not use strict permissions, since that would also affect
-		 * the mapping attributes of the adjacent regions.
+		 * Regions that are not aligned to the OS page size cannot be
+		 * mapped with strict permissions, as those might interfere
+		 * with the permissions that are needed by the adjacent
+		 * region's mapping. However, if we haven't encountered any
+		 * misaligned runtime code regions so far, we can safely use
+		 * non-executable permissions for non-code regions.
 		 */
-		return pgprot_val(PAGE_KERNEL_EXEC);
+		code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+
+		return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+					  : pgprot_val(PAGE_KERNEL);
+	}
 
 	/* R-- */
 	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -63,19 +79,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
 				   md->type == EFI_RUNTIME_SERVICES_DATA);
 
-	if (!PAGE_ALIGNED(md->phys_addr) ||
-	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
-		/*
-		 * If the end address of this region is not aligned to page
-		 * size, the mapping is rounded up, and may end up sharing a
-		 * page frame with the next UEFI memory region. If we create
-		 * a block entry now, we may need to split it again when mapping
-		 * the next region, and support for that is going to be removed
-		 * from the MMU routines. So avoid block mappings altogether in
-		 * that case.
-		 */
+	/*
+	 * If this region is not aligned to the page size used by the OS, the
+	 * mapping will be rounded outwards, and may end up sharing a page
+	 * frame with an adjacent runtime memory region. Given that the page
+	 * table descriptor covering the shared page will be rewritten when the
+	 * adjacent region gets mapped, we must avoid block mappings here so we
+	 * don't have to worry about splitting them when that happens.
+	 */
+	if (region_is_misaligned(md))
 		page_mappings_only = true;
-	}
 
 	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
 			   md->num_pages << EFI_PAGE_SHIFT,
@@ -102,6 +115,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
 	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
 	       md->type != EFI_RUNTIME_SERVICES_DATA);
 
+	if (region_is_misaligned(md))
+		return 0;
+
 	/*
 	 * Calling apply_to_page_range() is only safe on regions that are
 	 * guaranteed to be mapped down to pages. Since we are only called

View File

@@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e,
 			 * The branch offset must fit in the instruction's 26
 			 * bit field.
 			 */
-			WARN_ON((offset >= BIT(25)) ||
+			WARN_ON((offset >= (long)BIT(25)) ||
 				(offset < -(long)BIT(25)));
 
 			insn.j_format.opcode = bc6_op;

View File

@@ -124,6 +124,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 
+	memset(&p->thread.s, 0, sizeof(p->thread.s));
+
 	/* p->thread holds context to be restored by __switch_to() */
 	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
 		/* Kernel thread */

View File

@@ -291,6 +291,7 @@ void __init setup_arch(char **cmdline_p)
 	else
 		pr_err("No DTB found in kernel mappings\n");
 #endif
+	early_init_fdt_scan_reserved_mem();
 	misc_mem_init();
 
 	init_resources();

View File

@@ -30,7 +30,7 @@ obj-y += vdso.o
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
 # Disable -pg to prevent insert call site
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
 
 # Disable profiling and instrumentation for VDSO code
 GCOV_PROFILE := n

View File

@@ -242,7 +242,6 @@ static void __init setup_bootmem(void)
 		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
 	}
 
-	early_init_fdt_scan_reserved_mem();
 	dma_contiguous_reserve(dma32_phys_limit);
 	if (IS_ENABLED(CONFIG_64BIT))
 		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

View File

@@ -1117,6 +1117,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
 	return 0;
 }
 
+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
+
 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	struct kvm_s390_vm_tod_clock gtod;
@@ -1126,7 +1128,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
 
-	kvm_s390_set_tod_clock(kvm, &gtod);
+	__kvm_s390_set_tod_clock(kvm, &gtod);
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
 
@@ -1157,7 +1159,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, &gtod);
+	__kvm_s390_set_tod_clock(kvm, &gtod);
 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
@@ -1169,6 +1171,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (attr->flags)
 		return -EINVAL;
 
+	mutex_lock(&kvm->lock);
+	/*
+	 * For protected guests, the TOD is managed by the ultravisor, so trying
+	 * to change it will never bring the expected results.
+	 */
+	if (kvm_s390_pv_is_protected(kvm)) {
+		ret = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
 	switch (attr->attr) {
 	case KVM_S390_VM_TOD_EXT:
 		ret = kvm_s390_set_tod_ext(kvm, attr);
@@ -1183,6 +1195,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 		ret = -ENXIO;
 		break;
 	}
+
+out_unlock:
+	mutex_unlock(&kvm->lock);
 	return ret;
 }
 
@@ -3926,13 +3941,6 @@ static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_t
 	preempt_enable();
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
-{
-	mutex_lock(&kvm->lock);
-	__kvm_s390_set_tod_clock(kvm, gtod);
-	mutex_unlock(&kvm->lock);
-}
-
 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
 {
 	if (!mutex_trylock(&kvm->lock))

View File

@@ -326,7 +326,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);

View File

@@ -495,6 +495,11 @@
 #define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_LS_CFG		0xc0011020
 #define MSR_AMD64_DC_CFG		0xc0011022
+
+#define MSR_AMD64_DE_CFG		0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE	BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
 #define MSR_AMD64_BU_CFG2		0xc001102a
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
@@ -572,9 +577,6 @@
 #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
 #define MSR_FAM10H_NODE_ID		0xc001100c
-#define MSR_F10H_DECFG			0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE	BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1			0xc001001a

View File

@@ -794,8 +794,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
 
-#define MSR_AMD64_DE_CFG	0xC0011029
-
 static void init_amd_ln(struct cpuinfo_x86 *c)
 {
 	/*
@@ -990,8 +988,8 @@ static void init_amd(struct cpuinfo_x86 *c)
 	 * msr_set_bit() uses the safe accessors, too, even if the MSR
 	 * is not present.
 	 */
-	msr_set_bit(MSR_F10H_DECFG,
-		    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+	msr_set_bit(MSR_AMD64_DE_CFG,
+		    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
 
 	/* A serializing LFENCE stops RDTSC speculation */
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

View File

@@ -326,8 +326,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
 	 * msr_set_bit() uses the safe accessors, too, even if the MSR
 	 * is not present.
 	 */
-	msr_set_bit(MSR_F10H_DECFG,
-		    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+	msr_set_bit(MSR_AMD64_DE_CFG,
+		    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
 
 	/* A serializing LFENCE stops RDTSC speculation */
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

View File

@@ -2666,9 +2666,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
 	msr->data = 0;
 
 	switch (msr->index) {
-	case MSR_F10H_DECFG:
-		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
-			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+	case MSR_AMD64_DE_CFG:
+		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
 		break;
 	case MSR_IA32_PERF_CAPABILITIES:
 		return 0;
@@ -2777,7 +2777,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			msr_info->data = 0x1E;
 		}
 		break;
-	case MSR_F10H_DECFG:
+	case MSR_AMD64_DE_CFG:
 		msr_info->data = svm->msr_decfg;
 		break;
 	default:
@@ -2977,7 +2977,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
-	case MSR_F10H_DECFG: {
+	case MSR_AMD64_DE_CFG: {
 		struct kvm_msr_entry msr_entry;
 
 		msr_entry.index = msr->index;

View File

@@ -1464,7 +1464,7 @@ static const u32 msr_based_features_all[] = {
 	MSR_IA32_VMX_EPT_VPID_CAP,
 	MSR_IA32_VMX_VMFUNC,
 
-	MSR_F10H_DECFG,
+	MSR_AMD64_DE_CFG,
 	MSR_IA32_UCODE_REV,
 	MSR_IA32_ARCH_CAPABILITIES,
 	MSR_IA32_PERF_CAPABILITIES,

View File

@@ -519,6 +519,7 @@ static void pm_save_spec_msr(void)
 		MSR_TSX_FORCE_ABORT,
 		MSR_IA32_MCU_OPT_CTRL,
 		MSR_AMD64_LS_CFG,
+		MSR_AMD64_DE_CFG,
 	};
 
 	msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));

View File

@@ -3259,6 +3259,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
 	case REPORT_LUNS:
 	case REQUEST_SENSE:
 	case SYNCHRONIZE_CACHE:
+	case SYNCHRONIZE_CACHE_16:
 	case REZERO_UNIT:
 	case SEEK_6:
 	case SEEK_10:
@@ -3925,6 +3926,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
 		return ata_scsi_write_same_xlat;
 
 	case SYNCHRONIZE_CACHE:
+	case SYNCHRONIZE_CACHE_16:
 		if (ata_try_flush_cache(dev))
 			return ata_scsi_flush_xlat;
 		break;
@@ -4170,6 +4172,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
 	 * turning this into a no-op.
 	 */
 	case SYNCHRONIZE_CACHE:
+	case SYNCHRONIZE_CACHE_16:
 		fallthrough;
 
 	/* no-op's, complete with success */

View File

@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 		       ATC_SPIP_BOUNDARY(first->boundary));
 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 		       ATC_DPIP_BOUNDARY(first->boundary));
+	/* Don't allow CPU to reorder channel enable. */
+	wmb();
 	dma_writel(atdma, CHER, atchan->mask);
 
 	vdbg_dump_regs(atchan);
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr, trials;
+	u32 ctrla, dscr;
+	unsigned int i;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 		dscr = channel_readl(atchan, DSCR);
 		rmb(); /* ensure DSCR is read before CTRLA */
 		ctrla = channel_readl(atchan, CTRLA);
-		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+		for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
 			u32 new_dscr;
 
 			rmb(); /* ensure DSCR is read after CTRLA */
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 			rmb(); /* ensure DSCR is read before CTRLA */
 			ctrla = channel_readl(atchan, CTRLA);
 		}
-		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+		if (unlikely(i == ATC_MAX_DSCR_TRIALS))
 			return -ETIMEDOUT;
 
 		/* for the first descriptor we can be more accurate */
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
-	/* If the transfer was a memset, free our temporary buffer */
-	if (desc->memset_buffer) {
-		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-			      desc->memset_paddr);
-		desc->memset_buffer = false;
-	}
-
-	/* move children to free_list */
-	list_splice_init(&desc->tx_list, &atchan->free_list);
-	/* move myself to free_list */
-	list_move(&desc->desc_node, &atchan->free_list);
-
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	dma_descriptor_unmap(txd);
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	dmaengine_desc_get_callback_invoke(txd, NULL);
 
 	dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-	struct at_desc *desc, *_desc;
-	LIST_HEAD(list);
-	unsigned long flags;
-
-	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
 	spin_lock_irqsave(&atchan->lock, flags);
-
-	/*
-	 * Submit queued descriptors ASAP, i.e. before we go through
-	 * the completed ones.
-	 */
-	if (!list_empty(&atchan->queue))
-		atc_dostart(atchan, atc_first_queued(atchan));
-	/* empty active_list now it is completed */
-	list_splice_init(&atchan->active_list, &list);
-	/* empty queue list by moving descriptors (if any) to active_list */
-	list_splice_init(&atchan->queue, &atchan->active_list);
-
+	/* move children to free_list */
+	list_splice_init(&desc->tx_list, &atchan->free_list);
+	/* add myself to free_list */
+	list_add(&desc->desc_node, &atchan->free_list);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset_buffer) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset_buffer = false;
+	}
 }
 
 /**
@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	struct at_desc *desc;
 	unsigned long flags;
-	int ret;
 
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	ret = atc_chan_is_enabled(atchan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
-	if (ret)
-		return;
-
-	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list))
-		return atc_complete_all(atchan);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	atc_chain_complete(atchan, atc_first_active(atchan));
+	desc = atc_first_active(atchan);
+	/* Remove the transfer node from the active list. */
+	list_del_init(&desc->desc_node);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	atc_chain_complete(atchan, desc);
 
 	/* advance work */
 	spin_lock_irqsave(&atchan->lock, flags);
-	atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
+	struct at_desc *desc;
 	struct at_desc *child;
 	unsigned long flags;
 
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	bad_desc = atc_first_active(atchan);
 	list_del_init(&bad_desc->desc_node);
 
-	/* As we are stopped, take advantage to push queued descriptors
-	 * in active_list */
-	list_splice_init(&atchan->queue, atchan->active_list.prev);
-
 	/* Try to restart the controller */
-	if (!list_empty(&atchan->active_list))
-		atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 
 	/*
 	 * KERN_CRITICAL may seem harsh, but since this only happens
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	if (list_empty(&atchan->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-				desc->txd.cookie);
-		atc_dostart(atchan, desc);
-		list_add_tail(&desc->desc_node, &atchan->active_list);
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-				desc->txd.cookie);
-		list_add_tail(&desc->desc_node, &atchan->queue);
-	}
-
+	list_add_tail(&desc->desc_node, &atchan->queue);
 	spin_unlock_irqrestore(&atchan->lock, flags);
+	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+		 desc->txd.cookie);
 
 	return cookie;
 }
@@ -1437,11 +1400,8 @@ static int atc_terminate_all(struct dma_chan *chan)
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	int chan_id = atchan->chan_common.chan_id;
-	struct at_desc *desc, *_desc;
 	unsigned long flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	/*
@@ -1460,19 +1420,15 @@ static int atc_terminate_all(struct dma_chan *chan)
 		cpu_relax();
 
 	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
-
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	list_splice_tail_init(&atchan->queue, &atchan->free_list);
+	list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
 	clear_bit(ATC_IS_PAUSED, &atchan->status);
 	/* if channel dedicated to cyclic operations, free it */
 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	return 0;
 }
@@ -1527,20 +1483,26 @@ atc_tx_status(struct dma_chan *chan,
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-	/* Not needed for cyclic transfers */
-	if (atc_chan_is_cyclic(atchan))
-		return;
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	atc_advance_work(atchan);
+	desc = atc_first_queued(atchan);
+	list_move_tail(&desc->desc_node, &atchan->active_list);
+	atc_dostart(atchan, desc);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1958,7 +1920,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
 	  plat_dat->nr_channels);
 
-	dma_async_device_register(&atdma->dma_common);
+	err = dma_async_device_register(&atdma->dma_common);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+		goto err_dma_async_device_register;
+	}
 
 	/*
 	 * Do not return an error if the dmac node is not present in order to
@@ -1978,6 +1944,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
 	dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);

View File

@@ -186,13 +186,13 @@
 /* LLI == Linked List Item; aka DMA buffer descriptor */
 struct at_lli {
 	/* values that are not changed by hardware */
-	dma_addr_t	saddr;
-	dma_addr_t	daddr;
+	u32		saddr;
+	u32		daddr;
 	/* value that may get written back: */
 	u32		ctrla;
 	/* more values that are not changed by hardware */
 	u32		ctrlb;
-	dma_addr_t	dscr;	/* chain to next lli */
+	u32		dscr;	/* chain to next lli */
 };
 
 /**

View File

@@ -899,6 +899,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
 	tasklet_kill(&xor_dev->irq_tasklet);
 
 	clk_disable_unprepare(xor_dev->clk);
+	clk_disable_unprepare(xor_dev->reg_clk);
 
 	return 0;
 }

View File

@@ -1248,14 +1248,14 @@ static int pxad_init_phys(struct platform_device *op,
 		return -ENOMEM;
 
 	for (i = 0; i < nb_phy_chans; i++)
-		if (platform_get_irq(op, i) > 0)
+		if (platform_get_irq_optional(op, i) > 0)
 			nr_irq++;
 
 	for (i = 0; i < nb_phy_chans; i++) {
 		phy = &pdev->phys[i];
 		phy->base = pdev->base;
 		phy->idx = i;
-		irq = platform_get_irq(op, i);
+		irq = platform_get_irq_optional(op, i);
 		if ((nr_irq > 1) && (irq > 0))
 			ret = devm_request_irq(&op->dev, irq,
 					       pxad_chan_handler,

View File

@@ -299,6 +299,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 	ret = device_register(&tx_chn->common.chan_dev);
 	if (ret) {
 		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		put_device(&tx_chn->common.chan_dev);
 		tx_chn->common.chan_dev.parent = NULL;
 		goto err;
 	}
@@ -917,6 +918,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
 	ret = device_register(&rx_chn->common.chan_dev);
 	if (ret) {
 		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		put_device(&rx_chn->common.chan_dev);
 		rx_chn->common.chan_dev.parent = NULL;
 		goto err;
 	}
@@ -1048,6 +1050,7 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
 	ret = device_register(&rx_chn->common.chan_dev);
 	if (ret) {
 		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		put_device(&rx_chn->common.chan_dev);
 		rx_chn->common.chan_dev.parent = NULL;
 		goto err;
 	}

View File

@@ -780,7 +780,7 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
 	unsigned long addr = vmf->address;
-	struct vm_area_struct *vma;
+	struct svm_range_bo *svm_bo;
 	enum svm_work_list_ops op;
 	struct svm_range *parent;
 	struct svm_range *prange;
@@ -788,24 +788,42 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 	struct mm_struct *mm;
 	int r = 0;
 
-	vma = vmf->vma;
-	mm = vma->vm_mm;
+	svm_bo = vmf->page->zone_device_data;
+	if (!svm_bo) {
+		pr_debug("failed get device page at addr 0x%lx\n", addr);
+		return VM_FAULT_SIGBUS;
+	}
+	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
+		pr_debug("addr 0x%lx of process mm is detroyed\n", addr);
+		return VM_FAULT_SIGBUS;
+	}
 
-	p = kfd_lookup_process_by_mm(vma->vm_mm);
+	mm = svm_bo->eviction_fence->mm;
+	if (mm != vmf->vma->vm_mm)
+		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
+
+	p = kfd_lookup_process_by_mm(mm);
 	if (!p) {
 		pr_debug("failed find process at fault address 0x%lx\n", addr);
-		return VM_FAULT_SIGBUS;
+		r = VM_FAULT_SIGBUS;
+		goto out_mmput;
 	}
-	addr >>= PAGE_SHIFT;
+	if (READ_ONCE(p->svms.faulting_task) == current) {
+		pr_debug("skipping ram migration\n");
+		r = 0;
+		goto out_unref_process;
+	}
+
 	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+	addr >>= PAGE_SHIFT;
 
 	mutex_lock(&p->svms.lock);
 
 	prange = svm_range_from_addr(&p->svms, addr, &parent);
 	if (!prange) {
-		pr_debug("cannot find svm range at 0x%lx\n", addr);
+		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
 		r = -EFAULT;
-		goto out;
+		goto out_unlock_svms;
 	}
 
 	mutex_lock(&parent->migrate_mutex);
@@ -827,10 +845,10 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 		goto out_unlock_prange;
 	}
 
-	r = svm_migrate_vram_to_ram(prange, mm);
+	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm);
 	if (r)
-		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
-			 prange, prange->start, prange->last);
+		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
+			 r, prange->svms, prange, prange->start, prange->last);
 
 	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
 	if (p->xnack_enabled && parent == prange)
@@ -844,12 +862,13 @@ out_unlock_prange:
 	if (prange != parent)
 		mutex_unlock(&prange->migrate_mutex);
 	mutex_unlock(&parent->migrate_mutex);
-out:
+out_unlock_svms:
 	mutex_unlock(&p->svms.lock);
-	kfd_unref_process(p);
-
+out_unref_process:
 	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
+	kfd_unref_process(p);
+out_mmput:
+	mmput(mm);
 	return r ? VM_FAULT_SIGBUS : 0;
 }

View File

@@ -755,6 +755,7 @@ struct svm_range_list {
 	atomic_t			evicted_ranges;
 	struct delayed_work		restore_work;
 	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
+	struct task_struct		*faulting_task;
 };
 
 /* Process data */

View File

@@ -1489,9 +1489,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 
 		next = min(vma->vm_end, end);
 		npages = (next - addr) >> PAGE_SHIFT;
+		WRITE_ONCE(p->svms.faulting_task, current);
 		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
 					       addr, npages, &hmm_range,
 					       readonly, true, owner);
+		WRITE_ONCE(p->svms.faulting_task, NULL);
 		if (r) {
 			pr_debug("failed %d to get svm range pages\n", r);
 			goto unreserve_out;

View File

@@ -366,7 +366,9 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
 		if (((adev->pdev->device == 0x73A1) &&
 		     (adev->pdev->revision == 0x00)) ||
 		    ((adev->pdev->device == 0x73BF) &&
-		     (adev->pdev->revision == 0xCF)))
+		     (adev->pdev->revision == 0xCF)) ||
+		    ((adev->pdev->device == 0x7422) &&
+		     (adev->pdev->revision == 0x00)))
 			smu_baco->platform_support = false;
 
 }

View File

@@ -34,13 +34,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+	ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
 	src = obj->mm.pages->sgl;
 	dst = st->sgl;
-	for (i = 0; i < obj->mm.pages->nents; i++) {
+	for (i = 0; i < obj->mm.pages->orig_nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);

View File

@@ -383,7 +383,12 @@ static int __init vc4_drm_register(void)
 	if (ret)
 		return ret;
 
-	return platform_driver_register(&vc4_platform_driver);
+	ret = platform_driver_register(&vc4_platform_driver);
+	if (ret)
+		platform_unregister_drivers(component_drivers,
+					    ARRAY_SIZE(component_drivers));
+
+	return ret;
 }
 
 static void __exit vc4_drm_unregister(void)

View File

@@ -498,7 +498,7 @@ static int mousevsc_probe(struct hv_device *device,
 
 	ret = hid_add_device(hid_dev);
 	if (ret)
-		goto probe_err1;
+		goto probe_err2;
 
 	ret = hid_parse(hid_dev);

View File

@@ -105,7 +105,7 @@ static const struct regmap_config tcsr_mutex_config = {
 	.reg_bits		= 32,
 	.reg_stride		= 4,
 	.val_bits		= 32,
-	.max_register		= 0x40000,
+	.max_register		= 0x20000,
 	.fast_io		= true,
 };

View File

@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022 The Chromium OS Authors
+ *
+ * Support that applies to the combination of SDHCI and CQHCI, while not
+ * expressing a dependency between the two modules.
+ */
+
+#ifndef __MMC_HOST_SDHCI_CQHCI_H__
+#define __MMC_HOST_SDHCI_CQHCI_H__
+
+#include "cqhci.h"
+#include "sdhci.h"
+
+static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+	    host->mmc->cqe_private)
+		cqhci_deactivate(host->mmc);
+
+	sdhci_reset(host, mask);
+}
+
+#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */

View File

@@ -25,6 +25,7 @@
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include "sdhci-cqhci.h"
 #include "sdhci-pltfm.h"
 #include "sdhci-esdhc.h"
 #include "cqhci.h"
@@ -1273,7 +1274,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
 
 static void esdhc_reset(struct sdhci_host *host, u8 mask)
 {
-	sdhci_reset(host, mask);
+	sdhci_and_cqhci_reset(host, mask);
 
 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -1654,14 +1655,14 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 	if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
 		host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-	if (host->caps & MMC_CAP_8_BIT_DATA &&
+	if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
 	    imx_data->socdata->flags & ESDHC_FLAG_HS400)
 		host->mmc->caps2 |= MMC_CAP2_HS400;
 
 	if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
 		host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
 
-	if (host->caps & MMC_CAP_8_BIT_DATA &&
+	if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
 	    imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
 		host->mmc->caps2 |= MMC_CAP2_HS400_ES;
 		host->mmc_host_ops.hs400_enhanced_strobe =

View File

@@ -25,6 +25,7 @@
 #include <linux/firmware/xlnx-zynqmp.h>
 
 #include "cqhci.h"
+#include "sdhci-cqhci.h"
 #include "sdhci-pltfm.h"
 
 #define SDHCI_ARASAN_VENDOR_REGISTER	0x78
@@ -359,7 +360,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
 
-	sdhci_reset(host, mask);
+	sdhci_and_cqhci_reset(host, mask);
 
 	if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) {
 		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

View File

@@ -24,6 +24,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/ktime.h>
 
+#include "sdhci-cqhci.h"
 #include "sdhci-pltfm.h"
 #include "cqhci.h"
 
@@ -363,7 +364,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
 	u32 misc_ctrl, clk_ctrl, pad_ctrl;
 
-	sdhci_reset(host, mask);
+	sdhci_and_cqhci_reset(host, mask);
 
 	if (!(mask & SDHCI_RESET_ALL))
 		return;

View File

@@ -15,6 +15,7 @@
 #include <linux/sys_soc.h>
 
 #include "cqhci.h"
+#include "sdhci-cqhci.h"
 #include "sdhci-pltfm.h"
 
 /* CTL_CFG Registers */
@@ -378,7 +379,7 @@ static void sdhci_am654_reset(struct sdhci_host *host, u8 mask)
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
 
-	sdhci_reset(host, mask);
+	sdhci_and_cqhci_reset(host, mask);
 
 	if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) {
 		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
@@ -464,7 +465,7 @@ static struct sdhci_ops sdhci_am654_ops = {
 	.set_clock = sdhci_am654_set_clock,
 	.write_b = sdhci_am654_write_b,
 	.irq = sdhci_am654_cqhci_irq,
-	.reset = sdhci_reset,
+	.reset = sdhci_and_cqhci_reset,
 };
 
 static const struct sdhci_pltfm_data sdhci_am654_pdata = {
@@ -494,7 +495,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
 	.set_clock = sdhci_am654_set_clock,
 	.write_b = sdhci_am654_write_b,
 	.irq = sdhci_am654_cqhci_irq,
-	.reset = sdhci_reset,
+	.reset = sdhci_and_cqhci_reset,
 };
 
 static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {

View File

@@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
 
 	xgene_enet_napi_enable(pdata);
 	ret = xgene_enet_register_irq(ndev);
-	if (ret)
+	if (ret) {
+		xgene_enet_napi_disable(pdata);
 		return ret;
+	}
 
 	if (ndev->phydev) {
 		phy_start(ndev->phydev);

View File

@@ -585,6 +585,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
 
 	ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
 
+	memzero_explicit(&key_rec, sizeof(key_rec));
 	return ret;
 }
 
@@ -932,6 +933,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
 
 	ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
 
+	memzero_explicit(&sa_key_record, sizeof(sa_key_record));
 	return ret;
 }

View File

@@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
 			 u16 table_index)
 {
 	u16 packed_record[18];
+	int ret;
 
 	if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
 		return -EINVAL;
@@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
 
 	packed_record[16] = rec->key_len & 0x3;
 
-	return set_raw_ingress_record(hw, packed_record, 18, 2,
-				      ROWOFFSET_INGRESSSAKEYRECORD +
-				      table_index);
+	ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+				     ROWOFFSET_INGRESSSAKEYRECORD +
+				     table_index);
+
+	memzero_explicit(packed_record, sizeof(packed_record));
+	return ret;
 }
 
 int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,
 	ret = set_raw_egress_record(hw, packed_record, 8, 2,
 				    ROWOFFSET_EGRESSSAKEYRECORD + table_index);
 	if (unlikely(ret))
-		return ret;
+		goto clear_key;
 	ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
 				    ROWOFFSET_EGRESSSAKEYRECORD + table_index -
 				    32);
-	if (unlikely(ret))
-		return ret;
 
-	return 0;
+clear_key:
+	memzero_explicit(packed_record, sizeof(packed_record));
+	return ret;
 }
 
 int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,

View File

@@ -77,7 +77,7 @@ config BCMGENET
 	select BCM7XXX_PHY
 	select MDIO_BCM_UNIMAC
 	select DIMLIB
-	select BROADCOM_PHY if ARCH_BCM2835
+	select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL)
 	help
 	  This driver supports the built-in Ethernet MACs found in the
 	  Broadcom BCM7xxx Set Top Box family chipset.

View File

@@ -12605,8 +12605,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(fltr, head, hash) {
 		if (bnxt_fltr_match(fltr, new_fltr)) {
+			rc = fltr->sw_id;
 			rcu_read_unlock();
-			rc = 0;
 			goto err_free;
 		}
 	}

View File

@@ -132,7 +132,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
 	}
 
 reset_coalesce:
-	if (netif_running(dev)) {
+	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
 		if (update_stats) {
 			rc = bnxt_close_nic(bp, true, false);
 			if (!rc)

View File

@@ -1302,6 +1302,7 @@ static int cxgb_up(struct adapter *adap)
 		if (ret < 0) {
 			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
 			t3_intr_disable(adap);
+			quiesce_rx(adap);
 			free_irq_resources(adap);
 			err = ret;
 			goto out;

View File

@@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev)
 	 */
 	err = t4vf_update_port_info(pi);
 	if (err < 0)
-		return err;
+		goto err_unwind;
 
 	/*
 	 * Note that this interface is up and start everything up ...

View File

@@ -882,12 +882,21 @@ _return:
 	return err;
 }
 
+static int mac_remove(struct platform_device *pdev)
+{
+	struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+	platform_device_unregister(mac_dev->priv->eth_dev);
+	return 0;
+}
+
 static struct platform_driver mac_driver = {
 	.driver = {
 		.name		= KBUILD_MODNAME,
 		.of_match_table	= mac_match,
 	},
 	.probe		= mac_probe,
+	.remove		= mac_remove,
 };
 
 builtin_platform_driver(mac_driver);

View File

@@ -2477,6 +2477,7 @@ out_free:
 	for (i = 0; i < mp->rxq_count; i++)
 		rxq_deinit(mp->rxq + i);
 out:
+	napi_disable(&mp->napi);
 	free_irq(dev->irq, dev);
 
 	return err;

View File

@@ -1013,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
 			return err;
 	}
 
+	pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
+							   NIX_LF_CQ_OP_STATUS);
+
 	/* Initialize work queue for receive buffer refill */
 	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
 					sizeof(struct refill_work), GFP_KERNEL);

View File

@@ -337,6 +337,7 @@ struct otx2_nic {
 #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
 #define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
 	u64			flags;
+	u64			*cq_op_addr;
 
 	struct otx2_qset	qset;
 	struct otx2_hw		hw;

View File

@@ -13,6 +13,7 @@
 #include <linux/if_vlan.h>
 #include <linux/iommu.h>
 #include <net/ip.h>
+#include <linux/bitfield.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
@@ -1153,6 +1154,59 @@ int otx2_set_real_num_queues(struct net_device *netdev,
 }
 EXPORT_SYMBOL(otx2_set_real_num_queues);
 
+static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
+	"NIX_SQOPERR_OOR",
+	"NIX_SQOPERR_CTX_FAULT",
+	"NIX_SQOPERR_CTX_POISON",
+	"NIX_SQOPERR_DISABLED",
+	"NIX_SQOPERR_SIZE_ERR",
+	"NIX_SQOPERR_OFLOW",
+	"NIX_SQOPERR_SQB_NULL",
+	"NIX_SQOPERR_SQB_FAULT",
+	"NIX_SQOPERR_SQE_SZ_ZERO",
+};
+
+static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+	"NIX_MNQERR_SQ_CTX_FAULT",
+	"NIX_MNQERR_SQ_CTX_POISON",
+	"NIX_MNQERR_SQB_FAULT",
+	"NIX_MNQERR_SQB_POISON",
+	"NIX_MNQERR_TOTAL_ERR",
+	"NIX_MNQERR_LSO_ERR",
+	"NIX_MNQERR_CQ_QUERY_ERR",
+	"NIX_MNQERR_MAX_SQE_SIZE_ERR",
+	"NIX_MNQERR_MAXLEN_ERR",
+	"NIX_MNQERR_SQE_SIZEM1_ZERO",
+};
+
+static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+	"NIX_SND_STATUS_GOOD",
+	"NIX_SND_STATUS_SQ_CTX_FAULT",
+	"NIX_SND_STATUS_SQ_CTX_POISON",
+	"NIX_SND_STATUS_SQB_FAULT",
+	"NIX_SND_STATUS_SQB_POISON",
+	"NIX_SND_STATUS_HDR_ERR",
+	"NIX_SND_STATUS_EXT_ERR",
+	"NIX_SND_STATUS_JUMP_FAULT",
+	"NIX_SND_STATUS_JUMP_POISON",
+	"NIX_SND_STATUS_CRC_ERR",
+	"NIX_SND_STATUS_IMM_ERR",
+	"NIX_SND_STATUS_SG_ERR",
+	"NIX_SND_STATUS_MEM_ERR",
+	"NIX_SND_STATUS_INVALID_SUBDC",
+	"NIX_SND_STATUS_SUBDC_ORDER_ERR",
+	"NIX_SND_STATUS_DATA_FAULT",
+	"NIX_SND_STATUS_DATA_POISON",
+	"NIX_SND_STATUS_NPC_DROP_ACTION",
+	"NIX_SND_STATUS_LOCK_VIOL",
+	"NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+	"NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+	"NIX_SND_STATUS_NPC_MCAST_ABORT",
+	"NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+	"NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+	"NIX_SND_STATUS_SEND_STATS_ERR",
+};
+
 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 {
 	struct otx2_nic *pf = data;
@@ -1186,46 +1240,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 
 	/* SQ */
 	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+		u8 sq_op_err_code, mnq_err_code, snd_err_code;
+
+		/* Below debug registers captures first errors corresponding to
+		 * those registers. We don't have to check against SQ qid as
+		 * these are fatal errors.
+		 */
 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
 		val = otx2_atomic64_add((qidx << 44), ptr);
 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
 			     (val & NIX_SQINT_BITS));
 
-		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
-			continue;
-
 		if (val & BIT_ULL(42)) {
 			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
-		} else {
-			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
-				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
-					   qidx,
-					   otx2_read64(pf,
-						       NIX_LF_SQ_OP_ERR_DBG));
-				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
-					     BIT_ULL(44));
-			}
-			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
-				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
-					   qidx,
-					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
-				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
-					     BIT_ULL(44));
-			}
-			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
-				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
-					   qidx,
-					   otx2_read64(pf,
-						       NIX_LF_SEND_ERR_DBG));
-				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
-					     BIT_ULL(44));
-			}
-			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
-				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
-					   qidx);
+			goto done;
 		}
 
+		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
+		if (!(sq_op_err_dbg & BIT(44)))
+			goto chk_mnq_err_dbg;
+
+		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+		netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+			   qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
+
+		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
+			goto chk_mnq_err_dbg;
+
+		/* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure.
+		 * TODO: But we are in irq context. How to call mbox functions which does sleep
+		 */
+
+chk_mnq_err_dbg:
+		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
+		if (!(mnq_err_dbg & BIT(44)))
+			goto chk_snd_err_dbg;
+
+		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+		netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+			   qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
+		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+chk_snd_err_dbg:
+		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+		if (snd_err_dbg & BIT(44)) {
+			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+			netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+				   qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
+			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+		}
+
+done:
+		/* Print values and reset */
+		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+				   qidx);
+
 		schedule_work(&pf->reset_task);
 	}

View File

@@ -274,4 +274,61 @@ enum nix_sqint_e {
 			    BIT_ULL(NIX_SQINT_SEND_ERR) | \
 			    BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
 
+enum nix_sqoperr_e {
+	NIX_SQOPERR_OOR = 0,
+	NIX_SQOPERR_CTX_FAULT = 1,
+	NIX_SQOPERR_CTX_POISON = 2,
+	NIX_SQOPERR_DISABLED = 3,
+	NIX_SQOPERR_SIZE_ERR = 4,
+	NIX_SQOPERR_OFLOW = 5,
+	NIX_SQOPERR_SQB_NULL = 6,
+	NIX_SQOPERR_SQB_FAULT = 7,
+	NIX_SQOPERR_SQE_SZ_ZERO = 8,
+	NIX_SQOPERR_MAX,
+};
+
+enum nix_mnqerr_e {
+	NIX_MNQERR_SQ_CTX_FAULT = 0,
+	NIX_MNQERR_SQ_CTX_POISON = 1,
+	NIX_MNQERR_SQB_FAULT = 2,
+	NIX_MNQERR_SQB_POISON = 3,
+	NIX_MNQERR_TOTAL_ERR = 4,
+	NIX_MNQERR_LSO_ERR = 5,
+	NIX_MNQERR_CQ_QUERY_ERR = 6,
+	NIX_MNQERR_MAX_SQE_SIZE_ERR = 7,
+	NIX_MNQERR_MAXLEN_ERR = 8,
+	NIX_MNQERR_SQE_SIZEM1_ZERO = 9,
+	NIX_MNQERR_MAX,
+};
+
+enum nix_snd_status_e {
+	NIX_SND_STATUS_GOOD = 0x0,
+	NIX_SND_STATUS_SQ_CTX_FAULT = 0x1,
+	NIX_SND_STATUS_SQ_CTX_POISON = 0x2,
+	NIX_SND_STATUS_SQB_FAULT = 0x3,
+	NIX_SND_STATUS_SQB_POISON = 0x4,
+	NIX_SND_STATUS_HDR_ERR = 0x5,
+	NIX_SND_STATUS_EXT_ERR = 0x6,
+	NIX_SND_STATUS_JUMP_FAULT = 0x7,
+	NIX_SND_STATUS_JUMP_POISON = 0x8,
+	NIX_SND_STATUS_CRC_ERR = 0x9,
+	NIX_SND_STATUS_IMM_ERR = 0x10,
+	NIX_SND_STATUS_SG_ERR = 0x11,
+	NIX_SND_STATUS_MEM_ERR = 0x12,
+	NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+	NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+	NIX_SND_STATUS_DATA_FAULT = 0x15,
+	NIX_SND_STATUS_DATA_POISON = 0x16,
+	NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+	NIX_SND_STATUS_LOCK_VIOL = 0x18,
+	NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+	NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+	NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+	NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+	NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+	NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+	NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
+	NIX_SND_STATUS_MAX,
+};
+
 #endif /* OTX2_STRUCT_H */

View File

@@ -18,6 +18,31 @@
 
 #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
 
+static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+				 struct otx2_cq_queue *cq)
+{
+	u64 incr = (u64)(cq->cq_idx) << 32;
+	u64 status;
+
+	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+		dev_err(pfvf->dev, "CQ stopped due to error");
+		return -EINVAL;
+	}
+
+	cq->cq_tail = status & 0xFFFFF;
+	cq->cq_head = (status >> 20) & 0xFFFFF;
+	if (cq->cq_tail < cq->cq_head)
+		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+				cq->cq_tail;
+	else
+		cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+	return 0;
+}
+
 static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
 {
 	struct nix_cqe_hdr_s *cqe_hdr;
@@ -318,7 +343,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 	struct nix_cqe_rx_s *cqe;
 	int processed_cqe = 0;
 
-	while (likely(processed_cqe < budget)) {
+	if (cq->pend_cqe >= budget)
+		goto process_cqe;
+
+	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+		return 0;
+
+process_cqe:
+	while (likely(processed_cqe < budget) && cq->pend_cqe) {
 		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
 		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
 		    !cqe->sg.seg_addr) {
@@ -334,6 +366,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
 		cqe->sg.seg_addr = 0x00;
 		processed_cqe++;
+		cq->pend_cqe--;
 	}
 
 	/* Free CQEs to HW */
@@ -368,7 +401,14 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
 	struct nix_cqe_tx_s *cqe;
 	int processed_cqe = 0;
 
-	while (likely(processed_cqe < budget)) {
+	if (cq->pend_cqe >= budget)
+		goto process_cqe;
+
+	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+		return 0;
+
+process_cqe:
+	while (likely(processed_cqe < budget) && cq->pend_cqe) {
 		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
 		if (unlikely(!cqe)) {
 			if (!processed_cqe)
@@ -380,6 +420,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
 
 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
 		processed_cqe++;
+		cq->pend_cqe--;
 	}
 
 	/* Free CQEs to HW */
@@ -936,10 +977,16 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 	int processed_cqe = 0;
 	u64 iova, pa;
 
-	while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
-		if (!cqe->sg.subdc)
-			continue;
+	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+		return;
+
+	while (cq->pend_cqe) {
+		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
 		processed_cqe++;
+		cq->pend_cqe--;
+
+		if (!cqe)
+			continue;
 		if (cqe->sg.segs > 1) {
 			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
 			continue;
@@ -965,7 +1012,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 
 	sq = &pfvf->qset.sq[cq->cint_idx];
 
-	while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+		return;
+
+	while (cq->pend_cqe) {
+		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+		processed_cqe++;
+		cq->pend_cqe--;
+
+		if (!cqe)
+			continue;
 		sg = &sq->sg[cqe->comp.sqe_id];
 		skb = (struct sk_buff *)sg->skb;
 		if (skb) {
@@ -973,7 +1029,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 			dev_kfree_skb_any(skb);
 			sg->skb = (u64)NULL;
 		}
-		processed_cqe++;
 	}
 
 	/* Free CQEs to HW */
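
The new otx2_nix_cq_op_status() above derives the pending CQE count from the 20-bit head and tail indices packed into NIX_LF_CQ_OP_STATUS, instead of trusting a software counter. A small standalone sketch of that wrap-aware ring arithmetic (simplified; the real code masks the fields out of the 64-bit status register):

    #include <stdint.h>
    #include <stdio.h>

    /* head and tail are ring indices; the pending count has to account
     * for tail having wrapped past the end of a cqe_cnt-entry ring. */
    static uint32_t pending_cqe(uint32_t cqe_cnt, uint32_t head, uint32_t tail)
    {
        if (tail < head)
            return (cqe_cnt - head) + tail; /* wrapped */
        return tail - head;
    }

    int main(void)
    {
        printf("%u\n", pending_cqe(1024, 10, 14));  /* 4 */
        printf("%u\n", pending_cqe(1024, 1020, 2)); /* 6: wrapped case */
        return 0;
    }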

View File

@@ -56,6 +56,9 @@
  */
 #define CQ_QCOUNT_DEFAULT	1
 
+#define CQ_OP_STAT_OP_ERR	63
+#define CQ_OP_STAT_CQ_ERR	46
+
 struct queue_stats {
 	u64	bytes;
 	u64	pkts;
@@ -122,6 +125,8 @@ struct otx2_cq_queue {
 	u16			pool_ptrs;
 	u32			cqe_cnt;
 	u32			cq_head;
+	u32			cq_tail;
+	u32			pend_cqe;
 	void			*cqe_base;
 	struct qmem		*cqe;
 	struct otx2_pool	*rbpool;

View File

@@ -776,6 +776,7 @@ tx_done:
 int prestera_rxtx_switch_init(struct prestera_switch *sw)
 {
 	struct prestera_rxtx *rxtx;
+	int err;
 
 	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
 	if (!rxtx)
@@ -783,7 +784,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw)
 
 	sw->rxtx = rxtx;
 
-	return prestera_sdma_switch_init(sw);
+	err = prestera_sdma_switch_init(sw);
+	if (err)
+		kfree(rxtx);
+
+	return err;
 }
 
 void prestera_rxtx_switch_fini(struct prestera_switch *sw)

View File

@@ -1699,12 +1699,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int i;
 
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		while (down_trylock(&cmd->sem))
+	for (i = 0; i < cmd->max_reg_cmds; i++) {
+		while (down_trylock(&cmd->sem)) {
 			mlx5_cmd_trigger_completions(dev);
+			cond_resched();
+		}
+	}
 
-	while (down_trylock(&cmd->pages_sem))
+	while (down_trylock(&cmd->pages_sem)) {
 		mlx5_cmd_trigger_completions(dev);
+		cond_resched();
+	}
 
 	/* Unlock cmdif */
 	up(&cmd->pages_sem);
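
The added cond_resched() keeps the flush loop above from monopolizing the CPU while it waits for the triggered completions to release the command semaphores. A userspace analogue of the shape (assumed mapping only: down_trylock -> sem_trywait, cond_resched -> sched_yield):

    #include <semaphore.h>
    #include <sched.h>
    #include <stdio.h>

    /* stand-in for mlx5_cmd_trigger_completions(): completing the
     * outstanding command eventually releases the semaphore */
    static void trigger_completions(sem_t *sem)
    {
        sem_post(sem);
    }

    int main(void)
    {
        sem_t sem;

        sem_init(&sem, 0, 0);       /* slot currently held by a command */
        while (sem_trywait(&sem) != 0) {
            trigger_completions(&sem);
            sched_yield();          /* the added cond_resched() */
        }
        puts("flushed");
        sem_destroy(&sem);
        return 0;
    }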

View File

@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
 	return err;
 }
 
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct net_device *upper = info->upper_dev;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+		return 0;
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		struct mlx5_core_dev *mdev;
+		struct mlx5e_priv *priv;
+
+		if (!mlx5e_eswitch_rep(lower))
+			continue;
+
+		priv = netdev_priv(lower);
+		mdev = priv->mdev;
+		if (!mlx5_lag_is_active(mdev))
+			return -EAGAIN;
+		if (!mlx5_lag_is_shared_fdb(mdev))
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 						unsigned long event, void *ptr)
 {
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
+		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
 		break;
 
 	case NETDEV_CHANGEUPPER:

View File

@@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
 		     sizeof(dest->vport.num), hash);
 	hash = jhash((const void *)&dest->vport.vhca_id,
 		     sizeof(dest->vport.num), hash);
-	if (dest->vport.pkt_reformat)
-		hash = jhash(dest->vport.pkt_reformat,
-			     sizeof(*dest->vport.pkt_reformat),
+	if (flow_act->pkt_reformat)
+		hash = jhash(flow_act->pkt_reformat,
+			     sizeof(*flow_act->pkt_reformat),
 			     hash);
 	return hash;
 }
@@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
 	if (ret)
 		return ret;
 
-	return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
-	       memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
-		      sizeof(*dest1->vport.pkt_reformat)) : 0;
+	if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+		return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+			      sizeof(*flow_act1->pkt_reformat));
+
+	return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
 }
 
 static int
static int static int

View File

@@ -7125,9 +7125,8 @@ static int s2io_card_up(struct s2io_nic *sp)
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
 				  dev->name);
-			s2io_reset(sp);
-			free_rx_buffers(sp);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err_fill_buff;
 		}
 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
 			  ring->rx_bufs_left);
@@ -7165,18 +7164,16 @@ static int s2io_card_up(struct s2io_nic *sp)
 	/* Enable Rx Traffic and interrupts on the NIC */
 	if (start_nic(sp)) {
 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
-		s2io_reset(sp);
-		free_rx_buffers(sp);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out;
 	}
 
 	/* Add interrupt service routine */
 	if (s2io_add_isr(sp) != 0) {
 		if (sp->config.intr_type == MSI_X)
 			s2io_rem_isr(sp);
-		s2io_reset(sp);
-		free_rx_buffers(sp);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out;
 	}
 
 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
@@ -7196,6 +7193,20 @@ static int s2io_card_up(struct s2io_nic *sp)
 	}
 
 	return 0;
+
+err_out:
+	if (config->napi) {
+		if (config->intr_type == MSI_X) {
+			for (i = 0; i < sp->config.rx_ring_num; i++)
+				napi_disable(&sp->mac_control.rings[i].napi);
+		} else {
+			napi_disable(&sp->napi);
+		}
+	}
+err_fill_buff:
+	s2io_reset(sp);
+	free_rx_buffers(sp);
+	return ret;
 }
 
 /**
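
The s2io change above folds three duplicated error paths into goto-based unwinding, with the NAPI disable step taken only on the paths that had enabled it. A minimal sketch of that label layout (function names are illustrative, not the driver's):

    #include <stdio.h>

    static int setup_rings(void) { return 0; }
    static int start_nic(void)   { return -1; } /* force the failure path */

    /* Later failures jump to labels that undo the earlier steps, so every
     * error exit resets the device and frees the buffers exactly once. */
    static int card_up(void)
    {
        int ret;

        ret = setup_rings();
        if (ret)
            goto err_fill_buff;

        ret = start_nic();
        if (ret)
            goto err_out;

        return 0;

    err_out:
        puts("napi_disable");            /* only needed past this point */
    err_fill_buff:
        puts("reset + free_rx_buffers"); /* common teardown */
        return ret;
    }

    int main(void)
    {
        return card_up() ? 1 : 0;
    }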

View File

@@ -899,6 +899,7 @@ static int nixge_open(struct net_device *ndev)
 err_rx_irq:
 	free_irq(priv->tx_irq, ndev);
 err_tx_irq:
+	napi_disable(&priv->napi);
 	phy_stop(phy);
 	phy_disconnect(phy);
 	tasklet_kill(&priv->dma_err_tasklet);

View File

@@ -593,7 +593,6 @@ static int ehl_common_data(struct pci_dev *pdev,
 {
 	plat->rx_queues_to_use = 8;
 	plat->tx_queues_to_use = 8;
-	plat->clk_ptp_rate = 200000000;
 	plat->use_phy_wol = 1;
 
 	plat->safety_feat_cfg->tsoee = 1;
@@ -618,6 +617,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
 	plat->serdes_powerup = intel_serdes_powerup;
 	plat->serdes_powerdown = intel_serdes_powerdown;
 
+	plat->clk_ptp_rate = 204800000;
+
 	return ehl_common_data(pdev, plat);
 }
 
@@ -631,6 +632,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
 	plat->bus_id = 1;
 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
 
+	plat->clk_ptp_rate = 204800000;
+
 	return ehl_common_data(pdev, plat);
 }
 
@@ -647,6 +650,8 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
 	plat->bus_id = 2;
 	plat->addr64 = 32;
 
+	plat->clk_ptp_rate = 200000000;
+
 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
 
 	return ehl_common_data(pdev, plat);
@@ -686,6 +691,8 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
 	plat->bus_id = 3;
 	plat->addr64 = 32;
 
+	plat->clk_ptp_rate = 200000000;
+
 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
 
 	return ehl_common_data(pdev, plat);
@@ -721,7 +728,8 @@ static int tgl_common_data(struct pci_dev *pdev,
 {
 	plat->rx_queues_to_use = 6;
 	plat->tx_queues_to_use = 4;
-	plat->clk_ptp_rate = 200000000;
+	plat->clk_ptp_rate = 204800000;
+	plat->speed_mode_2500 = intel_speed_mode_2500;
 
 	plat->safety_feat_cfg->tsoee = 1;
 	plat->safety_feat_cfg->mrxpee = 0;
@@ -741,7 +749,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
-	plat->speed_mode_2500 = intel_speed_mode_2500;
 	plat->serdes_powerup = intel_serdes_powerup;
 	plat->serdes_powerdown = intel_serdes_powerdown;
 	return tgl_common_data(pdev, plat);
@@ -756,7 +763,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 2;
 	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
-	plat->speed_mode_2500 = intel_speed_mode_2500;
 	plat->serdes_powerup = intel_serdes_powerup;
 	plat->serdes_powerdown = intel_serdes_powerdown;
 	return tgl_common_data(pdev, plat);

View File

@@ -75,20 +75,24 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 		plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
 						   sizeof(*plat->mdio_bus_data),
 						   GFP_KERNEL);
-		if (!plat->mdio_bus_data)
-			return -ENOMEM;
+		if (!plat->mdio_bus_data) {
+			ret = -ENOMEM;
+			goto err_put_node;
+		}
+
 		plat->mdio_bus_data->needs_reset = true;
 	}
 
 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
-	if (!plat->dma_cfg)
-		return -ENOMEM;
+	if (!plat->dma_cfg) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
 
 	/* Enable pci device */
 	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
-		return ret;
+		goto err_put_node;
 	}
 
 	/* Get the base address of device */
@@ -97,7 +101,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 			continue;
 		ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
 		if (ret)
-			return ret;
+			goto err_disable_device;
 		break;
 	}
 
@@ -108,7 +112,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	phy_mode = device_get_phy_mode(&pdev->dev);
 	if (phy_mode < 0) {
 		dev_err(&pdev->dev, "phy_mode not found\n");
-		return phy_mode;
+		ret = phy_mode;
+		goto err_disable_device;
 	}
 
 	plat->phy_interface = phy_mode;
@@ -125,6 +130,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (res.irq < 0) {
 		dev_err(&pdev->dev, "IRQ macirq not found\n");
 		ret = -ENODEV;
+		goto err_disable_msi;
 	}
 
 	res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
@@ -137,15 +143,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (res.lpi_irq < 0) {
 		dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
 		ret = -ENODEV;
+		goto err_disable_msi;
 	}
 
-	return stmmac_dvr_probe(&pdev->dev, plat, &res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+	if (ret)
+		goto err_disable_msi;
+
+	return ret;
+
+err_disable_msi:
+	pci_disable_msi(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+err_put_node:
+	of_node_put(plat->mdio_node);
+	return ret;
 }
 
 static void loongson_dwmac_remove(struct pci_dev *pdev)
 {
+	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 	int i;
 
+	of_node_put(priv->plat->mdio_node);
 	stmmac_dvr_remove(&pdev->dev);
 
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -155,6 +177,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
 			break;
 	}
 
+	pci_disable_msi(pdev);
 	pci_disable_device(pdev);
 }

View File

@@ -272,11 +272,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
 	if (ret)
 		return ret;
 
-	devm_add_action_or_reset(dwmac->dev,
-				 (void(*)(void *))clk_disable_unprepare,
-				 dwmac->rgmii_tx_clk);
-
-	return 0;
+	return devm_add_action_or_reset(dwmac->dev,
+					(void(*)(void *))clk_disable_unprepare,
+					clk);
 }
 
 static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)

View File

@@ -856,6 +856,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
 
 err_cleanup:
 	if (!cpsw->usage_count) {
+		napi_disable(&cpsw->napi_rx);
+		napi_disable(&cpsw->napi_tx);
 		cpdma_ctlr_stop(cpsw->dma);
 		cpsw_destroy_xdp_rxqs(cpsw);
 	}

View File

@@ -1302,12 +1302,15 @@ static int tsi108_open(struct net_device *dev)
 
 	data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
 					  &data->rxdma, GFP_KERNEL);
-	if (!data->rxring)
+	if (!data->rxring) {
+		free_irq(data->irq_num, dev);
 		return -ENOMEM;
+	}
 
 	data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
 					  &data->txdma, GFP_KERNEL);
 	if (!data->txring) {
+		free_irq(data->irq_num, dev);
 		dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
 				  data->rxdma);
 		return -ENOMEM;

View File

@@ -534,7 +534,7 @@ static int bpq_device_event(struct notifier_block *this,
 	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;
 
-	if (!dev_is_ethdev(dev))
+	if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {

View File

@@ -1386,7 +1386,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
 	return NULL;
 }
 
-static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
+static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
+					 bool active)
 {
 	struct macsec_rx_sc *rx_sc;
 	struct macsec_dev *macsec;
@@ -1410,7 +1411,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
 	}
 
 	rx_sc->sci = sci;
-	rx_sc->active = true;
+	rx_sc->active = active;
 	refcount_set(&rx_sc->refcnt, 1);
 
 	secy = &macsec_priv(dev)->secy;
@@ -1819,6 +1820,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 		       secy->key_len);
 
 		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+		memzero_explicit(ctx.sa.key, secy->key_len);
 		if (err)
 			goto cleanup;
 	}
@@ -1863,7 +1865,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_rx_sc *rx_sc;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
 	struct macsec_secy *secy;
-	bool was_active;
+	bool active = true;
 	int ret;
 
 	if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1885,16 +1887,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	secy = &macsec_priv(dev)->secy;
 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
 
-	rx_sc = create_rx_sc(dev, sci);
+	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+	rx_sc = create_rx_sc(dev, sci, active);
 	if (IS_ERR(rx_sc)) {
 		rtnl_unlock();
 		return PTR_ERR(rx_sc);
 	}
 
-	was_active = rx_sc->active;
-	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
-		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
-
 	if (macsec_is_offloaded(netdev_priv(dev))) {
 		const struct macsec_ops *ops;
 		struct macsec_context ctx;
@@ -1918,7 +1919,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 
 cleanup:
-	rx_sc->active = was_active;
+	del_rx_sc(secy, sci);
+	free_rx_sc(rx_sc);
 	rtnl_unlock();
 	return ret;
 }
@@ -2061,6 +2063,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 		       secy->key_len);
 
 		err = macsec_offload(ops->mdo_add_txsa, &ctx);
+		memzero_explicit(ctx.sa.key, secy->key_len);
 		if (err)
 			goto cleanup;
 	}
@@ -2557,7 +2560,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	int i;
 
-	if (secy->n_rx_sc > 0)
+	if (secy->rx_sc)
 		return true;
 
 	for (i = 0; i < MACSEC_NUM_AN; i++)

View File

@@ -1521,8 +1521,10 @@ destroy_macvlan_port:
 	/* the macvlan port may be freed by macvlan_uninit when fail to register.
 	 * so we destroy the macvlan port only when it's valid.
 	 */
-	if (create && macvlan_port_get_rtnl(lowerdev))
+	if (create && macvlan_port_get_rtnl(lowerdev)) {
+		macvlan_flush_sources(port, vlan);
 		macvlan_port_destroy(port->dev);
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(macvlan_common_newlink);

View File

@@ -632,6 +632,7 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
 
 	list_del(&flow->list);
 	clear_bit(flow->index, bitmap);
+	memzero_explicit(flow->key, sizeof(flow->key));
 	kfree(flow);
 }

View File

@@ -1949,17 +1949,25 @@ drop:
 					  skb_headlen(skb));
 
 		if (unlikely(headlen > skb_headlen(skb))) {
+			WARN_ON_ONCE(1);
+			err = -ENOMEM;
 			atomic_long_inc(&tun->dev->rx_dropped);
+napi_busy:
 			napi_free_frags(&tfile->napi);
 			rcu_read_unlock();
 			mutex_unlock(&tfile->napi_mutex);
-			WARN_ON(1);
-			return -ENOMEM;
+			return err;
 		}
 
-		local_bh_disable();
-		napi_gro_frags(&tfile->napi);
-		local_bh_enable();
+		if (likely(napi_schedule_prep(&tfile->napi))) {
+			local_bh_disable();
+			napi_gro_frags(&tfile->napi);
+			napi_complete(&tfile->napi);
+			local_bh_enable();
+		} else {
+			err = -EBUSY;
+			goto napi_busy;
+		}
+
 		mutex_unlock(&tfile->napi_mutex);
 	} else if (tfile->napi_enabled) {
 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
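
The tun fix above only calls napi_gro_frags() once napi_schedule_prep() confirms this context owns the NAPI instance, and backs out with -EBUSY otherwise. A rough userspace sketch of that try-own/process/release shape, with an atomic flag standing in for the NAPI state bits:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag napi_owned = ATOMIC_FLAG_INIT;

    /* Process only if we can take ownership first; otherwise fail with
     * -EBUSY instead of touching state another context owns. */
    static int process_frags(void)
    {
        if (atomic_flag_test_and_set(&napi_owned))
            return -16;                 /* -EBUSY */

        /* ...the napi_gro_frags() equivalent would run here... */

        atomic_flag_clear(&napi_owned); /* napi_complete() */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", process_frags()); /* 0: ownership was free */
        return 0;
    }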

View File

@@ -325,6 +325,7 @@ static int lapbeth_open(struct net_device *dev)
 
 	err = lapb_register(dev, &lapbeth_callbacks);
 	if (err != LAPB_OK) {
+		napi_disable(&lapbeth->napi);
 		pr_err("lapb_register error: %d\n", err);
 		return -ENODEV;
 	}
@@ -446,7 +447,7 @@ static int lapbeth_device_event(struct notifier_block *this,
 	if (dev_net(dev) != &init_net)
 		return NOTIFY_DONE;
 
-	if (!dev_is_ethdev(dev))
+	if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {

View File

@@ -247,11 +247,7 @@ int ath11k_regd_update(struct ath11k *ar)
 		goto err;
 	}
 
-	rtnl_lock();
-	wiphy_lock(ar->hw->wiphy);
-	ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
-	wiphy_unlock(ar->hw->wiphy);
-	rtnl_unlock();
+	ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
 
 	kfree(regd_copy);

View File

@@ -232,6 +232,7 @@ static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
  */
 static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
 {
+	enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
 	union acpi_object *object;
 	acpi_handle handle_acpi;
 
@@ -242,12 +243,16 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
 	}
 
 	object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
+	if (!object)
+		goto default_ret;
 
-	if (object && object->integer.value == 3)
-		return IPC_PCIE_D3L2;
+	if (object->integer.value == 3)
+		sleep_state = IPC_PCIE_D3L2;
+
+	kfree(object);
 
 default_ret:
-	return IPC_PCIE_D0L12;
+	return sleep_state;
 }
 
 static int ipc_pcie_probe(struct pci_dev *pci,

View File

@@ -167,6 +167,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
 	iosm_dev->max_mtu = ETH_MAX_MTU;
 
 	iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	iosm_dev->needs_free_netdev = true;
 
 	iosm_dev->netdev_ops = &ipc_inm_ops;
 }

View File

@@ -582,6 +582,7 @@ static void mhi_mbim_setup(struct net_device *ndev)
 	ndev->min_mtu = ETH_MIN_MTU;
 	ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
 	ndev->tx_queue_len = 1000;
+	ndev->needs_free_netdev = true;
 }
 
 static const struct wwan_ops mhi_mbim_wwan_ops = {

View File

@@ -280,7 +280,8 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev,
 }
 
 static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
-	{ .soc_id = "mt7621", .revision = "E2" }
+	{ .soc_id = "mt7621", .revision = "E2" },
+	{ /* sentinel */ }
 };
 
 static const struct regmap_config mt7621_pci_phy_regmap_config = {

View File

@@ -532,6 +532,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
 		ret = of_property_read_u32(child, "reg", &index);
 		if (ret || index > usbphyc->nphys) {
 			dev_err(&phy->dev, "invalid reg property: %d\n", ret);
+			if (!ret)
+				ret = -EINVAL;
 			goto put_child;
 		}

View File

@@ -984,8 +984,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
 	wwan_rfkill = NULL;
 	rfkill2_count = 0;
 
-	if (hp_wmi_rfkill_setup(device))
-		hp_wmi_rfkill2_setup(device);
+	/*
+	 * In pre-2009 BIOS, command 1Bh return 0x4 to indicate that
+	 * BIOS no longer controls the power for the wireless
+	 * devices. All features supported by this command will no
+	 * longer be supported.
+	 */
+	if (!hp_wmi_bios_2009_later()) {
+		if (hp_wmi_rfkill_setup(device))
+			hp_wmi_rfkill2_setup(device);
+	}
 
 	thermal_profile_setup();

View File

@@ -315,6 +315,9 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
 	if (swrm_wait_for_wr_fifo_avail(swrm))
 		return SDW_CMD_FAIL_OTHER;
 
+	if (cmd_id == SWR_BROADCAST_CMD_ID)
+		reinit_completion(&swrm->broadcast);
+
 	/* Its assumed that write is okay as we do not get any status back */
 	swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
 
@@ -348,6 +351,12 @@ static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
 
 	val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
 
+	/*
+	 * Check for outstanding cmd wrt. write fifo depth to avoid
+	 * overflow as read will also increase write fifo cnt.
+	 */
+	swrm_wait_for_wr_fifo_avail(swrm);
+
 	/* wait for FIFO RD to complete to avoid overflow */
 	usleep_range(100, 105);
 	swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);

View File

@@ -85,11 +85,12 @@ static int tb_path_find_src_hopid(struct tb_port *src,
  * @dst_hopid: HopID to the @dst (%-1 if don't care)
  * @last: Last port is filled here if not %NULL
  * @name: Name of the path
+ * @alloc_hopid: Allocate HopIDs for the ports
  *
 - * Follows a path starting from @src and @src_hopid to the last output
 - * port of the path. Allocates HopIDs for the visited ports. Call
 - * tb_path_free() to release the path and allocated HopIDs when the path
 - * is not needed anymore.
 + * Follows a path starting from @src and @src_hopid to the last output
 + * port of the path. Allocates HopIDs for the visited ports (if
 + * @alloc_hopid is true). Call tb_path_free() to release the path and
 + * allocated HopIDs when the path is not needed anymore.
  *
  * Note function discovers also incomplete paths so caller should check
  * that the @dst port is the expected one. If it is not, the path can be
@@ -99,7 +100,8 @@ static int tb_path_find_src_hopid(struct tb_port *src,
  */
 struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 				 struct tb_port *dst, int dst_hopid,
-				 struct tb_port **last, const char *name)
+				 struct tb_port **last, const char *name,
+				 bool alloc_hopid)
 {
 	struct tb_port *out_port;
 	struct tb_regs_hop hop;
@@ -156,6 +158,7 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 	path->tb = src->sw->tb;
 	path->path_length = num_hops;
 	path->activated = true;
+	path->alloc_hopid = alloc_hopid;
 
 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
 	if (!path->hops) {
@@ -177,13 +180,14 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 			goto err;
 		}
 
-		if (tb_port_alloc_in_hopid(p, h, h) < 0)
+		if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
 			goto err;
 
 		out_port = &sw->ports[hop.out_port];
 		next_hop = hop.next_hop;
 
-		if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+		if (alloc_hopid &&
+		    tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
 			tb_port_release_in_hopid(p, h);
 			goto err;
 		}
@@ -263,6 +267,8 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 		return NULL;
 	}
 
+	path->alloc_hopid = true;
+
 	in_hopid = src_hopid;
 	out_port = NULL;
@@ -345,17 +351,19 @@ err:
  */
 void tb_path_free(struct tb_path *path)
 {
-	int i;
+	if (path->alloc_hopid) {
+		int i;
 
-	for (i = 0; i < path->path_length; i++) {
-		const struct tb_path_hop *hop = &path->hops[i];
-
-		if (hop->in_port)
-			tb_port_release_in_hopid(hop->in_port,
-						 hop->in_hop_index);
-		if (hop->out_port)
-			tb_port_release_out_hopid(hop->out_port,
-						  hop->next_hop_index);
+		for (i = 0; i < path->path_length; i++) {
+			const struct tb_path_hop *hop = &path->hops[i];
+
+			if (hop->in_port)
+				tb_port_release_in_hopid(hop->in_port,
+							 hop->in_hop_index);
+			if (hop->out_port)
+				tb_port_release_out_hopid(hop->out_port,
+							  hop->next_hop_index);
+		}
 	}
 
 	kfree(path->hops);

View File

@@ -105,10 +105,37 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
 	}
 }
 
-static void tb_discover_tunnels(struct tb_switch *sw)
+static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *p;
+
+	list_for_each_entry(p, &tcm->dp_resources, list) {
+		if (p == port)
+			return;
+	}
+
+	tb_port_dbg(port, "DP %s resource available discovered\n",
+		    tb_port_is_dpin(port) ? "IN" : "OUT");
+	list_add_tail(&port->list, &tcm->dp_resources);
+}
+
+static void tb_discover_dp_resources(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+		if (tb_tunnel_is_dp(tunnel))
+			tb_discover_dp_resource(tb, tunnel->dst_port);
+	}
+}
+
+static void tb_switch_discover_tunnels(struct tb_switch *sw,
+				       struct list_head *list,
+				       bool alloc_hopids)
 {
 	struct tb *tb = sw->tb;
-	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_port *port;
 
 	tb_switch_for_each_port(sw, port) {
@@ -116,24 +143,41 @@ static void tb_discover_tunnels(struct tb_switch *sw)
 		switch (port->config.type) {
 		case TB_TYPE_DP_HDMI_IN:
-			tunnel = tb_tunnel_discover_dp(tb, port);
+			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
 			break;
 
 		case TB_TYPE_PCIE_DOWN:
-			tunnel = tb_tunnel_discover_pci(tb, port);
+			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
 			break;
 
 		case TB_TYPE_USB3_DOWN:
-			tunnel = tb_tunnel_discover_usb3(tb, port);
+			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
 			break;
 
 		default:
 			break;
 		}
 
-		if (!tunnel)
-			continue;
+		if (tunnel)
+			list_add_tail(&tunnel->list, list);
+	}
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_has_remote(port)) {
+			tb_switch_discover_tunnels(port->remote->sw, list,
+						   alloc_hopids);
+		}
+	}
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_tunnel *tunnel;
+
+	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
 
+	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
 		if (tb_tunnel_is_pci(tunnel)) {
 			struct tb_switch *parent = tunnel->dst_port->sw;
 
@@ -146,13 +190,6 @@ static void tb_discover_tunnels(struct tb *tb)
 			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
 			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
 		}
-
-		list_add_tail(&tunnel->list, &tcm->tunnel_list);
-	}
-
-	tb_switch_for_each_port(sw, port) {
-		if (tb_port_has_remote(port))
-			tb_discover_tunnels(port->remote->sw);
 	}
 }
@@ -1384,7 +1421,9 @@ static int tb_start(struct tb *tb)
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
 	/* Find out tunnels created by the boot firmware */
-	tb_discover_tunnels(tb->root_switch);
+	tb_discover_tunnels(tb);
+	/* Add DP resources from the DP tunnels created by the boot firmware */
+	tb_discover_dp_resources(tb);
 	/*
 	 * If the boot firmware did not create USB 3.x tunnels create them
 	 * now for the whole topology.
@@ -1444,6 +1483,8 @@ static int tb_resume_noirq(struct tb *tb)
 {
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_tunnel *tunnel, *n;
+	unsigned int usb3_delay = 0;
+	LIST_HEAD(tunnels);
 
 	tb_dbg(tb, "resuming...\n");
@@ -1454,8 +1495,31 @@ static int tb_resume_noirq(struct tb *tb)
 	tb_free_invalid_tunnels(tb);
 	tb_free_unplugged_children(tb->root_switch);
 	tb_restore_children(tb->root_switch);
-	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+
+	/*
+	 * If we get here from suspend to disk the boot firmware or the
+	 * restore kernel might have created tunnels of its own. Since
+	 * we cannot be sure they are usable for us we find and tear
+	 * them down.
+	 */
+	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
+	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
+		if (tb_tunnel_is_usb3(tunnel))
+			usb3_delay = 500;
+		tb_tunnel_deactivate(tunnel);
+		tb_tunnel_free(tunnel);
+	}
+
+	/* Re-create our tunnels now */
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+		/* USB3 requires delay before it can be re-activated */
+		if (tb_tunnel_is_usb3(tunnel)) {
+			msleep(usb3_delay);
+			/* Only need to do it once */
+			usb3_delay = 0;
+		}
 		tb_tunnel_restart(tunnel);
+	}
+
 	if (!list_empty(&tcm->tunnel_list)) {
 		/*
 		 * the pcie links need some time to get going.

View File

@@ -354,6 +354,7 @@ enum tb_path_port {
  *	     when deactivating this path
  * @hops: Path hops
  * @path_length: How many hops the path uses
+ * @alloc_hopid: Does this path consume port HopID
  *
  * A path consists of a number of hops (see &struct tb_path_hop). To
  * establish a PCIe tunnel two paths have to be created between the two
@@ -374,6 +375,7 @@ struct tb_path {
 	bool clear_fc;
 	struct tb_path_hop *hops;
 	int path_length;
+	bool alloc_hopid;
 };
 
 /* HopIDs 0-7 are reserved by the Thunderbolt protocol */
@@ -957,7 +959,8 @@ int tb_dp_port_enable(struct tb_port *port, bool enable);
 
 struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 				 struct tb_port *dst, int dst_hopid,
-				 struct tb_port **last, const char *name);
+				 struct tb_port **last, const char *name,
+				 bool alloc_hopid);
 struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 			      struct tb_port *dst, int dst_hopid, int link_nr,
 			      const char *name);

View File

@@ -207,12 +207,14 @@ static int tb_pci_init_path(struct tb_path *path)
  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
  * @tb: Pointer to the domain structure
  * @down: PCIe downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
  *
  * If @down adapter is active, follows the tunnel to the PCIe upstream
  * adapter and back. Returns the discovered tunnel or %NULL if there was
  * no tunnel.
  */
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+					 bool alloc_hopid)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_path *path;
@@ -233,7 +235,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
 	 * case.
 	 */
 	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
-				&tunnel->dst_port, "PCIe Up");
+				&tunnel->dst_port, "PCIe Up", alloc_hopid);
 	if (!path) {
 		/* Just disable the downstream port */
 		tb_pci_port_enable(down, false);
@@ -244,7 +246,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
 		goto err_free;
 
 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
-				"PCIe Down");
+				"PCIe Down", alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_PCI_PATH_DOWN] = path;
@@ -761,6 +763,7 @@ static int tb_dp_init_video_path(struct tb_path *path)
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
@@ -768,7 +771,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+					bool alloc_hopid)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_port *port;
@@ -787,7 +791,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
 	tunnel->src_port = in;
 
 	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
-				&tunnel->dst_port, "Video");
+				&tunnel->dst_port, "Video", alloc_hopid);
 	if (!path) {
 		/* Just disable the DP IN port */
 		tb_dp_port_enable(in, false);
@@ -797,14 +801,15 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
 	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
 		goto err_free;
 
-	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
+				alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
 	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
 
 	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
-				&port, "AUX RX");
+				&port, "AUX RX", alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
@@ -1344,12 +1349,14 @@ static void tb_usb3_init_path(struct tb_path *path)
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+					  bool alloc_hopid)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_path *path;
@@ -1370,7 +1377,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
 	 * case.
 	 */
 	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
-				&tunnel->dst_port, "USB3 Down");
+				&tunnel->dst_port, "USB3 Down", alloc_hopid);
 	if (!path) {
 		/* Just disable the downstream port */
 		tb_usb3_port_enable(down, false);
@@ -1380,7 +1387,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
 	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
 
 	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
-				"USB3 Up");
+				"USB3 Up", alloc_hopid);
 	if (!path)
 		goto err_deactivate;
 	tunnel->paths[TB_USB3_PATH_UP] = path;

View File

@@ -64,10 +64,12 @@ struct tb_tunnel {
 	int allocated_down;
 };
 
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+					 bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 				      struct tb_port *down);
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+					bool alloc_hopid);
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 				     struct tb_port *out, int link_nr,
 				     int max_up, int max_down);
@@ -77,7 +79,8 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 				      int receive_ring);
 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
 			 int transmit_ring, int receive_path, int receive_ring);
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
 				       struct tb_port *down, int max_up,
 				       int max_down);


@@ -2404,7 +2404,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 		fs_info->dev_root = root;
 	}

 	/* Initialize fs_info for all devices in any case */
-	btrfs_init_devices_late(fs_info);
+	ret = btrfs_init_devices_late(fs_info);
+	if (ret)
+		goto out;

 	/* If IGNOREDATACSUMS is set don't bother reading the csum root. */
 	if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {


@@ -199,7 +199,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 void btrfs_free_dummy_root(struct btrfs_root *root)
 {
-	if (!root)
+	if (IS_ERR_OR_NULL(root))
 		return;

 	/* Will be freed by btrfs_free_fs_roots */
 	if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))


@@ -6841,18 +6841,18 @@ static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
 				  const struct btrfs_device *device)
 {
-	ASSERT((args->devid != (u64)-1) || args->missing);
+	if (args->missing) {
+		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
+		    !device->bdev)
+			return true;
+		return false;
+	}

-	if ((args->devid != (u64)-1) && device->devid != args->devid)
+	if (device->devid != args->devid)
 		return false;
 	if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
 		return false;
-	if (!args->missing)
-		return true;
-
-	if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
-	    !device->bdev)
-		return true;
-
-	return false;
+	return true;
 }

 /*
@@ -7681,10 +7681,11 @@ error:
 	return ret;
 }

-void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
+int btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
 	struct btrfs_device *device;
+	int ret = 0;

 	fs_devices->fs_info = fs_info;
@@ -7693,12 +7694,18 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
 		device->fs_info = fs_info;

 	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
-		list_for_each_entry(device, &seed_devs->devices, dev_list)
+		list_for_each_entry(device, &seed_devs->devices, dev_list) {
 			device->fs_info = fs_info;
+			ret = btrfs_get_dev_zone_info(device, false);
+			if (ret)
+				break;
+		}

 		seed_devs->fs_info = fs_info;
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
+
+	return ret;
 }

 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,


@@ -539,7 +539,7 @@ int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
 			struct btrfs_ioctl_get_dev_stats *stats);
-void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
+int btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);


@@ -77,8 +77,10 @@ static void fuse_add_dirent_to_cache(struct file *file,
 		goto unlock;

 	addr = kmap_atomic(page);
-	if (!offset)
+	if (!offset) {
 		clear_page(addr);
+		SetPageUptodate(page);
+	}
 	memcpy(addr + offset, dirent, reclen);
 	kunmap_atomic(addr);
 	fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen;
@@ -516,6 +518,12 @@ retry_locked:
 	page = find_get_page_flags(file->f_mapping, index,
 				   FGP_ACCESSED | FGP_LOCK);
+	/* Page gone missing, then re-added to cache, but not initialized? */
+	if (page && !PageUptodate(page)) {
+		unlock_page(page);
+		put_page(page);
+		page = NULL;
+	}
+
 	spin_lock(&fi->rdc.lock);
 	if (!page) {
 		/*


@@ -317,7 +317,7 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb)
 	struct the_nilfs *nilfs = sb->s_fs_info;
 	struct nilfs_sc_info *sci = nilfs->ns_writer;

-	if (!sci || !sci->sc_flush_request)
+	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
 		return;

 	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
@@ -2243,7 +2243,7 @@ int nilfs_construct_segment(struct super_block *sb)
 	struct nilfs_transaction_info *ti;
 	int err;

-	if (!sci)
+	if (sb_rdonly(sb) || unlikely(!sci))
 		return -EROFS;

 	/* A call inside transactions causes a deadlock. */
@@ -2282,7 +2282,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
 	struct nilfs_transaction_info ti;
 	int err = 0;

-	if (!sci)
+	if (sb_rdonly(sb) || unlikely(!sci))
 		return -EROFS;

 	nilfs_transaction_lock(sb, &ti, 0);
@@ -2778,11 +2778,12 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
 	if (nilfs->ns_writer) {
 		/*
-		 * This happens if the filesystem was remounted
-		 * read/write after nilfs_error degenerated it into a
-		 * read-only mount.
+		 * This happens if the filesystem is made read-only by
+		 * __nilfs_error or nilfs_remount and then remounted
+		 * read/write.  In these cases, reuse the existing
+		 * writer.
 		 */
-		nilfs_detach_log_writer(sb);
+		return 0;
 	}

 	nilfs->ns_writer = nilfs_segctor_new(sb, root);


@@ -1134,8 +1134,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
 	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
 		goto out;
 	if (*flags & SB_RDONLY) {
-		/* Shutting down log writer */
-		nilfs_detach_log_writer(sb);
 		sb->s_flags |= SB_RDONLY;

 		/*


@@ -690,9 +690,7 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
 {
 	unsigned long ncleansegs;

-	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
-	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
 	return 0;
 }


@@ -240,7 +240,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
 					poffset - lfi);
 			else {
 				if (!copy_name) {
-					copy_name = kmalloc(UDF_NAME_LEN,
+					copy_name = kmalloc(UDF_NAME_LEN_CS0,
 							    GFP_NOFS);
 					if (!copy_name) {
 						fi = ERR_PTR(-ENOMEM);


@@ -337,6 +337,7 @@
 #define DATA_DATA							\
 	*(.xiptext)							\
 	*(DATA_MAIN)							\
+	*(.data..decrypted)						\
 	*(.ref.data)							\
 	*(.data..shared_aligned) /* percpu related */			\
 	MEM_KEEP(init.data*)						\
@@ -969,7 +970,6 @@
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 #define PERCPU_DECRYPTED_SECTION					\
 	. = ALIGN(PAGE_SIZE);						\
-	*(.data..decrypted)						\
 	*(.data..percpu..decrypted)					\
 	. = ALIGN(PAGE_SIZE);
 #else


@@ -2034,6 +2034,7 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
 void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,


@@ -329,6 +329,27 @@ struct bpf_verifier_state {
 	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
 	     iter++, reg = bpf_get_spilled_reg(iter, frame))

+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)	\
+	({								\
+		struct bpf_verifier_state *___vstate = __vst;		\
+		int ___i, ___j;						\
+		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
+			struct bpf_reg_state *___regs;			\
+			__state = ___vstate->frame[___i];		\
+			___regs = __state->regs;			\
+			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
+				__reg = &___regs[___j];			\
+				(void)(__expr);				\
+			}						\
+			bpf_for_each_spilled_reg(___j, __state, __reg) { \
+				if (!__reg)				\
+					continue;			\
+				(void)(__expr);				\
+			}						\
+		}							\
+	})
+
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
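
The macro above routes both the frame registers and the spilled stack slots of every frame through one caller-supplied expression, which is what lets the verifier hunks further down collapse their duplicated double loops into single call sites. A minimal stand-alone sketch of the same GNU C statement-expression idiom, with illustrative names that are not part of the kernel API:

#include <stdio.h>

/* Walk an array and evaluate __expr once per element, with __it pointing
 * at the current element -- the same shape as bpf_for_each_reg_in_vstate(),
 * minus the frames and spill slots. Statement expressions ({ ... }) are a
 * GCC/Clang extension.
 */
#define for_each_in(__arr, __n, __it, __expr)		\
	({						\
		int ___i;				\
		for (___i = 0; ___i < (__n); ___i++) {	\
			__it = &(__arr)[___i];		\
			(void)(__expr);			\
		}					\
	})

int main(void)
{
	int vals[4] = { 1, 2, 3, 4 }, *it, sum = 0;

	for_each_in(vals, 4, it, ({ sum += *it; }));
	printf("%d\n", sum); /* prints 10 */
	return 0;
}

Keeping the iteration in one place is the point: the release_reference() fix below only changes the expression passed in, not four hand-rolled copies of the loop.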


@@ -96,6 +96,7 @@ struct sk_psock {
 	spinlock_t			link_lock;
 	refcount_t			refcnt;
 	void (*saved_unhash)(struct sock *sk);
+	void (*saved_destroy)(struct sock *sk);
 	void (*saved_close)(struct sock *sk, long timeout);
 	void (*saved_write_space)(struct sock *sk);
 	void (*saved_data_ready)(struct sock *sk);
@@ -381,7 +382,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
 }

 struct sk_psock *sk_psock_init(struct sock *sk, int node);
-void sk_psock_stop(struct sk_psock *psock, bool wait);
+void sk_psock_stop(struct sk_psock *psock);

 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);


@@ -5,6 +5,7 @@
 #ifndef __SOC_OTX2_ASM_H
 #define __SOC_OTX2_ASM_H

+#include <linux/types.h>
 #if defined(CONFIG_ARM64)
 /*
  * otx2_lmt_flush is used for LMT store operation.
@@ -34,9 +35,23 @@
 			 : [rf] "+r"(val)		\
 			 : [rs] "r"(addr));		\
 })
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+	u64 result;
+
+	asm volatile (".cpu  generic+lse\n"
+		      "ldadda %x[i], %x[r], [%[b]]"
+		      : [r] "=r" (result), "+m" (*ptr)
+		      : [i] "r" (incr), [b] "r" (ptr)
+		      : "memory");
+	return result;
+}
 #else
 #define otx2_lmt_flush(ioaddr)			({ 0; })
 #define cn10k_lmt_flush(val, addr)		({ addr = val; })
+#define otx2_atomic64_fetch_add(incr, ptr)	({ incr; })
 #endif

 #endif /* __SOC_OTX2_ASM_H */
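
For context, LDADDA is an ARMv8.1 LSE atomic: it adds to the 64-bit location and returns the value it held before the add (the trailing A gives acquire semantics). A userspace analog using the GCC/Clang atomic builtins rather than the kernel helper, as a hedged sketch:

#include <stdint.h>
#include <stdio.h>

/* Fetch-and-add: returns the old value, like the LDADDA-based helper. */
static inline uint64_t fetch_add64(uint64_t incr, uint64_t *ptr)
{
	return __atomic_fetch_add(ptr, incr, __ATOMIC_ACQUIRE);
}

int main(void)
{
	uint64_t ctr = 40;
	uint64_t old = fetch_add64(2, &ctr);

	printf("old=%llu new=%llu\n", (unsigned long long)old,
	       (unsigned long long)ctr); /* old=40 new=42 */
	return 0;
}

The !CONFIG_ARM64 stub simply evaluates to incr, consistent with the neighbouring no-op fallbacks.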


@@ -426,7 +426,7 @@ struct vfs_ns_cap_data {
  */

 #define CAP_TO_INDEX(x)     ((x) >> 5)        /* 1 << 5 == bits in __u32 */
-#define CAP_TO_MASK(x)      (1 << ((x) & 31)) /* mask for indexed __u32 */
+#define CAP_TO_MASK(x)      (1U << ((x) & 31)) /* mask for indexed __u32 */

 #endif /* _UAPI_LINUX_CAPABILITY_H */
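
The one-character fix matters because the shift was done in signed int: for any capability number whose low five bits are all set, 1 << 31 overflows a 32-bit int, which is undefined behavior in C and what UBSAN flags; 1U makes the shift unsigned and well defined. A stand-alone illustration with local stand-in macros, not the UAPI header itself:

#include <stdio.h>

#define BAD_CAP_TO_MASK(x)   (1 << ((x) & 31))  /* signed: UB when (x & 31) == 31 */
#define GOOD_CAP_TO_MASK(x)  (1U << ((x) & 31)) /* unsigned: always defined */

int main(void)
{
	unsigned int cap = 63; /* low five bits all set -> shift by 31 */

	/* BAD_CAP_TO_MASK(cap) would shift into the sign bit here. */
	printf("mask = 0x%x\n", GOOD_CAP_TO_MASK(cap)); /* 0x80000000 */
	return 0;
}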


@@ -770,12 +770,17 @@ out:
  */
 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
 {
+	void *new_arr;
+
 	if (!new_n || old_n == new_n)
 		goto out;

-	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
-	if (!arr)
+	new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
+	if (!new_arr) {
+		kfree(arr);
 		return NULL;
+	}
+	arr = new_arr;

 	if (new_n > old_n)
 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
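
The bug being fixed is the classic realloc leak: on failure krealloc_array() returns NULL but leaves the original allocation alive, so assigning the result straight back into arr dropped the only reference to the old buffer. The same pattern, and the same fix, in plain C with realloc(), as a userspace sketch (unlike krealloc_array, this stand-in does not overflow-check new_n * size):

#include <stdlib.h>
#include <string.h>

/* Grow an array safely: keep the old pointer until the new one is known good. */
static void *grow_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	void *new_arr = realloc(arr, new_n * size);

	if (!new_arr) {
		free(arr);	/* old block is still ours to release */
		return NULL;
	}
	if (new_n > old_n)	/* zero the fresh tail, as the verifier does */
		memset((char *)new_arr + old_n * size, 0,
		       (new_n - old_n) * size);
	return new_arr;
}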
@@ -5629,31 +5634,15 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
  * are now invalid, so turn them into unknown SCALAR_VALUE.
  */
-static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
-				     struct bpf_func_state *state)
-{
-	struct bpf_reg_state *regs = state->regs, *reg;
-	int i;
-
-	for (i = 0; i < MAX_BPF_REG; i++)
-		if (reg_is_pkt_pointer_any(&regs[i]))
-			mark_reg_unknown(env, regs, i);
-
-	bpf_for_each_spilled_reg(i, state, reg) {
-		if (!reg)
-			continue;
-		if (reg_is_pkt_pointer_any(reg))
-			__mark_reg_unknown(env, reg);
-	}
-}
-
 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 {
-	struct bpf_verifier_state *vstate = env->cur_state;
-	int i;
-
-	for (i = 0; i <= vstate->curframe; i++)
-		__clear_all_pkt_pointers(env, vstate->frame[i]);
+	struct bpf_func_state *state;
+	struct bpf_reg_state *reg;
+
+	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+		if (reg_is_pkt_pointer_any(reg))
+			__mark_reg_unknown(env, reg);
+	}));
 }

 enum {
@@ -5682,41 +5671,28 @@ static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range
 	reg->range = AT_PKT_END;
 }

-static void release_reg_references(struct bpf_verifier_env *env,
-				   struct bpf_func_state *state,
-				   int ref_obj_id)
-{
-	struct bpf_reg_state *regs = state->regs, *reg;
-	int i;
-
-	for (i = 0; i < MAX_BPF_REG; i++)
-		if (regs[i].ref_obj_id == ref_obj_id)
-			mark_reg_unknown(env, regs, i);
-
-	bpf_for_each_spilled_reg(i, state, reg) {
-		if (!reg)
-			continue;
-		if (reg->ref_obj_id == ref_obj_id)
-			__mark_reg_unknown(env, reg);
-	}
-}
-
 /* The pointer with the specified id has released its reference to kernel
  * resources. Identify all copies of the same pointer and clear the reference.
  */
 static int release_reference(struct bpf_verifier_env *env,
 			     int ref_obj_id)
 {
-	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state;
+	struct bpf_reg_state *reg;
 	int err;
-	int i;

 	err = release_reference_state(cur_func(env), ref_obj_id);
 	if (err)
 		return err;

-	for (i = 0; i <= vstate->curframe; i++)
-		release_reg_references(env, vstate->frame[i], ref_obj_id);
+	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+		if (reg->ref_obj_id == ref_obj_id) {
+			if (!env->allow_ptr_leaks)
+				__mark_reg_not_init(env, reg);
+			else
+				__mark_reg_unknown(env, reg);
+		}
+	}));

 	return 0;
 }
@@ -8216,34 +8192,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }

-static void __find_good_pkt_pointers(struct bpf_func_state *state,
-				     struct bpf_reg_state *dst_reg,
-				     enum bpf_reg_type type, int new_range)
-{
-	struct bpf_reg_state *reg;
-	int i;
-
-	for (i = 0; i < MAX_BPF_REG; i++) {
-		reg = &state->regs[i];
-		if (reg->type == type && reg->id == dst_reg->id)
-			/* keep the maximum range already checked */
-			reg->range = max(reg->range, new_range);
-	}
-
-	bpf_for_each_spilled_reg(i, state, reg) {
-		if (!reg)
-			continue;
-		if (reg->type == type && reg->id == dst_reg->id)
-			reg->range = max(reg->range, new_range);
-	}
-}
-
 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   struct bpf_reg_state *dst_reg,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
-	int new_range, i;
+	struct bpf_func_state *state;
+	struct bpf_reg_state *reg;
+	int new_range;

 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -8308,9 +8264,11 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 	 * the range won't allow anything.
 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
 	 */
-	for (i = 0; i <= vstate->curframe; i++)
-		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
-					 new_range);
+	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+		if (reg->type == type && reg->id == dst_reg->id)
+			/* keep the maximum range already checked */
+			reg->range = max(reg->range, new_range);
+	}));
 }

 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
@@ -8799,7 +8757,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 		if (!reg_may_point_to_spin_lock(reg)) {
 			/* For not-NULL ptr, reg->ref_obj_id will be reset
-			 * in release_reg_references().
+			 * in release_reference().
 			 *
 			 * reg->id is still used by spin_lock ptr. Other
 			 * than spin_lock ptr type, reg->id can be reset.
@@ -8809,22 +8767,6 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 	}
 }

-static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
-				    bool is_null)
-{
-	struct bpf_reg_state *reg;
-	int i;
-
-	for (i = 0; i < MAX_BPF_REG; i++)
-		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
-
-	bpf_for_each_spilled_reg(i, state, reg) {
-		if (!reg)
-			continue;
-		mark_ptr_or_null_reg(state, reg, id, is_null);
-	}
-}
-
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
@@ -8832,10 +8774,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 			  bool is_null)
 {
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *regs = state->regs;
+	struct bpf_reg_state *regs = state->regs, *reg;
 	u32 ref_obj_id = regs[regno].ref_obj_id;
 	u32 id = regs[regno].id;
-	int i;

 	if (ref_obj_id && ref_obj_id == id && is_null)
 		/* regs[regno] is in the " == NULL" branch.
@@ -8844,8 +8785,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 		 */
 		WARN_ON_ONCE(release_reference_state(state, id));

-	for (i = 0; i <= vstate->curframe; i++)
-		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
+	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+		mark_ptr_or_null_reg(state, reg, id, is_null);
+	}));
 }

 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
@@ -8958,23 +8900,11 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
 {
 	struct bpf_func_state *state;
 	struct bpf_reg_state *reg;
-	int i, j;

-	for (i = 0; i <= vstate->curframe; i++) {
-		state = vstate->frame[i];
-		for (j = 0; j < MAX_BPF_REG; j++) {
-			reg = &state->regs[j];
-			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-				*reg = *known_reg;
-		}
-
-		bpf_for_each_spilled_reg(j, state, reg) {
-			if (!reg)
-				continue;
-			if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
-				*reg = *known_reg;
-		}
-	}
+	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+			*reg = *known_reg;
+	}));
 }

 static int check_cond_jmp_op(struct bpf_verifier_env *env,


@@ -788,6 +788,7 @@ out:
 static int dbgfs_rm_context(char *name)
 {
 	struct dentry *root, *dir, **new_dirs;
+	struct inode *inode;
 	struct damon_ctx **new_ctxs;
 	int i, j;
 	int ret = 0;
@@ -803,6 +804,12 @@ static int dbgfs_rm_context(char *name)
 	if (!dir)
 		return -ENOENT;

+	inode = d_inode(dir);
+	if (!S_ISDIR(inode->i_mode)) {
+		ret = -EINVAL;
+		goto out_dput;
+	}
+
 	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
 				 GFP_KERNEL);
 	if (!new_dirs) {
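
The new S_ISDIR() check guards against the looked-up dentry naming something other than a context directory, in which case removal must bail out rather than treat it as one. The same defensive pattern in userspace, as an analogous sketch rather than the DAMON code:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || fstatat(AT_FDCWD, argv[1], &st, AT_SYMLINK_NOFOLLOW)) {
		perror("fstatat");
		return 1;
	}
	if (!S_ISDIR(st.st_mode)) {	/* refuse to treat a file as a directory */
		fprintf(stderr, "%s: not a directory\n", argv[1]);
		return 1;
	}
	printf("%s: ok to handle as a directory\n", argv[1]);
	return 0;
}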


@@ -327,6 +327,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 			WARN(1, "File system DAX not supported\n");
 			return ERR_PTR(-EINVAL);
 		}
+		params.pgprot = pgprot_decrypted(params.pgprot);
 		break;
 	case MEMORY_DEVICE_GENERIC:
 		break;


@@ -63,7 +63,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 	pte_t _dst_pte, *dst_pte;
 	bool writable = dst_vma->vm_flags & VM_WRITE;
 	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
-	bool page_in_cache = page->mapping;
+	bool page_in_cache = page_mapping(page);
 	spinlock_t *ptl;
 	struct inode *inode;
 	pgoff_t offset, max_off;
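
The raw page->mapping test is unreliable: an anonymous page stores an anon_vma pointer tagged with PAGE_MAPPING_ANON in that field, and a compound tail page may not hold its mapping there at all, so "non-NULL" does not imply "in the page cache". page_mapping() resolves the head page and strips the tag bits before answering. A toy sketch of the tagged-pointer half of the problem, with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define MAPPING_ANON 0x1UL	/* low tag bit, like PAGE_MAPPING_ANON */

/* Return the real mapping, or NULL when the field is anon-tagged. */
static void *mapping_of(uintptr_t field)
{
	if (field & MAPPING_ANON)
		return NULL;	/* field holds an anon_vma, not a mapping */
	return (void *)field;
}

int main(void)
{
	uintptr_t anon_field = 0x1000 | MAPPING_ANON;

	printf("raw non-NULL? %d  real mapping? %d\n",
	       anon_field != 0, mapping_of(anon_field) != NULL); /* 1 0 */
	return 0;
}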

Some files were not shown because too many files have changed in this diff.