Merge 5.15.34 into android13-5.15

Changes in 5.15.34
	lib/logic_iomem: correct fallback config references
	um: fix and optimize xor select template for CONFIG64 and timetravel mode
	rtc: wm8350: Handle error for wm8350_register_irq
	nbd: add error handling support for add_disk()
	nbd: Fix incorrect error handle when first_minor is illegal in nbd_dev_add
	nbd: Fix hungtask when nbd_config_put
	nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
	kfence: count unexpectedly skipped allocations
	kfence: move saving stack trace of allocations into __kfence_alloc()
	kfence: limit currently covered allocations when pool nearly full
	KVM: x86/pmu: Use different raw event masks for AMD and Intel
	KVM: SVM: Fix kvm_cache_regs.h inclusions for is_guest_mode()
	KVM: x86/svm: Clear reserved bits written to PerfEvtSeln MSRs
	KVM: x86/pmu: Fix and isolate TSX-specific performance event logic
	KVM: x86/emulator: Emulate RDPID only if it is enabled in guest
	drm: Add orientation quirk for GPD Win Max
	ath5k: fix OOB in ath5k_eeprom_read_pcal_info_5111
	drm/amd/display: Add signal type check when verify stream backends same
	drm/amd/amdgpu/amdgpu_cs: fix refcount leak of a dma_fence obj
	drm/amd/display: Fix memory leak
	drm/amd/display: Use PSR version selected during set_psr_caps
	usb: gadget: tegra-xudc: Do not program SPARAM
	usb: gadget: tegra-xudc: Fix control endpoint's definitions
	usb: cdnsp: fix cdnsp_decode_trb function to properly handle ret value
	ptp: replace snprintf with sysfs_emit
	drm/amdkfd: Don't take process mutex for svm ioctls
	powerpc: dts: t104xrdb: fix phy type for FMAN 4/5
	ath11k: fix kernel panic during unload/load ath11k modules
	ath11k: pci: fix crash on suspend if board file is not found
	ath11k: mhi: use mhi_sync_power_up()
	net/smc: Send directly when TCP_CORK is cleared
	drm/bridge: Add missing pm_runtime_put_sync
	bpf: Make dst_port field in struct bpf_sock 16-bit wide
	scsi: mvsas: Replace snprintf() with sysfs_emit()
	scsi: bfa: Replace snprintf() with sysfs_emit()
	drm/v3d: fix missing unlock
	power: supply: axp20x_battery: properly report current when discharging
	mt76: mt7921: fix crash when startup fails.
	mt76: dma: initialize skip_unmap in mt76_dma_rx_fill
	cfg80211: don't add non transmitted BSS to 6GHz scanned channels
	libbpf: Fix build issue with llvm-readelf
	ipv6: make mc_forwarding atomic
	net: initialize init_net earlier
	powerpc: Set crashkernel offset to mid of RMA region
	drm/amdgpu: Fix recursive locking warning
	scsi: smartpqi: Fix kdump issue when controller is locked up
	PCI: aardvark: Fix support for MSI interrupts
	iommu/arm-smmu-v3: fix event handling soft lockup
	usb: ehci: add pci device support for Aspeed platforms
	PCI: endpoint: Fix alignment fault error in copy tests
	tcp: Don't acquire inet_listen_hashbucket::lock with disabled BH.
	PCI: pciehp: Add Qualcomm quirk for Command Completed erratum
	scsi: mpi3mr: Fix reporting of actual data transfer size
	scsi: mpi3mr: Fix memory leaks
	powerpc/set_memory: Avoid spinlock recursion in change_page_attr()
	power: supply: axp288-charger: Set Vhold to 4.4V
	net/mlx5e: Disable TX queues before registering the netdev
	usb: dwc3: pci: Set the swnode from inside dwc3_pci_quirks()
	iwlwifi: mvm: Correctly set fragmented EBS
	iwlwifi: mvm: move only to an enabled channel
	drm/msm/dsi: Remove spurious IRQF_ONESHOT flag
	ipv4: Invalidate neighbour for broadcast address upon address addition
	dm ioctl: prevent potential spectre v1 gadget
	dm: requeue IO if mapping table not yet available
	drm/amdkfd: make CRAT table missing message informational only
	vfio/pci: Stub vfio_pci_vga_rw when !CONFIG_VFIO_PCI_VGA
	scsi: pm8001: Fix pm80xx_pci_mem_copy() interface
	scsi: pm8001: Fix pm8001_mpi_task_abort_resp()
	scsi: pm8001: Fix task leak in pm8001_send_abort_all()
	scsi: pm8001: Fix tag leaks on error
	scsi: pm8001: Fix memory leak in pm8001_chip_fw_flash_update_req()
	mt76: mt7915: fix injected MPDU transmission to not use HW A-MSDU
	powerpc/64s/hash: Make hash faults work in NMI context
	mt76: mt7615: Fix assigning negative values to unsigned variable
	scsi: aha152x: Fix aha152x_setup() __setup handler return value
	scsi: hisi_sas: Free irq vectors in order for v3 HW
	scsi: hisi_sas: Limit users changing debugfs BIST count value
	net/smc: correct settings of RMB window update limit
	mips: ralink: fix a refcount leak in ill_acc_of_setup()
	macvtap: advertise link netns via netlink
	tuntap: add sanity checks about msg_controllen in sendmsg
	Bluetooth: Fix not checking for valid hdev on bt_dev_{info,warn,err,dbg}
	Bluetooth: use memset avoid memory leaks
	bnxt_en: Eliminate unintended link toggle during FW reset
	PCI: endpoint: Fix misused goto label
	MIPS: fix fortify panic when copying asm exception handlers
	powerpc/64e: Tie PPC_BOOK3E_64 to PPC_FSL_BOOK3E
	powerpc/secvar: fix refcount leak in format_show()
	scsi: libfc: Fix use after free in fc_exch_abts_resp()
	can: isotp: set default value for N_As to 50 micro seconds
	can: etas_es58x: es58x_fd_rx_event_msg(): initialize rx_event_msg before calling es58x_check_msg_len()
	riscv: Fixed misaligned memory access. Fixed pointer comparison.
	net: account alternate interface name memory
	net: limit altnames to 64k total
	net/mlx5e: Remove overzealous validations in netlink EEPROM query
	net: sfp: add 2500base-X quirk for Lantech SFP module
	usb: dwc3: omap: fix "unbalanced disables for smps10_out1" on omap5evm
	mt76: fix monitor mode crash with sdio driver
	xtensa: fix DTC warning unit_address_format
	MIPS: ingenic: correct unit node address
	Bluetooth: Fix use after free in hci_send_acl
	netfilter: conntrack: revisit gc autotuning
	netlabel: fix out-of-bounds memory accesses
	ceph: fix inode reference leakage in ceph_get_snapdir()
	ceph: fix memory leak in ceph_readdir when note_last_dentry returns error
	lib/Kconfig.debug: add ARCH dependency for FUNCTION_ALIGN option
	init/main.c: return 1 from handled __setup() functions
	minix: fix bug when opening a file with O_DIRECT
	clk: si5341: fix reported clk_rate when output divider is 2
	staging: vchiq_arm: Avoid NULL ptr deref in vchiq_dump_platform_instances
	staging: vchiq_core: handle NULL result of find_service_by_handle
	phy: amlogic: phy-meson-gxl-usb2: fix shared reset controller use
	phy: amlogic: meson8b-usb2: Use dev_err_probe()
	phy: amlogic: meson8b-usb2: fix shared reset control use
	clk: rockchip: drop CLK_SET_RATE_PARENT from dclk_vop* on rk3568
	cpufreq: CPPC: Fix performance/frequency conversion
	opp: Expose of-node's name in debugfs
	staging: wfx: fix an error handling in wfx_init_common()
	w1: w1_therm: fixes w1_seq for ds28ea00 sensors
	NFSv4.2: fix reference count leaks in _nfs42_proc_copy_notify()
	NFSv4: Protect the state recovery thread against direct reclaim
	habanalabs: fix possible memory leak in MMU DR fini
	xen: delay xen_hvm_init_time_ops() if kdump is boot on vcpu>=32
	clk: ti: Preserve node in ti_dt_clocks_register()
	clk: Enforce that disjoints limits are invalid
	SUNRPC/call_alloc: async tasks mustn't block waiting for memory
	SUNRPC/xprt: async tasks mustn't block waiting for memory
	SUNRPC: remove scheduling boost for "SWAPPER" tasks.
	NFS: swap IO handling is slightly different for O_DIRECT IO
	NFS: swap-out must always use STABLE writes.
	x86: Annotate call_on_stack()
	x86/Kconfig: Do not allow CONFIG_X86_X32_ABI=y with llvm-objcopy
	serial: samsung_tty: do not unlock port->lock for uart_write_wakeup()
	virtio_console: eliminate anonymous module_init & module_exit
	jfs: prevent NULL deref in diFree
	SUNRPC: Fix socket waits for write buffer space
	NFS: nfsiod should not block forever in mempool_alloc()
	NFS: Avoid writeback threads getting stuck in mempool_alloc()
	selftests: net: Add tls config dependency for tls selftests
	parisc: Fix CPU affinity for Lasi, WAX and Dino chips
	parisc: Fix patch code locking and flushing
	mm: fix race between MADV_FREE reclaim and blkdev direct IO read
	rtc: mc146818-lib: change return values of mc146818_get_time()
	rtc: Check return value from mc146818_get_time()
	rtc: mc146818-lib: fix RTC presence check
	drm/amdgpu: fix off by one in amdgpu_gfx_kiq_acquire()
	Drivers: hv: vmbus: Fix potential crash on module unload
	Revert "NFSv4: Handle the special Linux file open access mode"
	NFSv4: fix open failure with O_ACCMODE flag
	scsi: sr: Fix typo in CDROM(CLOSETRAY|EJECT) handling
	scsi: core: Fix sbitmap depth in scsi_realloc_sdev_budget_map()
	scsi: zorro7xx: Fix a resource leak in zorro7xx_remove_one()
	vdpa/mlx5: Rename control VQ workqueue to vdpa wq
	vdpa/mlx5: Propagate link status from device to vdpa driver
	vdpa: mlx5: prevent cvq work from hogging CPU
	net: sfc: add missing xdp queue reinitialization
	net/tls: fix slab-out-of-bounds bug in decrypt_internal
	vrf: fix packet sniffing for traffic originating from ip tunnels
	skbuff: fix coalescing for page_pool fragment recycling
	ice: Clear default forwarding VSI during VSI release
	mctp: Fix check for dev_hard_header() result
	net: ipv4: fix route with nexthop object delete warning
	net: stmmac: Fix unset max_speed difference between DT and non-DT platforms
	drm/imx: imx-ldb: Check for null pointer after calling kmemdup
	drm/imx: Fix memory leak in imx_pd_connector_get_modes
	drm/imx: dw_hdmi-imx: Fix bailout in error cases of probe
	regulator: rtq2134: Fix missing active_discharge_on setting
	regulator: atc260x: Fix missing active_discharge_on setting
	arch/arm64: Fix topology initialization for core scheduling
	bnxt_en: Synchronize tx when xdp redirects happen on same ring
	bnxt_en: reserve space inside receive page for skb_shared_info
	bnxt_en: Prevent XDP redirect from running when stopping TX queue
	sfc: Do not free an empty page_ring
	RDMA/mlx5: Don't remove cache MRs when a delay is needed
	RDMA/mlx5: Add a missing update of cache->last_add
	IB/cm: Cancel mad on the DREQ event when the state is MRA_REP_RCVD
	IB/rdmavt: add lock to call to rvt_error_qp to prevent a race condition
	sctp: count singleton chunks in assoc user stats
	dpaa2-ptp: Fix refcount leak in dpaa2_ptp_probe
	ice: Set txq_teid to ICE_INVAL_TEID on ring creation
	ice: Do not skip not enabled queues in ice_vc_dis_qs_msg
	ipv6: Fix stats accounting in ip6_pkt_drop
	ice: synchronize_rcu() when terminating rings
	ice: xsk: fix VSI state check in ice_xsk_wakeup()
	net: openvswitch: don't send internal clone attribute to the userspace.
	net: ethernet: mv643xx: Fix over zealous checking of_get_mac_address()
	net: openvswitch: fix leak of nested actions
	rxrpc: fix a race in rxrpc_exit_net()
	net: sfc: fix using uninitialized xdp tx_queue
	net: phy: mscc-miim: reject clause 45 register accesses
	qede: confirm skb is allocated before using
	spi: bcm-qspi: fix MSPI only access with bcm_qspi_exec_mem_op()
	bpf: Support dual-stack sockets in bpf_tcp_check_syncookie
	drbd: Fix five use after free bugs in get_initial_state
	scsi: ufs: ufshpb: Fix a NULL check on list iterator
	io_uring: nospec index for tags on files update
	io_uring: don't touch scm_fp_list after queueing skb
	SUNRPC: Handle ENOMEM in call_transmit_status()
	SUNRPC: Handle low memory situations in call_status()
	SUNRPC: svc_tcp_sendmsg() should handle errors from xdr_alloc_bvec()
	iommu/omap: Fix regression in probe for NULL pointer dereference
	perf: arm-spe: Fix perf report --mem-mode
	perf tools: Fix perf's libperf_print callback
	perf session: Remap buf if there is no space for event
	arm64: Add part number for Arm Cortex-A78AE
	scsi: mpt3sas: Fix use after free in _scsih_expander_node_remove()
	scsi: ufs: ufs-pci: Add support for Intel MTL
	Revert "mmc: sdhci-xenon: fix annoying 1.8V regulator warning"
	mmc: block: Check for errors after write on SPI
	mmc: mmci: stm32: correctly check all elements of sg list
	mmc: renesas_sdhi: don't overwrite TAP settings when HS400 tuning is complete
	mmc: core: Fixup support for writeback-cache for eMMC and SD
	lz4: fix LZ4_decompress_safe_partial read out of bound
	highmem: fix checks in __kmap_local_sched_{in,out}
	mmmremap.c: avoid pointless invalidate_range_start/end on mremap(old_size=0)
	mm/mempolicy: fix mpol_new leak in shared_policy_replace
	io_uring: don't check req->file in io_fsync_prep()
	io_uring: defer splice/tee file validity check until command issue
	io_uring: implement compat handling for IORING_REGISTER_IOWQ_AFF
	io_uring: fix race between timeout flush and removal
	x86/pm: Save the MSR validity status at context setup
	x86/speculation: Restore speculation related MSRs during S3 resume
	perf/x86/intel: Update the FRONTEND MSR mask on Sapphire Rapids
	btrfs: fix qgroup reserve overflow the qgroup limit
	btrfs: prevent subvol with swapfile from being deleted
	spi: core: add dma_map_dev for __spi_unmap_msg()
	arm64: patch_text: Fixup last cpu should be master
	RDMA/hfi1: Fix use-after-free bug for mm struct
	gpio: Restrict usage of GPIO chip irq members before initialization
	x86/msi: Fix msi message data shadow struct
	x86/mm/tlb: Revert retpoline avoidance approach
	perf/x86/intel: Don't extend the pseudo-encoding to GP counters
	ata: sata_dwc_460ex: Fix crash due to OOB write
	perf: qcom_l2_pmu: fix an incorrect NULL check on list iterator
	perf/core: Inherit event_caps
	irqchip/gic-v3: Fix GICR_CTLR.RWP polling
	fbdev: Fix unregistering of framebuffers without device
	amd/display: set backlight only if required
	SUNRPC: Prevent immediate close+reconnect
	drm/panel: ili9341: fix optional regulator handling
	drm/amdgpu/display: change pipe policy for DCN 2.1
	drm/amdgpu/smu10: fix SoC/fclk units in auto mode
	drm/amdgpu/vcn: Fix the register setting for vcn1
	drm/nouveau/pmu: Add missing callbacks for Tegra devices
	drm/amdkfd: Create file descriptor after client is added to smi_clients list
	drm/amdgpu: don't use BACO for reset in S3
	KVM: SVM: Allow AVIC support on system w/ physical APIC ID > 255
	net/smc: send directly on setting TCP_NODELAY
	Revert "selftests: net: Add tls config dependency for tls selftests"
	bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide
	selftests/bpf: Fix u8 narrow load checks for bpf_sk_lookup remote_port
	rtc: mc146818-lib: fix signedness bug in mc146818_get_time()
	SUNRPC: Don't call connect() more than once on a TCP socket
	Revert "nbd: fix possible overflow on 'first_minor' in nbd_dev_add()"
	perf build: Don't use -ffat-lto-objects in the python feature test when building with clang-13
	perf python: Fix probing for some clang command line options
	tools build: Filter out options and warnings not supported by clang
	tools build: Use $(shell ) instead of `` to get embedded libperl's ccopts
	dmaengine: Revert "dmaengine: shdma: Fix runtime PM imbalance on error"
	KVM: avoid NULL pointer dereference in kvm_dirty_ring_push
	Revert "net/mlx5: Accept devlink user input after driver initialization complete"
	ubsan: remove CONFIG_UBSAN_OBJECT_SIZE
	selftests: cgroup: Make cg_create() use 0755 for permission instead of 0644
	selftests: cgroup: Test open-time credential usage for migration checks
	selftests: cgroup: Test open-time cgroup namespace usage for migration checks
	mm: don't skip swap entry even if zap_details specified
	Drivers: hv: vmbus: Replace smp_store_mb() with virt_store_mb()
	x86/bug: Prevent shadowing in __WARN_FLAGS
	sched: Teach the forced-newidle balancer about CPU affinity limitation.
	x86,static_call: Fix __static_call_return0 for i386
	irqchip/gic-v4: Wait for GICR_VPENDBASER.Dirty to clear before descheduling
	powerpc/64: Fix build failure with allyesconfig in book3s_64_entry.S
	irqchip/gic, gic-v3: Prevent GSI to SGI translations
	mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning
	static_call: Don't make __static_call_return0 static
	powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit
	stacktrace: move filter_irq_stacks() to kernel/stacktrace.c
	Linux 5.15.34

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I98049d0d8ebd427296418d31085bfde482ad30e7
308 files changed, 3542 insertions(+), 1900 deletions(-)


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 33
+SUBLEVEL = 34
EXTRAVERSION =
NAME = Trick or Treat


@@ -80,7 +80,12 @@ init_rtc_epoch(void)
static int
alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
-mc146818_get_time(tm);
+int ret = mc146818_get_time(tm);
+if (ret < 0) {
+dev_err_ratelimited(dev, "unable to read current time\n");
+return ret;
+}
/* Adjust for non-default epochs. It's easier to depend on the
generic __get_rtc_time and adjust the epoch here than create


@@ -75,6 +75,7 @@
#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
+#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A710 0xD47
@@ -123,6 +124,7 @@
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)


@@ -117,8 +117,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
int i, ret = 0;
struct aarch64_insn_patch *pp = arg;
-/* The first CPU becomes master */
-if (atomic_inc_return(&pp->cpu_count) == 1) {
+/* The last CPU becomes master */
+if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
pp->new_insns[i]);


@@ -853,6 +853,7 @@ u8 spectre_bhb_loop_affected(int scope)
if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),


@@ -239,6 +239,7 @@ asmlinkage notrace void secondary_start_kernel(void)
* Log the CPU info before it is marked online and might get read.
*/
cpuinfo_store_cpu();
+store_cpu_topology(cpu);
/*
* Enable GIC and timers.
@@ -247,7 +248,6 @@ asmlinkage notrace void secondary_start_kernel(void)
ipi_setup(cpu);
-store_cpu_topology(cpu);
numa_add_cpu(cpu);
/*


@@ -450,7 +450,7 @@
#address-cells = <1>;
#size-cells = <1>;
-eth0_addr: eth-mac-addr@0x22 {
+eth0_addr: eth-mac-addr@22 {
reg = <0x22 0x6>;
};
};


@@ -16,7 +16,7 @@ static inline void setup_8250_early_printk_port(unsigned long base,
unsigned int reg_shift, unsigned int timeout) {}
#endif
-extern void set_handler(unsigned long offset, void *addr, unsigned long len);
+void set_handler(unsigned long offset, const void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
typedef void (*vi_handler_t)(void);


@@ -2085,19 +2085,19 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* If no shadow set is selected then use the default handler
* that does normal register saving and standard interrupt exit
*/
-extern char except_vec_vi, except_vec_vi_lui;
-extern char except_vec_vi_ori, except_vec_vi_end;
-extern char rollback_except_vec_vi;
-char *vec_start = using_rollback_handler() ?
-&rollback_except_vec_vi : &except_vec_vi;
+extern const u8 except_vec_vi[], except_vec_vi_lui[];
+extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+extern const u8 rollback_except_vec_vi[];
+const u8 *vec_start = using_rollback_handler() ?
+rollback_except_vec_vi : except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
-const int lui_offset = &except_vec_vi_lui - vec_start + 2;
-const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+const int lui_offset = except_vec_vi_lui - vec_start + 2;
+const int ori_offset = except_vec_vi_ori - vec_start + 2;
#else
-const int lui_offset = &except_vec_vi_lui - vec_start;
-const int ori_offset = &except_vec_vi_ori - vec_start;
+const int lui_offset = except_vec_vi_lui - vec_start;
+const int ori_offset = except_vec_vi_ori - vec_start;
#endif
-const int handler_len = &except_vec_vi_end - vec_start;
+const int handler_len = except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
/*
@@ -2305,7 +2305,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
}
/* Install CPU exception handler */
-void set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, const void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);


@@ -61,6 +61,7 @@ static int __init ill_acc_of_setup(void)
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("%pOFn: failed to lookup pdev\n", np);
+of_node_put(np);
return -EINVAL;
}


@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
*need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
-if (flags)
raw_spin_lock_irqsave(&patch_lock, *flags);
-else
-__acquire(&patch_lock);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
clear_fixmap(fixmap);
-if (flags)
raw_spin_unlock_irqrestore(&patch_lock, *flags);
-else
-__release(&patch_lock);
}
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
int mapped;
/* Make sure we don't have any aliases in cache */
-flush_kernel_vmap_range(addr, len);
-flush_icache_range(start, end);
+flush_kernel_dcache_range_asm(start, end);
+flush_kernel_icache_range_asm(start, end);
+flush_tlb_kernel_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
* We're crossing a page boundary, so
* need to remap
*/
-flush_kernel_vmap_range((void *)fixmap,
-(p-fixmap) * sizeof(*p));
+flush_kernel_dcache_range_asm((unsigned long)fixmap,
+(unsigned long)p);
+flush_tlb_kernel_range((unsigned long)fixmap,
+(unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
}
}
-flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
+flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
+flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
+flush_icache_range(start, end);
}
void __kprobes __patch_text(void *addr, u32 insn)


@@ -139,12 +139,12 @@
fman@400000 {
ethernet@e6000 {
phy-handle = <&phy_rgmii_0>;
phy-connection-type = "rgmii";
phy-connection-type = "rgmii-id";
};
ethernet@e8000 {
phy-handle = <&phy_rgmii_1>;
phy-connection-type = "rgmii";
phy-connection-type = "rgmii-id";
};
mdio0: mdio@fc000 {


@@ -567,7 +567,7 @@ DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
/* hash_utils.c */
-DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
+DECLARE_INTERRUPT_HANDLER(do_hash_fault);
/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);


@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#define virt_addr_valid(vaddr) ({ \
+unsigned long _addr = (unsigned long)vaddr; \
+_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
+pfn_valid(virt_to_pfn(_addr)); \
+})
/*
* On Book-E parts we need __va to parse the device tree and we can't
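The extra bounds check matters because pfn_valid(virt_to_pfn()) alone can be fooled by pointers outside the linear map, which is exactly what the new macro rejects. A minimal, hypothetical C sketch of the guard pattern virt_addr_valid() exists for (not part of this merge):

/* Hedged sketch: reject pointers outside the linear map before
 * handing them to virt_to_page(); page_of_linear() is hypothetical. */
static struct page *page_of_linear(const void *p)
{
	if (!virt_addr_valid(p))	/* vmalloc/ioremap or bogus address */
		return NULL;
	return virt_to_page(p);
}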


@@ -1235,6 +1235,12 @@ int __init early_init_dt_scan_rtas(unsigned long node,
entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
+#ifdef CONFIG_PPC64
+/* need this feature to decide the crashkernel offset */
+if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
+powerpc_firmware_features |= FW_FEATURE_LPAR;
+#endif
if (basep && entryp && sizep) {
rtas.base = *basep;
rtas.entry = *entryp;


@@ -26,15 +26,18 @@ static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr,
const char *format;
node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
-if (!of_device_is_available(node))
-return -ENODEV;
+if (!of_device_is_available(node)) {
+rc = -ENODEV;
+goto out;
+}
rc = of_property_read_string(node, "format", &format);
if (rc)
-return rc;
+goto out;
rc = sprintf(buf, "%s\n", format);
+out:
of_node_put(node);
return rc;


@@ -147,10 +147,17 @@ void __init reserve_crashkernel(void)
if (!crashk_res.start) {
#ifdef CONFIG_PPC64
/*
-* On 64bit we split the RMO in half but cap it at half of
-* a small SLB (128MB) since the crash kernel needs to place
-* itself and some stacks to be in the first segment.
+* On the LPAR platform place the crash kernel to mid of
+* RMA size (512MB or more) to ensure the crash kernel
+* gets enough space to place itself and some stack to be
+* in the first segment. At the same time normal kernel
+* also get enough space to allocate memory for essential
+* system resource in the first segment. Keep the crash
+* kernel starts at 128MB offset on other platforms.
*/
+if (firmware_has_feature(FW_FEATURE_LPAR))
+crashk_res.start = ppc64_rma_size / 2;
+else
crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
#else
crashk_res.start = KDUMP_KERNELBASE;


@@ -407,10 +407,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
*/
ld r10,HSTATE_SCRATCH0(r13)
cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
-beq machine_check_common
+beq .Lcall_machine_check_common
cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
-beq system_reset_common
+beq .Lcall_system_reset_common
b .
+.Lcall_machine_check_common:
+b machine_check_common
+.Lcall_system_reset_common:
+b system_reset_common
#endif


@@ -1522,8 +1522,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
-DECLARE_INTERRUPT_HANDLER(__do_hash_fault);
-DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
+DEFINE_INTERRUPT_HANDLER(do_hash_fault)
{
unsigned long ea = regs->dar;
unsigned long dsisr = regs->dsisr;
@@ -1582,35 +1581,6 @@ DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
}
}
-/*
-* The _RAW interrupt entry checks for the in_nmi() case before
-* running the full handler.
-*/
-DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
-{
-/*
-* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
-* don't call hash_page, just fail the fault. This is required to
-* prevent re-entrancy problems in the hash code, namely perf
-* interrupts hitting while something holds H_PAGE_BUSY, and taking a
-* hash fault. See the comment in hash_preload().
-*
-* We come here as a result of a DSI at a point where we don't want
-* to call hash_page, such as when we are accessing memory (possibly
-* user memory) inside a PMU interrupt that occurred while interrupts
-* were soft-disabled. We want to invoke the exception handler for
-* the access, or panic if there isn't a handler.
-*/
-if (unlikely(in_nmi())) {
-do_bad_page_fault_segv(regs);
-return 0;
-}
-__do_hash_fault(regs);
-return 0;
-}
#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
@@ -1677,26 +1647,18 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
#endif /* CONFIG_PPC_64K_PAGES */
/*
-* __hash_page_* must run with interrupts off, as it sets the
-* H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
-* time and may take a hash fault reading the user stack, see
-* read_user_stack_slow() in the powerpc/perf code.
+* __hash_page_* must run with interrupts off, including PMI interrupts
+* off, as it sets the H_PAGE_BUSY bit.
*
-* If that takes a hash fault on the same page as we lock here, it
-* will bail out when seeing H_PAGE_BUSY set, and retry the access
-* leading to an infinite loop.
-*
-* Disabling interrupts here does not prevent perf interrupts, but it
-* will prevent them taking hash faults (see the NMI test in
-* do_hash_page), then read_user_stack's copy_from_user_nofault will
-* fail and perf will fall back to read_user_stack_slow(), which
-* walks the Linux page tables.
+* It's otherwise possible for perf interrupts to hit at any time and
+* may take a hash fault reading the user stack, which could take a
+* hash miss and deadlock on the same H_PAGE_BUSY bit.
*
* Interrupts must also be off for the duration of the
* mm_is_thread_local test and update, to prevent preempt running the
* mm on another CPU (XXX: this may be racy vs kthread_use_mm).
*/
-local_irq_save(flags);
+powerpc_local_irq_pmu_save(flags);
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
@@ -1721,7 +1683,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
-local_irq_restore(flags);
+powerpc_local_irq_pmu_restore(flags);
}
/*


@@ -15,12 +15,14 @@
#include <asm/pgtable.h>
+static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
+unsigned long old, unsigned long new)
+{
+return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0);
+}
/*
-* Updates the attributes of a page in three steps:
-*
-* 1. take the page_table_lock
-* 2. install the new entry with the updated attributes
-* 3. flush the TLB
+* Updates the attributes of a page atomically.
*
* This sequence is safe against concurrent updates, and also allows updating the
* attributes of a page currently being executed or accessed.
@@ -28,25 +30,21 @@
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
long action = (long)data;
-pte_t pte;
-spin_lock(&init_mm.page_table_lock);
-pte = ptep_get(ptep);
-/* modify the PTE bits as desired, then apply */
+/* modify the PTE bits as desired */
switch (action) {
case SET_MEMORY_RO:
-pte = pte_wrprotect(pte);
+/* Don't clear DIRTY bit */
+pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
break;
case SET_MEMORY_RW:
-pte = pte_mkwrite(pte_mkdirty(pte));
+pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
break;
case SET_MEMORY_NX:
-pte = pte_exprotect(pte);
+pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO);
break;
case SET_MEMORY_X:
-pte = pte_mkexec(pte);
+pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
break;
case SET_MEMORY_NP:
pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
@@ -59,16 +57,12 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
break;
}
-pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
/* See ptesync comment in radix__set_pte_at() */
if (radix_enabled())
asm volatile("ptesync": : :"memory");
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-spin_unlock(&init_mm.page_table_lock);
return 0;
}


@@ -2,7 +2,6 @@
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H
-int read_user_stack_slow(const void __user *ptr, void *buf, int nb);
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
@@ -26,17 +25,11 @@ static inline int __read_user_stack(const void __user *ptr, void *ret,
size_t size)
{
unsigned long addr = (unsigned long)ptr;
-int rc;
if (addr > TASK_SIZE - size || (addr & (size - 1)))
return -EFAULT;
-rc = copy_from_user_nofault(ret, ptr, size);
-if (IS_ENABLED(CONFIG_PPC64) && !radix_enabled() && rc)
-return read_user_stack_slow(ptr, ret, size);
-return rc;
+return copy_from_user_nofault(ret, ptr, size);
}
#endif /* _POWERPC_PERF_CALLCHAIN_H */


@@ -18,33 +18,6 @@
#include "callchain.h"
-/*
-* On 64-bit we don't want to invoke hash_page on user addresses from
-* interrupt context, so if the access faults, we read the page tables
-* to find which page (if any) is mapped and access it directly. Radix
-* has no need for this so it doesn't use read_user_stack_slow.
-*/
-int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
-{
-unsigned long addr = (unsigned long) ptr;
-unsigned long offset;
-struct page *page;
-void *kaddr;
-if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
-kaddr = page_address(page);
-/* align address to page boundary */
-offset = addr & ~PAGE_MASK;
-memcpy(buf, kaddr + offset, nb);
-put_page(page);
-return 0;
-}
-return -EFAULT;
-}
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
return __read_user_stack(ptr, ret, sizeof(*ret));


@@ -111,6 +111,7 @@ config PPC_BOOK3S_64
config PPC_BOOK3E_64
bool "Embedded processors"
+select PPC_FSL_BOOK3E
select PPC_FPU # Make it a choice ?
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
@@ -287,7 +288,7 @@ config FSL_BOOKE
config PPC_FSL_BOOK3E
bool
select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
-select FSL_EMB_PERFMON
+imply FSL_EMB_PERFMON
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
default y if FSL_BOOKE


@@ -1,64 +1,316 @@
-/* SPDX-License-Identifier: GPL-2.0 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022 Michael T. Kloos <michael@michaelkloos.com>
*/
#include <linux/linkage.h>
#include <asm/asm.h>
-ENTRY(__memmove)
-WEAK(memmove)
-move t0, a0
-move t1, a1
SYM_FUNC_START(__memmove)
SYM_FUNC_START_WEAK(memmove)
/*
* Returns
* a0 - dest
*
* Parameters
* a0 - Inclusive first byte of dest
* a1 - Inclusive first byte of src
* a2 - Length of copy n
*
* Because the return matches the parameter register a0,
* we will not clobber or modify that register.
*
* Note: This currently only works on little-endian.
* To port to big-endian, reverse the direction of shifts
* in the 2 misaligned fixup copy loops.
*/
-beq a0, a1, exit_memcpy
-beqz a2, exit_memcpy
-srli t2, a2, 0x2
/* Return if nothing to do */
beq a0, a1, return_from_memmove
beqz a2, return_from_memmove
-slt t3, a0, a1
-beqz t3, do_reverse
/*
* Register Uses
* Forward Copy: a1 - Index counter of src
* Reverse Copy: a4 - Index counter of src
* Forward Copy: t3 - Index counter of dest
* Reverse Copy: t4 - Index counter of dest
* Both Copy Modes: t5 - Inclusive first multibyte/aligned of dest
* Both Copy Modes: t6 - Non-Inclusive last multibyte/aligned of dest
* Both Copy Modes: t0 - Link / Temporary for load-store
* Both Copy Modes: t1 - Temporary for load-store
* Both Copy Modes: t2 - Temporary for load-store
* Both Copy Modes: a5 - dest to src alignment offset
* Both Copy Modes: a6 - Shift ammount
* Both Copy Modes: a7 - Inverse Shift ammount
* Both Copy Modes: a2 - Alternate breakpoint for unrolled loops
*/
-andi a2, a2, 0x3
-li t4, 1
-beqz t2, byte_copy
/*
* Solve for some register values now.
* Byte copy does not need t5 or t6.
*/
mv t3, a0
add t4, a0, a2
add a4, a1, a2
-word_copy:
-lw t3, 0(a1)
-addi t2, t2, -1
-addi a1, a1, 4
-sw t3, 0(a0)
-addi a0, a0, 4
-bnez t2, word_copy
-beqz a2, exit_memcpy
-j byte_copy
/*
* Byte copy if copying less than (2 * SZREG) bytes. This can
* cause problems with the bulk copy implementation and is
* small enough not to bother.
*/
andi t0, a2, -(2 * SZREG)
beqz t0, byte_copy
-do_reverse:
-add a0, a0, a2
-add a1, a1, a2
-andi a2, a2, 0x3
-li t4, -1
-beqz t2, reverse_byte_copy
/*
* Now solve for t5 and t6.
*/
andi t5, t3, -SZREG
andi t6, t4, -SZREG
/*
* If dest(Register t3) rounded down to the nearest naturally
* aligned SZREG address, does not equal dest, then add SZREG
* to find the low-bound of SZREG alignment in the dest memory
* region. Note that this could overshoot the dest memory
* region if n is less than SZREG. This is one reason why
* we always byte copy if n is less than SZREG.
* Otherwise, dest is already naturally aligned to SZREG.
*/
beq t5, t3, 1f
addi t5, t5, SZREG
1:
-reverse_word_copy:
-addi a1, a1, -4
-addi t2, t2, -1
-lw t3, 0(a1)
-addi a0, a0, -4
-sw t3, 0(a0)
-bnez t2, reverse_word_copy
-beqz a2, exit_memcpy
/*
* If the dest and src are co-aligned to SZREG, then there is
* no need for the full rigmarole of a full misaligned fixup copy.
* Instead, do a simpler co-aligned copy.
*/
xor t0, a0, a1
andi t1, t0, (SZREG - 1)
beqz t1, coaligned_copy
/* Fall through to misaligned fixup copy */
-reverse_byte_copy:
-addi a0, a0, -1
-addi a1, a1, -1
misaligned_fixup_copy:
bltu a1, a0, misaligned_fixup_copy_reverse
misaligned_fixup_copy_forward:
jal t0, byte_copy_until_aligned_forward
andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
sub a5, a1, t3 /* Find the difference between src and dest */
andi a1, a1, -SZREG /* Align the src pointer */
addi a2, t6, SZREG /* The other breakpoint for the unrolled loop*/
/*
* Compute The Inverse Shift
* a7 = XLEN - a6 = XLEN + -a6
* 2s complement negation to find the negative: -a6 = ~a6 + 1
* Add that to XLEN. XLEN = SZREG * 8.
*/
not a7, a6
addi a7, a7, (SZREG * 8 + 1)
/*
* Fix Misalignment Copy Loop - Forward
* load_val0 = load_ptr[0];
* do {
* load_val1 = load_ptr[1];
* store_ptr += 2;
* store_ptr[0 - 2] = (load_val0 >> {a6}) | (load_val1 << {a7});
*
* if (store_ptr == {a2})
* break;
*
* load_val0 = load_ptr[2];
* load_ptr += 2;
* store_ptr[1 - 2] = (load_val1 >> {a6}) | (load_val0 << {a7});
*
* } while (store_ptr != store_ptr_end);
* store_ptr = store_ptr_end;
*/
REG_L t0, (0 * SZREG)(a1)
1:
REG_L t1, (1 * SZREG)(a1)
addi t3, t3, (2 * SZREG)
srl t0, t0, a6
sll t2, t1, a7
or t2, t0, t2
REG_S t2, ((0 * SZREG) - (2 * SZREG))(t3)
beq t3, a2, 2f
REG_L t0, (2 * SZREG)(a1)
addi a1, a1, (2 * SZREG)
srl t1, t1, a6
sll t2, t0, a7
or t2, t1, t2
REG_S t2, ((1 * SZREG) - (2 * SZREG))(t3)
bne t3, t6, 1b
2:
mv t3, t6 /* Fix the dest pointer in case the loop was broken */
add a1, t3, a5 /* Restore the src pointer */
j byte_copy_forward /* Copy any remaining bytes */
misaligned_fixup_copy_reverse:
jal t0, byte_copy_until_aligned_reverse
andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
sub a5, a4, t4 /* Find the difference between src and dest */
andi a4, a4, -SZREG /* Align the src pointer */
addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop*/
/*
* Compute The Inverse Shift
* a7 = XLEN - a6 = XLEN + -a6
* 2s complement negation to find the negative: -a6 = ~a6 + 1
* Add that to XLEN. XLEN = SZREG * 8.
*/
not a7, a6
addi a7, a7, (SZREG * 8 + 1)
/*
* Fix Misalignment Copy Loop - Reverse
* load_val1 = load_ptr[0];
* do {
* load_val0 = load_ptr[-1];
* store_ptr -= 2;
* store_ptr[1] = (load_val0 >> {a6}) | (load_val1 << {a7});
*
* if (store_ptr == {a2})
* break;
*
* load_val1 = load_ptr[-2];
* load_ptr -= 2;
* store_ptr[0] = (load_val1 >> {a6}) | (load_val0 << {a7});
*
* } while (store_ptr != store_ptr_end);
* store_ptr = store_ptr_end;
*/
REG_L t1, ( 0 * SZREG)(a4)
1:
REG_L t0, (-1 * SZREG)(a4)
addi t4, t4, (-2 * SZREG)
sll t1, t1, a7
srl t2, t0, a6
or t2, t1, t2
REG_S t2, ( 1 * SZREG)(t4)
beq t4, a2, 2f
REG_L t1, (-2 * SZREG)(a4)
addi a4, a4, (-2 * SZREG)
sll t0, t0, a7
srl t2, t1, a6
or t2, t0, t2
REG_S t2, ( 0 * SZREG)(t4)
bne t4, t5, 1b
2:
mv t4, t5 /* Fix the dest pointer in case the loop was broken */
add a4, t4, a5 /* Restore the src pointer */
j byte_copy_reverse /* Copy any remaining bytes */
/*
* Simple copy loops for SZREG co-aligned memory locations.
* These also make calls to do byte copies for any unaligned
* data at their terminations.
*/
coaligned_copy:
bltu a1, a0, coaligned_copy_reverse
coaligned_copy_forward:
jal t0, byte_copy_until_aligned_forward
1:
REG_L t1, ( 0 * SZREG)(a1)
addi a1, a1, SZREG
addi t3, t3, SZREG
REG_S t1, (-1 * SZREG)(t3)
bne t3, t6, 1b
j byte_copy_forward /* Copy any remaining bytes */
coaligned_copy_reverse:
jal t0, byte_copy_until_aligned_reverse
1:
REG_L t1, (-1 * SZREG)(a4)
addi a4, a4, -SZREG
addi t4, t4, -SZREG
REG_S t1, ( 0 * SZREG)(t4)
bne t4, t5, 1b
j byte_copy_reverse /* Copy any remaining bytes */
/*
* These are basically sub-functions within the function. They
* are used to byte copy until the dest pointer is in alignment.
* At which point, a bulk copy method can be used by the
* calling code. These work on the same registers as the bulk
* copy loops. Therefore, the register values can be picked
* up from where they were left and we avoid code duplication
* without any overhead except the call in and return jumps.
*/
byte_copy_until_aligned_forward:
beq t3, t5, 2f
1:
lb t1, 0(a1)
addi a1, a1, 1
addi t3, t3, 1
sb t1, -1(t3)
bne t3, t5, 1b
2:
jalr zero, 0x0(t0) /* Return to multibyte copy loop */
byte_copy_until_aligned_reverse:
beq t4, t6, 2f
1:
lb t1, -1(a4)
addi a4, a4, -1
addi t4, t4, -1
sb t1, 0(t4)
bne t4, t6, 1b
2:
jalr zero, 0x0(t0) /* Return to multibyte copy loop */
/*
* Simple byte copy loops.
* These will byte copy until they reach the end of data to copy.
* At that point, they will call to return from memmove.
*/
byte_copy:
-lb t3, 0(a1)
-addi a2, a2, -1
-sb t3, 0(a0)
-add a1, a1, t4
-add a0, a0, t4
-bnez a2, byte_copy
bltu a1, a0, byte_copy_reverse
-exit_memcpy:
-move a0, t0
-move a1, t1
byte_copy_forward:
beq t3, t4, 2f
1:
lb t1, 0(a1)
addi a1, a1, 1
addi t3, t3, 1
sb t1, -1(t3)
bne t3, t4, 1b
2:
ret
-END(__memmove)
byte_copy_reverse:
beq t4, t3, 2f
1:
lb t1, -1(a4)
addi a4, a4, -1
addi t4, t4, -1
sb t1, 0(t4)
bne t4, t3, 1b
2:
return_from_memmove:
ret
SYM_FUNC_END(memmove)
SYM_FUNC_END(__memmove)
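All three misaligned fixup loops above are instances of the classic two-word shift-and-merge. A hedged C rendering of a single step, little-endian as in the assembly (illustrative only, not kernel code):

/* Hedged C sketch of one misaligned-fixup step (little-endian).
 * off_bytes is the src/dest misalignment, 1..SZREG-1; an offset of 0
 * never reaches this path because co-aligned copies take the simple
 * coaligned_copy loops instead, so neither shift count is 0 or XLEN. */
static unsigned long merge_step(unsigned long lo, unsigned long hi,
				unsigned int off_bytes)
{
	unsigned int shift = off_bytes * 8;		/* a6 in the asm */
	unsigned int inv = sizeof(long) * 8 - shift;	/* a7 in the asm */

	return (lo >> shift) | (hi << inv);		/* srl; sll; or */
}

The assembly computes inv without a subtract: a7 = ~a6 + (SZREG * 8 + 1), the two's-complement spelling of SZREG * 8 - a6. For example, on RV64 with a 3-byte offset, a6 = 24 and a7 = ~24 + 65 = 40 = 64 - 24.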


@@ -4,8 +4,10 @@
#ifdef CONFIG_64BIT
#undef CONFIG_X86_32
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64))
#else
#define CONFIG_X86_32 1
+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs))
#endif
#include <asm/cpufeature.h>
@@ -16,7 +18,7 @@
#undef XOR_SELECT_TEMPLATE
/* pick an arbitrary one - measuring isn't possible with inf-cpu */
#define XOR_SELECT_TEMPLATE(x) \
-(time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)
+(time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x))
#endif
#endif


@@ -2800,6 +2800,11 @@ config IA32_AOUT
config X86_X32
bool "x32 ABI for 64-bit mode"
depends on X86_64
+# llvm-objcopy does not convert x86_64 .note.gnu.property or
+# compressed debug sections to x86_x32 properly:
+# https://github.com/ClangBuiltLinux/linux/issues/514
+# https://github.com/ClangBuiltLinux/linux/issues/1141
+depends on $(success,$(OBJCOPY) --version | head -n1 | grep -qv llvm)
help
Include code to run binaries for the x32 native 32-bit ABI
for 64-bit processors. An x32 process gets access to the


@@ -281,7 +281,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
EVENT_EXTRA_END
@@ -5466,7 +5466,11 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con
/* Disabled fixed counters which are not in CPUID */
c->idxmsk64 &= intel_ctrl;
-if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+/*
+* Don't extend the pseudo-encoding to the
+* generic counters
+*/
+if (!use_fixed_pseudo_encoding(c->code))
c->idxmsk64 |= (1ULL << num_counters) - 1;
}
c->idxmsk64 &=


@@ -77,9 +77,9 @@ do { \
*/
#define __WARN_FLAGS(flags) \
do { \
-__auto_type f = BUGFLAG_WARNING|(flags); \
+__auto_type __flags = BUGFLAG_WARNING|(flags); \
instrumentation_begin(); \
-_BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
+_BUG_FLAGS(ASM_UD2, __flags, ASM_REACHABLE); \
instrumentation_end(); \
} while (0)
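The rename from f to __flags closes a classic macro-hygiene hole: a macro-local variable can shadow an identifier used in the caller's argument expression. A hedged, self-contained illustration (hypothetical macro, not kernel code):

/* Hypothetical demo of the shadowing hazard the rename avoids. */
extern void handle(void);

#define CHECK(x) do { int f = (x); if (f) handle(); } while (0)

void demo(int f)
{
	CHECK(f > 0);	/* expands to: int f = (f > 0); -- the initializer
			 * now reads the new, uninitialized inner 'f',
			 * not the function parameter */
}

With the kernel's __auto_type the self-reference typically fails to build outright; either way, a reserved-style name like __flags is far less likely to collide with caller identifiers.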


@@ -99,7 +99,8 @@
}
#define ASM_CALL_ARG0 \
"call %P[__func] \n"
"call %P[__func] \n" \
ASM_REACHABLE
#define ASM_CALL_ARG1 \
"movq %[arg1], %%rdi \n" \


@@ -505,6 +505,7 @@ struct kvm_pmu {
u64 global_ctrl_mask;
u64 global_ovf_ctrl_mask;
u64 reserved_bits;
+u64 raw_event_mask;
u8 version;
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];


@@ -12,14 +12,17 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
/* Structs and defines for the X86 specific MSI message format */
typedef struct x86_msi_data {
+union {
+struct {
u32 vector : 8,
delivery_mode : 3,
dest_mode_logical : 1,
reserved : 2,
active_low : 1,
is_level : 1;
+};
u32 dmar_subhandle;
+};
} __attribute__ ((packed)) arch_msi_msg_data_t;
#define arch_msi_msg_data x86_msi_data


@@ -241,6 +241,11 @@ struct x86_pmu_capability {
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
+static inline bool use_fixed_pseudo_encoding(u64 code)
+{
+return !(code & 0xff);
+}
/*
* We model BTS tracing as another fixed-mode PMC.
*
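The helper works because Intel's fixed-counter pseudo-encodings carry their meaning in the umask byte and leave the event-select byte zero. Two hedged examples (values shown only for illustration):

use_fixed_pseudo_encoding(0x0300);	/* ref-cycles pseudo-encoding, event byte 0x00 -> true */
use_fixed_pseudo_encoding(0x003c);	/* ordinary event 0x3c (CPU cycles), event byte 0x3c -> false */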


@@ -1435,8 +1435,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
hpet_rtc_timer_reinit();
memset(&curr_time, 0, sizeof(struct rtc_time));
-if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
-mc146818_get_time(&curr_time);
+if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
+if (unlikely(mc146818_get_time(&curr_time) < 0)) {
+pr_err_ratelimited("unable to read current time from RTC\n");
+return IRQ_HANDLED;
+}
+}
if (hpet_rtc_flags & RTC_UIE &&
curr_time.tm_sec != hpet_prev_update_sec) {


@@ -12,10 +12,9 @@ enum insn_type {
};
/*
-* data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
-* The REX.W cancels the effect of any data16.
+* cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
*/
-static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
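The byte choice is worth decoding; the commentary below is added here for illustration from the standard x86 encodings, it is not text from the patch:

/* old: 66 66 48 31 c0 -- data16, data16, REX.W, xor %rax,%rax.  i386 has
 *      no REX prefixes; there 0x48 decodes as 'dec %eax', so the bytes
 *      are no longer a single 5-byte instruction.
 * new: 2e 2e 2e 31 c0 -- three CS segment-override prefixes padding
 *      'xorl %eax,%eax'.  Valid in both modes, and on x86-64 writing
 *      %eax zero-extends into %rax, so one encoding clears %[er]ax
 *      everywhere. */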


@@ -3514,8 +3514,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
u64 tsc_aux = 0;
-if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+if (!ctxt->ops->guest_has_rdpid(ctxt))
return emulate_ud(ctxt);
+ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
ctxt->dst.val = tsc_aux;
return X86EMUL_CONTINUE;
}


@@ -226,6 +226,7 @@ struct x86_emulate_ops {
bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
+bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);


@@ -96,8 +96,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
u64 config, bool exclude_user,
-bool exclude_kernel, bool intr,
-bool in_tx, bool in_tx_cp)
+bool exclude_kernel, bool intr)
{
struct perf_event *event;
struct perf_event_attr attr = {
@@ -113,16 +112,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
attr.sample_period = get_sample_period(pmc, pmc->counter);
-if (in_tx)
-attr.config |= HSW_IN_TX;
-if (in_tx_cp) {
+if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
+guest_cpuid_is_intel(pmc->vcpu)) {
/*
* HSW_IN_TX_CHECKPOINTED is not supported with nonzero
* period. Just clear the sample period so at least
* allocating the counter doesn't fail.
*/
attr.sample_period = 0;
-attr.config |= HSW_IN_TX_CHECKPOINTED;
}
event = perf_event_create_kernel_counter(&attr, -1, current,
@@ -178,6 +175,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
int i;
+struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
bool allow_event = true;
if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -217,7 +215,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
}
if (type == PERF_TYPE_RAW)
-config = eventsel & AMD64_RAW_EVENT_MASK;
+config = eventsel & pmu->raw_event_mask;
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
return;
@@ -228,9 +226,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
pmc_reprogram_counter(pmc, type, config,
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-eventsel & ARCH_PERFMON_EVENTSEL_INT,
-(eventsel & HSW_IN_TX),
-(eventsel & HSW_IN_TX_CHECKPOINTED));
+eventsel & ARCH_PERFMON_EVENTSEL_INT);
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
@@ -266,7 +262,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
kvm_x86_ops.pmu_ops->find_fixed_event(idx),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
-pmi, false, false);
+pmi);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);


@@ -947,15 +947,10 @@ out:
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
u64 entry;
-/* ID = 0xff (broadcast), ID > 0xff (reserved) */
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
-/*
-* Since the host physical APIC id is 8 bits,
-* we can support host APIC ID upto 255.
-*/
-if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
+if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;
entry = READ_ONCE(*(svm->avic_physical_id_cache));


@@ -261,13 +261,11 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* MSR_EVNTSELn */
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
if (pmc) {
-if (data == pmc->eventsel)
-return 0;
-if (!(data & pmu->reserved_bits)) {
+data &= ~pmu->reserved_bits;
+if (data != pmc->eventsel)
reprogram_gp_counter(pmc, data);
return 0;
-}
}
return 1;
}
@@ -283,6 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
pmu->reserved_bits = 0xfffffff000280000ull;
+pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;


@@ -22,6 +22,8 @@
#include <asm/svm.h>
#include <asm/sev-common.h>
#include "kvm_cache_regs.h"
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
#define IOPM_SIZE PAGE_SIZE * 3
@@ -497,7 +499,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
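For reference, GENMASK_ULL(11, 0) expands to 0xFFFULL: the host physical-ID field grows from 8 bits (IDs 0-255) to 12 bits (IDs 0-4095), matching the widened WARN_ON() check in avic_vcpu_load() above.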


@@ -4,7 +4,6 @@
*/
#include <linux/kvm_host.h>
-#include "kvm_cache_regs.h"
#include <asm/mshyperv.h>


@@ -396,6 +396,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
struct kvm_pmc *pmc;
u32 msr = msr_info->index;
u64 data = msr_info->data;
+u64 reserved_bits;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -451,7 +452,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
-if (!(data & pmu->reserved_bits)) {
+reserved_bits = pmu->reserved_bits;
+if ((pmc->idx == 2) &&
+(pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+if (!(data & reserved_bits)) {
reprogram_gp_counter(pmc, data);
return 0;
}
@@ -478,6 +483,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
+pmu->raw_event_mask = X86_RAW_EVENT_MASK;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry)
@@ -524,8 +530,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
if (entry &&
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+(entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+pmu->reserved_bits ^= HSW_IN_TX;
+pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+}
bitmap_set(pmu->all_valid_pmc_idx,
0, pmu->nr_arch_gp_counters);


@@ -7393,6 +7393,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
}
+static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
+{
+return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
+}
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
@@ -7475,6 +7480,7 @@ static const struct x86_emulate_ops emulate_ops = {
.guest_has_long_mode = emulator_guest_has_long_mode,
.guest_has_movbe = emulator_guest_has_movbe,
.guest_has_fxsr = emulator_guest_has_fxsr,
+.guest_has_rdpid = emulator_guest_has_rdpid,
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
.exiting_smm = emulator_exiting_smm,


@@ -854,13 +854,11 @@ done:
nr_invalidate);
}
-static bool tlb_is_not_lazy(int cpu)
+static bool tlb_is_not_lazy(int cpu, void *data)
{
return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
}
-static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
@@ -889,36 +887,11 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
* up on the new contents of what used to be page tables, while
* doing a speculative memory access.
*/
-if (info->freed_tables) {
+if (info->freed_tables)
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
-} else {
-/*
-* Although we could have used on_each_cpu_cond_mask(),
-* open-coding it has performance advantages, as it eliminates
-* the need for indirect calls or retpolines. In addition, it
-* allows to use a designated cpumask for evaluating the
-* condition, instead of allocating one.
-*
-* This code works under the assumption that there are no nested
-* TLB flushes, an assumption that is already made in
-* flush_tlb_mm_range().
-*
-* cond_cpumask is logically a stack-local variable, but it is
-* more efficient to have it off the stack and not to allocate
-* it on demand. Preemption is disabled and this code is
-* non-reentrant.
-*/
-struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
-int cpu;
-cpumask_clear(cond_cpumask);
-for_each_cpu(cpu, cpumask) {
-if (tlb_is_not_lazy(cpu))
-__cpumask_set_cpu(cpu, cond_cpumask);
-}
-on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true);
-}
+else
+on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
+(void *)info, 1, cpumask);
}
void flush_tlb_multi(const struct cpumask *cpumask,


@@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt)
struct saved_msr *end = msr + ctxt->saved_msrs.num;
while (msr < end) {
-msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
+if (msr->valid)
+rdmsrl(msr->info.msr_no, msr->info.reg.q);
msr++;
}
}
@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *msr_id, const int num)
}
for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+u64 dummy;
msr_array[i].info.msr_no = msr_id[j];
-msr_array[i].valid = false;
+msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
msr_array[i].info.reg.q = 0;
}
saved_msrs->num = total_num;
@@ -500,10 +503,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
return ret;
}
+static void pm_save_spec_msr(void)
+{
+u32 spec_msr_id[] = {
+MSR_IA32_SPEC_CTRL,
+MSR_IA32_TSX_CTRL,
+MSR_TSX_FORCE_ABORT,
+MSR_IA32_MCU_OPT_CTRL,
+MSR_AMD64_LS_CFG,
+};
+msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+}
static int pm_check_save_msr(void)
{
dmi_check_system(msr_save_dmi_table);
pm_cpu_check(msr_save_cpu_table);
+pm_save_spec_msr();
return 0;
}


@@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void)
*/
xen_vcpu_setup(0);
+/*
+* Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
+* Refer to comments in xen_hvm_init_time_ops().
+*/
+xen_hvm_init_time_ops();
/*
* The alternative logic (which patches the unlock/lock) runs before
* the smp bootup up code is activated. Hence we need to set this up


@@ -558,6 +558,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
void __init xen_hvm_init_time_ops(void)
{
+static bool hvm_time_initialized;
+if (hvm_time_initialized)
+return;
/*
* vector callback is needed otherwise we cannot receive interrupts
* on cpu > 0 and at this point we don't know how many cpus are
@@ -567,7 +572,22 @@ void __init xen_hvm_init_time_ops(void)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
pr_info("Xen doesn't support pvclock on HVM, disable pv timer");
pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer");
return;
}
/*
* Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'.
* The __this_cpu_read(xen_vcpu) is still NULL when Xen HVM guest
* boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec). To access
* __this_cpu_read(xen_vcpu) via xen_clocksource_read() will panic.
*
* The xen_hvm_init_time_ops() should be called again later after
* __this_cpu_read(xen_vcpu) is available.
*/
if (!__this_cpu_read(xen_vcpu)) {
pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n",
xen_vcpu_nr(0));
return;
}
@@ -577,6 +597,8 @@ void __init xen_hvm_init_time_ops(void)
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
x86_platform.set_wallclock = xen_set_wallclock;
hvm_time_initialized = true;
}
#endif
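The guard added above makes xen_hvm_init_time_ops() safe to call more than once: a static flag short-circuits repeat calls after success, and when the per-CPU vcpu_info is not yet available the function returns without setting the flag, so a later call can finish the job. A tiny standalone sketch of that defer-and-retry init pattern (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool prerequisite_ready;

    static void init_time_ops(void)
    {
        static bool initialized;

        if (initialized)
            return;                 /* already set up; repeat calls are no-ops */

        if (!prerequisite_ready) {
            printf("delaying init until prerequisite is available\n");
            return;                 /* flag stays false: retry later */
        }

        printf("performing one-time setup\n");
        initialized = true;
    }

    int main(void)
    {
        init_time_ops();            /* deferred */
        prerequisite_ready = true;
        init_time_ops();            /* completes the setup */
        init_time_ops();            /* no-op */
        return 0;
    }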


@@ -8,19 +8,19 @@
reg = <0x00000000 0x08000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
partition@0 {
label = "data";
reg = <0x00000000 0x06000000>;
};
partition@0x6000000 {
partition@6000000 {
label = "boot loader area";
reg = <0x06000000 0x00800000>;
};
partition@0x6800000 {
partition@6800000 {
label = "kernel image";
reg = <0x06800000 0x017e0000>;
};
partition@0x7fe0000 {
partition@7fe0000 {
label = "boot environment";
reg = <0x07fe0000 0x00020000>;
};


@@ -8,19 +8,19 @@
reg = <0x08000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
partition@0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
};
partition@0x400000 {
partition@400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
partition@0xa00000 {
partition@a00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
partition@0xfe0000 {
partition@fe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};


@@ -8,11 +8,11 @@
reg = <0x08000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
partition@0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
};
partition@0x3f0000 {
partition@3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};


@@ -145,7 +145,11 @@ struct sata_dwc_device {
#endif
};
#define SATA_DWC_QCMD_MAX 32
/*
* Allow one extra special slot for commands and DMA management
* to account for libata internal commands.
*/
#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1)
struct sata_dwc_device_port {
struct sata_dwc_device *hsdev;


@@ -120,7 +120,11 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
mc146818_get_time(&time);
if (mc146818_get_time(&time) < 0) {
pr_err("Unable to read current time from RTC\n");
return 0;
}
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)


@@ -1642,22 +1642,22 @@ struct sib_info {
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
extern void notify_resource_state(struct sk_buff *,
extern int notify_resource_state(struct sk_buff *,
unsigned int,
struct drbd_resource *,
struct resource_info *,
enum drbd_notification_type);
extern void notify_device_state(struct sk_buff *,
extern int notify_device_state(struct sk_buff *,
unsigned int,
struct drbd_device *,
struct device_info *,
enum drbd_notification_type);
extern void notify_connection_state(struct sk_buff *,
extern int notify_connection_state(struct sk_buff *,
unsigned int,
struct drbd_connection *,
struct connection_info *,
enum drbd_notification_type);
extern void notify_peer_device_state(struct sk_buff *,
extern int notify_peer_device_state(struct sk_buff *,
unsigned int,
struct drbd_peer_device *,
struct peer_device_info *,


@@ -4617,7 +4617,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
return drbd_notification_header_to_skb(msg, &nh, true);
}
void notify_resource_state(struct sk_buff *skb,
int notify_resource_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource *resource,
struct resource_info *resource_info,
@@ -4659,16 +4659,17 @@ void notify_resource_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
return;
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
void notify_device_state(struct sk_buff *skb,
int notify_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_device *device,
struct device_info *device_info,
@@ -4708,16 +4709,17 @@ void notify_device_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
return;
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
void notify_connection_state(struct sk_buff *skb,
int notify_connection_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection *connection,
struct connection_info *connection_info,
@@ -4757,16 +4759,17 @@ void notify_connection_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
return;
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
void notify_peer_device_state(struct sk_buff *skb,
int notify_peer_device_state(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device *peer_device,
struct peer_device_info *peer_device_info,
@@ -4807,13 +4810,14 @@ void notify_peer_device_state(struct sk_buff *skb,
if (err && err != -ESRCH)
goto failed;
}
return;
return 0;
nla_put_failure:
nlmsg_free(skb);
failed:
drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
err, seq);
return err;
}
void notify_helper(enum drbd_notification_type type,
@@ -4864,7 +4868,7 @@ fail:
err, seq);
}
static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
struct drbd_genlmsghdr *dh;
int err;
@@ -4878,11 +4882,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
if (nla_put_notification_header(skb, NOTIFY_EXISTS))
goto nla_put_failure;
genlmsg_end(skb, dh);
return;
return 0;
nla_put_failure:
nlmsg_free(skb);
pr_err("Error %d sending event. Event seq:%u\n", err, seq);
return err;
}
static void free_state_changes(struct list_head *list)
@@ -4909,6 +4914,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int seq = cb->args[2];
unsigned int n;
enum drbd_notification_type flags = 0;
int err = 0;
/* There is no need for taking notification_mutex here: it doesn't
matter if the initial state events mix with later state change
@@ -4917,32 +4923,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[5]--;
if (cb->args[5] == 1) {
notify_initial_state_done(skb, seq);
err = notify_initial_state_done(skb, seq);
goto out;
}
n = cb->args[4]++;
if (cb->args[4] < cb->args[3])
flags |= NOTIFY_CONTINUES;
if (n < 1) {
notify_resource_state_change(skb, seq, state_change->resource,
err = notify_resource_state_change(skb, seq, state_change->resource,
NOTIFY_EXISTS | flags);
goto next;
}
n--;
if (n < state_change->n_connections) {
notify_connection_state_change(skb, seq, &state_change->connections[n],
err = notify_connection_state_change(skb, seq, &state_change->connections[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_connections;
if (n < state_change->n_devices) {
notify_device_state_change(skb, seq, &state_change->devices[n],
err = notify_device_state_change(skb, seq, &state_change->devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
n -= state_change->n_devices;
if (n < state_change->n_devices * state_change->n_connections) {
notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
NOTIFY_EXISTS | flags);
goto next;
}
@@ -4957,6 +4963,9 @@ next:
cb->args[4] = 0;
}
out:
if (err)
return err;
else
return skb->len;
}
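The void-to-int conversion of the notify helpers exists so get_initial_state() can follow the usual netlink dump-callback convention: return a negative error to abort the dump, otherwise return the number of bytes written to the skb (skb->len) so userspace keeps reading. A minimal standalone analogue of that convention (struct buf is a stand-in for struct sk_buff):

    #include <stdio.h>

    struct buf { int len; };

    /* Dump-callback convention: an error wins, otherwise bytes written. */
    static int dump_cb(const struct buf *b, int err)
    {
        return err ? err : b->len;
    }

    int main(void)
    {
        struct buf b = { .len = 128 };

        printf("%d\n", dump_cb(&b, 0));     /* 128: continue the dump */
        printf("%d\n", dump_cb(&b, -12));   /* -ENOMEM: abort the dump */
        return 0;
    }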


@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
return rv;
}
void notify_resource_state_change(struct sk_buff *skb,
int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_resource_state_change *resource_state_change,
enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
.res_susp_fen = resource_state_change->susp_fen[NEW],
};
notify_resource_state(skb, seq, resource, &resource_info, type);
return notify_resource_state(skb, seq, resource, &resource_info, type);
}
void notify_connection_state_change(struct sk_buff *skb,
int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_connection_state_change *connection_state_change,
enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
.conn_role = connection_state_change->peer_role[NEW],
};
notify_connection_state(skb, seq, connection, &connection_info, type);
return notify_connection_state(skb, seq, connection, &connection_info, type);
}
void notify_device_state_change(struct sk_buff *skb,
int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_device_state_change *device_state_change,
enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
.dev_disk_state = device_state_change->disk_state[NEW],
};
notify_device_state(skb, seq, device, &device_info, type);
return notify_device_state(skb, seq, device, &device_info, type);
}
void notify_peer_device_state_change(struct sk_buff *skb,
int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
struct drbd_peer_device_state_change *p,
enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
};
notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}
static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
void (*last_func)(struct sk_buff *, unsigned int, void *,
int (*last_func)(struct sk_buff *, unsigned int, void *,
enum drbd_notification_type) = NULL;
void *last_arg = NULL;


@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
extern void copy_old_to_new_state_change(struct drbd_state_change *);
extern void forget_state_change(struct drbd_state_change *);
extern void notify_resource_state_change(struct sk_buff *,
extern int notify_resource_state_change(struct sk_buff *,
unsigned int,
struct drbd_resource_state_change *,
enum drbd_notification_type type);
extern void notify_connection_state_change(struct sk_buff *,
extern int notify_connection_state_change(struct sk_buff *,
unsigned int,
struct drbd_connection_state_change *,
enum drbd_notification_type type);
extern void notify_device_state_change(struct sk_buff *,
extern int notify_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_device_state_change *,
enum drbd_notification_type type);
extern void notify_peer_device_state_change(struct sk_buff *,
extern int notify_peer_device_state_change(struct sk_buff *,
unsigned int,
struct drbd_peer_device_state_change *,
enum drbd_notification_type type);


@@ -254,7 +254,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, nbd->index);
mutex_unlock(&nbd_index_mutex);
destroy_workqueue(nbd->recv_workq);
kfree(nbd);
}
@@ -1260,10 +1260,6 @@ static void nbd_config_put(struct nbd_device *nbd)
kfree(nbd->config);
nbd->config = NULL;
if (nbd->recv_workq)
destroy_workqueue(nbd->recv_workq);
nbd->recv_workq = NULL;
nbd->tag_set.timeout = 0;
nbd->disk->queue->limits.discard_granularity = 0;
nbd->disk->queue->limits.discard_alignment = 0;
@@ -1292,14 +1288,6 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
nbd->recv_workq = alloc_workqueue("knbd%d-recv",
WQ_MEM_RECLAIM | WQ_HIGHPRI |
WQ_UNBOUND, 0, nbd->index);
if (!nbd->recv_workq) {
dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
return -ENOMEM;
}
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
nbd->pid = task_pid_nr(current);
@@ -1725,6 +1713,15 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
}
nbd->disk = disk;
nbd->recv_workq = alloc_workqueue("nbd%d-recv",
WQ_MEM_RECLAIM | WQ_HIGHPRI |
WQ_UNBOUND, 0, nbd->index);
if (!nbd->recv_workq) {
dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
err = -ENOMEM;
goto out_err_disk;
}
/*
* Tell the block layer that we are not a rotational device
*/
@@ -1755,14 +1752,16 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
disk->first_minor = index << part_shift;
if (disk->first_minor < index || disk->first_minor > MINORMASK) {
err = -EINVAL;
goto out_free_idr;
goto out_free_work;
}
disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
add_disk(disk);
err = add_disk(disk);
if (err)
goto out_free_work;
/*
* Now publish the device.
@@ -1771,6 +1770,10 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd_total_devices++;
return nbd;
out_free_work:
destroy_workqueue(nbd->recv_workq);
out_err_disk:
blk_cleanup_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
@@ -2024,12 +2027,9 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
nbd_disconnect(nbd);
sock_shutdown(nbd);
/*
* Make sure recv thread has finished, so it does not drop the last
* config ref and try to destroy the workqueue from inside the work
* queue. And this also ensure that we can safely call nbd_clear_que()
* Make sure recv thread has finished, so we can safely call nbd_clear_que()
* to cancel the inflight I/Os.
*/
if (nbd->recv_workq)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;


@@ -2245,7 +2245,7 @@ static struct virtio_driver virtio_rproc_serial = {
.remove = virtcons_remove,
};
static int __init init(void)
static int __init virtio_console_init(void)
{
int err;
@@ -2280,7 +2280,7 @@ free:
return err;
}
static void __exit fini(void)
static void __exit virtio_console_fini(void)
{
reclaim_dma_bufs();
@@ -2290,8 +2290,8 @@ static void __exit fini(void)
class_destroy(pdrvdata.class);
debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
module_init(init);
module_exit(fini);
module_init(virtio_console_init);
module_exit(virtio_console_fini);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");


@@ -798,6 +798,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
u32 r_divider;
u8 r[3];
err = regmap_read(output->data->regmap,
SI5341_OUT_CONFIG(output), &val);
if (err < 0)
return err;
/* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */
if (val & SI5341_OUT_CFG_RDIV_FORCE2)
return parent_rate / 2;
err = regmap_bulk_read(output->data->regmap,
SI5341_OUT_R_REG(output), r, 3);
if (err < 0)
@@ -814,13 +823,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
r_divider += 1;
r_divider <<= 1;
err = regmap_read(output->data->regmap,
SI5341_OUT_CONFIG(output), &val);
if (err < 0)
return err;
if (val & SI5341_OUT_CFG_RDIV_FORCE2)
r_divider = 2;
return parent_rate / r_divider;
}


@@ -633,6 +633,24 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
static bool clk_core_check_boundaries(struct clk_core *core,
unsigned long min_rate,
unsigned long max_rate)
{
struct clk *user;
lockdep_assert_held(&prepare_lock);
if (min_rate > core->max_rate || max_rate < core->min_rate)
return false;
hlist_for_each_entry(user, &core->clks, clks_node)
if (min_rate > user->max_rate || max_rate < user->min_rate)
return false;
return true;
}
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate)
{
@@ -2426,6 +2444,11 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
clk->min_rate = min;
clk->max_rate = max;
if (!clk_core_check_boundaries(clk->core, min, max)) {
ret = -EINVAL;
goto out;
}
rate = clk_core_get_rate_nolock(clk->core);
if (rate < min || rate > max) {
/*
@@ -2454,6 +2477,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
}
}
out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
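clk_core_check_boundaries() rejects a requested rate range unless it intersects both the hardware limits and the range of every existing consumer; clk_set_rate_range() then fails with -EINVAL instead of silently clamping. A standalone sketch of the same intersection test (simplified types, made-up numbers):

    #include <stdbool.h>
    #include <stdio.h>

    struct consumer { unsigned long min_rate, max_rate; };

    static bool check_boundaries(unsigned long hw_min, unsigned long hw_max,
                                 const struct consumer *users, int n,
                                 unsigned long min_rate, unsigned long max_rate)
    {
        if (min_rate > hw_max || max_rate < hw_min)
            return false;

        for (int i = 0; i < n; i++)
            if (min_rate > users[i].max_rate || max_rate < users[i].min_rate)
                return false;

        return true;
    }

    int main(void)
    {
        const struct consumer users[] = { { 100, 400 }, { 200, 500 } };

        /* Intersects the hardware limits and both consumers: accepted. */
        printf("%d\n", check_boundaries(50, 1000, users, 2, 250, 300));
        /* Disjoint from the first consumer's range: rejected (-EINVAL). */
        printf("%d\n", check_boundaries(50, 1000, users, 2, 450, 600));
        return 0;
    }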


@@ -1038,13 +1038,13 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
RK3568_CLKGATE_CON(20), 8, GFLAGS),
GATE(HCLK_VOP, "hclk_vop", "hclk_vo", 0,
RK3568_CLKGATE_CON(20), 9, GFLAGS),
COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
RK3568_CLKSEL_CON(39), 10, 2, MFLAGS, 0, 8, DFLAGS,
RK3568_CLKGATE_CON(20), 10, GFLAGS),
COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
RK3568_CLKSEL_CON(40), 10, 2, MFLAGS, 0, 8, DFLAGS,
RK3568_CLKGATE_CON(20), 11, GFLAGS),
COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, 0,
COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
RK3568_CLKSEL_CON(41), 10, 2, MFLAGS, 0, 8, DFLAGS,
RK3568_CLKGATE_CON(20), 12, GFLAGS),
GATE(CLK_VOP_PWM, "clk_vop_pwm", "xin24m", 0,


@@ -131,7 +131,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
{
struct ti_dt_clk *c;
struct device_node *node, *parent;
struct device_node *node, *parent, *child;
struct clk *clk;
struct of_phandle_args clkspec;
char buf[64];
@@ -171,10 +171,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
node = of_find_node_by_name(NULL, buf);
if (num_args && compat_mode) {
parent = node;
node = of_get_child_by_name(parent, "clock");
if (!node)
node = of_get_child_by_name(parent, "clk");
child = of_get_child_by_name(parent, "clock");
if (!child)
child = of_get_child_by_name(parent, "clk");
if (child) {
of_node_put(parent);
node = child;
}
}
clkspec.np = node;


@@ -303,52 +303,48 @@ static u64 cppc_get_dmi_max_khz(void)
/*
* If CPPC lowest_freq and nominal_freq registers are exposed then we can
* use them to convert perf to freq and vice versa
*
* If the perf/freq point lies between Nominal and Lowest, we can treat
* (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
* and extrapolate the rest
* For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
* use them to convert perf to freq and vice versa. The conversion is
* extrapolated as an affine function passing through the 2 points:
* - (Low perf, Low freq)
* - (Nominal perf, Nominal freq)
*/
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
unsigned int perf)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
s64 retval, offset = 0;
static u64 max_khz;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
if (perf >= caps->nominal_perf) {
mul = caps->nominal_freq;
div = caps->nominal_perf;
} else {
mul = caps->nominal_freq - caps->lowest_freq;
div = caps->nominal_perf - caps->lowest_perf;
}
offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
mul = max_khz;
div = caps->highest_perf;
}
return (u64)perf * mul / div;
retval = offset + div64_u64(perf * mul, div);
if (retval >= 0)
return retval;
return 0;
}
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
unsigned int freq)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
s64 retval, offset = 0;
static u64 max_khz;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
if (freq >= caps->nominal_freq) {
mul = caps->nominal_perf;
div = caps->nominal_freq;
} else {
mul = caps->lowest_perf;
div = caps->lowest_freq;
}
mul = caps->nominal_perf - caps->lowest_perf;
div = caps->nominal_freq - caps->lowest_freq;
offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
@@ -356,7 +352,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
div = max_khz;
}
return (u64)freq * mul / div;
retval = offset + div64_u64(freq * mul, div);
if (retval >= 0)
return retval;
return 0;
}
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
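Below the nominal point, the new conversion is the line through (lowest perf, lowest freq) and (nominal perf, nominal freq); at or above nominal, the kernel switches to the nominal ratio (a line through the origin), and either way a negative result is clamped to zero. A standalone worked example of the below-nominal affine case, with made-up calibration numbers:

    #include <stdio.h>

    static long long perf_to_khz(long long low_perf, long long low_khz,
                                 long long nom_perf, long long nom_khz,
                                 long long perf)
    {
        long long mul = nom_khz - low_khz;
        long long div = nom_perf - low_perf;
        /* offset makes the line pass through the nominal point */
        long long offset = nom_khz - (nom_perf * mul) / div;
        long long khz = offset + (perf * mul) / div;

        return khz > 0 ? khz : 0;   /* clamp, as the fix does */
    }

    int main(void)
    {
        /* lowest: perf 10 -> 400000 kHz, nominal: perf 30 -> 1200000 kHz */
        printf("%lld\n", perf_to_khz(10, 400000, 30, 1200000, 20)); /* 800000 */
        printf("%lld\n", perf_to_khz(10, 400000, 30, 1200000, 30)); /* 1200000 */
        return 0;
    }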


@@ -115,10 +115,8 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
if (ret < 0) {
if (ret < 0)
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
pm_runtime_put(schan->dev);
}
pm_runtime_barrier(schan->dev);


@@ -1368,6 +1368,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* Avoid a race condition with other code, which tries to look up
* an IRQ before the irqchip has been properly registered,
* i.e. while gpiochip is still being brought up.
*/
if (!gc->irq.initialized)
return -EPROBE_DEFER;
#endif
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -1552,6 +1562,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
acpi_gpiochip_request_interrupts(gc);
/*
* Using barrier() here to prevent compiler from reordering
* gc->irq.initialized before initialization of above
* GPIO chip irq members.
*/
barrier();
gc->irq.initialized = true;
return 0;
}
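The two gpiolib hunks form a publish/subscribe pair: the writer fully initializes the irqchip fields, places a barrier, and only then sets gc->irq.initialized; the reader (gpiochip_to_irq()) bails out with -EPROBE_DEFER until the flag is visible. A userspace sketch of the same idea using C11 release/acquire atomics (the kernel patch relies on a compiler barrier(); the atomics here just make the intended ordering explicit):

    #include <stdatomic.h>
    #include <stdio.h>

    struct chip {
        int domain;                 /* state guarded by the flag */
        atomic_bool initialized;    /* publication flag, set last */
    };

    static void setup(struct chip *c)
    {
        c->domain = 42;             /* initialize guarded state first */
        atomic_store_explicit(&c->initialized, true, memory_order_release);
    }

    static int to_irq(const struct chip *c)
    {
        if (!atomic_load_explicit(&c->initialized, memory_order_acquire))
            return -517;            /* the kernel returns -EPROBE_DEFER here */
        return c->domain;
    }

    int main(void)
    {
        struct chip c = { 0 };

        printf("%d\n", to_irq(&c)); /* not yet published */
        setup(&c);
        printf("%d\n", to_irq(&c)); /* 42 */
        return 0;
    }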


@@ -1508,6 +1508,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
return 0;
default:
dma_fence_put(fence);
return -EINVAL;
}
}


@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
while (queue_bit-- >= 0) {
while (--queue_bit >= 0) {
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
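The one-character loop change above fixes an off-by-one: with the post-decrement test, the body still runs once after queue_bit has reached zero, i.e. with queue_bit == -1, so test_bit() is handed a negative index. A standalone demonstration of the two forms:

    #include <stdio.h>

    int main(void)
    {
        int bit;

        bit = 3;
        printf("post-decrement:");
        while (bit-- >= 0)
            printf(" %d", bit);     /* prints 2 1 0 -1: body sees -1 */
        printf("\n");

        bit = 3;
        printf("pre-decrement: ");
        while (--bit >= 0)
            printf(" %d", bit);     /* prints 2 1 0: stops before -1 */
        printf("\n");
        return 0;
    }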


@@ -1343,7 +1343,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
dma_resv_lock(bo->base.resv, NULL);
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
return;
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {


@@ -601,8 +601,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)


@@ -1807,13 +1807,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
if (!args->start_addr || !args->size)
return -EINVAL;
mutex_lock(&p->mutex);
r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
args->attrs);
mutex_unlock(&p->mutex);
return r;
}
#else


@@ -1563,7 +1563,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
/* Fetch the CRAT table from ACPI */
status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
if (status == AE_NOT_FOUND) {
pr_warn("CRAT table not found\n");
pr_info("CRAT table not found\n");
return -ENODATA;
} else if (ACPI_FAILURE(status)) {
const char *err = acpi_format_exception(status);


@@ -270,15 +270,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
return ret;
}
ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
O_RDWR);
if (ret < 0) {
kfifo_free(&client->fifo);
kfree(client);
return ret;
}
*fd = ret;
init_waitqueue_head(&client->wait_queue);
spin_lock_init(&client->lock);
client->events = 0;
@@ -288,5 +279,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
list_add_rcu(&client->list, &dev->smi_clients);
spin_unlock(&dev->smi_lock);
ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
O_RDWR);
if (ret < 0) {
spin_lock(&dev->smi_lock);
list_del_rcu(&client->list);
spin_unlock(&dev->smi_lock);
synchronize_rcu();
kfifo_free(&client->fifo);
kfree(client);
return ret;
}
*fd = ret;
return 0;
}


@@ -3522,7 +3522,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
max - min);
}
static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
@@ -3550,7 +3550,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
return rc ? 0 : 1;
if (rc)
dm->actual_brightness[bl_idx] = user_brightness;
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -9316,7 +9317,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
(amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
(dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
#endif


@@ -446,6 +446,12 @@ struct amdgpu_display_manager {
* cached backlight values.
*/
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
/**
* @actual_brightness:
*
* last successfully applied backlight values.
*/
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
};
enum dsc_clock_force_state {


@@ -229,8 +229,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -388,8 +390,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
break;
r = put_user((*(rd_buf + result)), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -1316,8 +1320,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -1333,8 +1339,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -1503,8 +1511,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -1520,8 +1530,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -1688,8 +1700,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -1705,8 +1719,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -1869,8 +1885,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -1886,8 +1904,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -2045,8 +2065,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -2062,8 +2084,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -2102,8 +2126,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -2119,8 +2145,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -2174,8 +2202,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -2191,8 +2221,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -2246,8 +2278,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
break;
}
if (!pipe_ctx)
if (!pipe_ctx) {
kfree(rd_buf);
return -ENXIO;
}
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
@@ -2263,8 +2297,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
@@ -3254,8 +3290,10 @@ static ssize_t dcc_en_bits_read(
dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
if (!rd_buf)
if (!rd_buf) {
kfree(dcc_en_bits);
return -ENOMEM;
}
for (i = 0; i < num_pipes; i++)
offset += snprintf(rd_buf + offset, rd_buf_size - offset,
@@ -3268,8 +3306,10 @@ static ssize_t dcc_en_bits_read(
if (*pos >= rd_buf_size)
break;
r = put_user(*(rd_buf + result), buf);
if (r)
if (r) {
kfree(rd_buf);
return r; /* r = -EFAULT */
}
buf += 1;
size -= 1;
*pos += 1;
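Every hunk in this file applies the same fix: a heap buffer staged for copying to userspace was leaked whenever put_user() failed or an early sanity check bailed out, so each early return now frees rd_buf (or dcc_en_bits) first. A standalone analogue of the pattern, with put_byte() as a made-up stand-in for put_user():

    #include <stdlib.h>

    /* Stand-in for put_user(): fails when the destination is NULL. */
    static int put_byte(char *dst, char c)
    {
        if (!dst)
            return -14;             /* -EFAULT */
        *dst = c;
        return 0;
    }

    static int read_setting(char *dst)
    {
        char *rd_buf = calloc(16, 1);
        int r;

        if (!rd_buf)
            return -12;             /* -ENOMEM */

        r = put_byte(dst, rd_buf[0]);
        if (r) {
            free(rd_buf);           /* the fix: release the buffer first */
            return r;
        }

        free(rd_buf);
        return 0;
    }

    int main(void)
    {
        char c;

        return read_setting(&c) == 0 &&
               read_setting(NULL) == -14 ? 0 : 1;  /* no leak either way */
    }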


@@ -74,10 +74,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
link = stream->link;
psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
if (psr_config.psr_version > 0) {
psr_config.psr_exit_link_training_required = 0x1;
if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
psr_config.psr_version = link->psr_settings.psr_version;
psr_config.psr_frame_capture_indication_req = 0;
psr_config.psr_rfb_setup_time = 0x37;
psr_config.psr_sdp_transmit_line_num_deadline = 0x20;


@@ -1599,6 +1599,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;
if (stream_a->signal != stream_b->signal)
return false;
if (stream_a->dpms_off != stream_b->dpms_off)
return false;


@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@@ -1045,6 +1045,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
/* Don't use baco for reset in S3.
* This is a workaround for some platforms
* where entering BACO during suspend
* seems to cause reboots or hangs.
* This might be related to the fact that BACO controls
* power to the whole GPU including devices like audio and USB.
* Powering down/up everything may adversely affect these other
* devices. Needs more investigation.
*/
if (adev->in_s3)
return false;
if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
return false;


@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
(data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,


@@ -861,18 +861,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
drm_mode_debug_printmodeline(adjusted_mode);
pm_runtime_get_sync(dev);
if (pm_runtime_resume_and_get(dev) < 0)
return;
if (clk_prepare_enable(dsi->lcdif_clk) < 0)
return;
goto runtime_put;
if (clk_prepare_enable(dsi->core_clk) < 0)
return;
goto runtime_put;
/* Step 1 from DSI reset-out instructions */
ret = reset_control_deassert(dsi->rst_pclk);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
return;
goto runtime_put;
}
/* Step 2 from DSI reset-out instructions */
@@ -882,13 +883,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
ret = reset_control_deassert(dsi->rst_esc);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
return;
goto runtime_put;
}
ret = reset_control_deassert(dsi->rst_byte);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
return;
goto runtime_put;
}
return;
runtime_put:
pm_runtime_put_sync(dev);
}
static void


@@ -166,6 +166,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
},
.driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* GPD Win Max */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* GPD Pocket, note that the DMI data is less generic than
* it seems, devices with a board-vendor of "AMI Corporation"


@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
struct imx_hdmi *hdmi;
int ret;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
hdmi->bridge = of_drm_find_bridge(np);
if (!hdmi->bridge) {
dev_err(hdmi->dev, "Unable to find bridge\n");
dw_hdmi_remove(hdmi->hdmi);
return -ENODEV;
}
return component_add(&pdev->dev, &dw_hdmi_imx_ops);
ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
if (ret)
dw_hdmi_remove(hdmi->hdmi);
return ret;
}
static int dw_hdmi_imx_remove(struct platform_device *pdev)


@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
if (!channel->edid)
return -ENOMEM;
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,


@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret)
if (ret) {
drm_mode_destroy(connector->dev, mode);
return ret;
}
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;


@@ -1909,7 +1909,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",


@@ -216,6 +216,7 @@ gm20b_pmu = {
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
.reset = gf100_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)


@@ -23,7 +23,7 @@
*/
#include "priv.h"
static void
void
gp102_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;


@@ -83,6 +83,7 @@ gp10b_pmu = {
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
.reset = gp102_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)


@@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
bool gf100_pmu_enabled(struct nvkm_pmu *);
void gf100_pmu_reset(struct nvkm_pmu *);
void gp102_pmu_reset(struct nvkm_pmu *pmu);
void gk110_pmu_pgob(struct nvkm_pmu *, bool);


@@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
int ret;
vcc = devm_regulator_get_optional(dev, "vcc");
if (IS_ERR(vcc))
if (IS_ERR(vcc)) {
dev_err(dev, "get optional vcc failed\n");
vcc = NULL;
}
dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
struct mipi_dbi_dev, drm);


@@ -625,7 +625,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (!render->base.perfmon) {
ret = -ENOENT;
goto fail;
goto fail_perfmon;
}
}
@@ -678,6 +678,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
fail_perfmon:
drm_gem_unlock_reservations(last_job->bo,
last_job->bo_count, &acquire_ctx);
fail:
@@ -854,7 +855,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
args->perfmon_id);
if (!job->base.perfmon) {
ret = -ENOENT;
goto fail;
goto fail_perfmon;
}
}
@@ -886,6 +887,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
fail_perfmon:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:


@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
* execute:
*
* (a) In the "normal (i.e., not resuming from hibernation)" path,
* the full barrier in smp_store_mb() guarantees that the store
* the full barrier in virt_store_mb() guarantees that the store
* is propagated to all CPUs before the add_channel_work work
* is queued. In turn, add_channel_work is queued before the
* channel's ring buffer is allocated/initialized and the
@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
* recv_int_page before retrieving the channel pointer from the
* array of channels.
*
* (b) In the "resuming from hibernation" path, the smp_store_mb()
* (b) In the "resuming from hibernation" path, the virt_store_mb()
* guarantees that the store is propagated to all CPUs before
* the VMBus connection is marked as ready for the resume event
* (cf. check_ready_for_resume_event()). The interrupt handler
* of the VMBus driver and vmbus_chan_sched() can not run before
* vmbus_bus_resume() has completed execution (cf. resume_noirq).
*/
smp_store_mb(
virt_store_mb(
vmbus_connection.channels[channel->offermsg.child_relid],
channel);
}


@@ -2776,9 +2776,14 @@ static void __exit vmbus_exit(void)
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_block);
}
/*
* The panic notifier is always registered, hence we should
* also unconditionally unregister it here as well.
*/
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_block);
}
free_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);


@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->msg);
break;
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
[CM_DREQ_COUNTER]);


@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
unsigned long flags;
struct list_head del_list;
/* Prevent freeing of mm until we are completely finished. */
mmgrab(handler->mn.mm);
/* Unregister first so we don't get any more notifications. */
mmu_notifier_unregister(&handler->mn, handler->mn.mm);
@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
do_remove(handler, &del_list);
/* Now the mm may be freed. */
mmdrop(handler->mn.mm);
kfree(handler);
}


@@ -536,8 +536,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
spin_lock_irq(&ent->lock);
if (ent->disabled)
goto out;
if (need_delay)
if (need_delay) {
queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
goto out;
}
remove_cache_mr_locked(ent);
queue_adjust_cache_locked(ent);
}
@@ -633,6 +635,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
WRITE_ONCE(dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;


@@ -3190,7 +3190,11 @@ serr_no_r_lock:
spin_lock_irqsave(&sqp->s_lock, flags);
rvt_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
int lastwqe;
spin_lock(&sqp->r_lock);
lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
spin_unlock(&sqp->r_lock);
sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);


@@ -1552,6 +1552,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
dev_info(smmu->dev, "\t0x%016llx\n",
(unsigned long long)evt[i]);
cond_resched();
}
/*


@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
sizeof(phandle));
if (num_iommus < 0)
return 0;
return ERR_PTR(-ENODEV);
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)

Some files were not shown because too many files have changed in this diff.