Merge 5.15.25 into android13-5.15

Changes in 5.15.25
  drm/nouveau/pmu/gm200-: use alternate falcon reset sequence
  fs/proc: task_mmu.c: don't read mapcount for migration entry
  btrfs: zoned: cache reported zone during mount
  scsi: lpfc: Fix mailbox command failure during driver initialization
  HID:Add support for UGTABLET WP5540
  Revert "svm: Add warning message for AVIC IPI invalid target"
  parisc: Show error if wrong 32/64-bit compiler is being used
  serial: parisc: GSC: fix build when IOSAPIC is not set
  parisc: Drop __init from map_pages declaration
  parisc: Fix data TLB miss in sba_unmap_sg
  parisc: Fix sglist access in ccio-dma.c
  mmc: block: fix read single on recovery logic
  mm: don't try to NUMA-migrate COW pages that have other uses
  HID: amd_sfh: Add illuminance mask to limit ALS max value
  HID: i2c-hid: goodix: Fix a lockdep splat
  HID: amd_sfh: Increase sensor command timeout
  HID: amd_sfh: Correct the structure field name
  PCI: hv: Fix NUMA node assignment when kernel boots with custom NUMA topology
  parisc: Add ioread64_lo_hi() and iowrite64_lo_hi()
  btrfs: send: in case of IO error log it
  platform/x86: touchscreen_dmi: Add info for the RWC NANOTE P8 AY07J 2-in-1
  platform/x86: ISST: Fix possible circular locking dependency detected
  kunit: tool: Import missing importlib.abc
  selftests: rtc: Increase test timeout so that all tests run
  kselftest: signal all child processes
  net: ieee802154: at86rf230: Stop leaking skb's
  selftests/zram: Skip max_comp_streams interface on newer kernel
  selftests/zram01.sh: Fix compression ratio calculation
  selftests/zram: Adapt the situation that /dev/zram0 is being used
  selftests: openat2: Print also errno in failure messages
  selftests: openat2: Add missing dependency in Makefile
  selftests: openat2: Skip testcases that fail with EOPNOTSUPP
  selftests: skip mincore.check_file_mmap when fs lacks needed support
  ax25: improve the incomplete fix to avoid UAF and NPD bugs
  pinctrl: bcm63xx: fix unmet dependency on REGMAP for GPIO_REGMAP
  vfs: make freeze_super abort when sync_filesystem returns error
  quota: make dquot_quota_sync return errors from ->sync_fs
  scsi: pm80xx: Fix double completion for SATA devices
  kselftest: Fix vdso_test_abi return status
  scsi: core: Reallocate device's budget map on queue depth change
  scsi: pm8001: Fix use-after-free for aborted TMF sas_task
  scsi: pm8001: Fix use-after-free for aborted SSP/STP sas_task
  drm/amd: Warn users about potential s0ix problems
  nvme: fix a possible use-after-free in controller reset during load
  nvme-tcp: fix possible use-after-free in transport error_recovery work
  nvme-rdma: fix possible use-after-free in transport error_recovery work
  net: sparx5: do not refer to skb after passing it on
  drm/amd: add support to check whether the system is set to s3
  drm/amd: Only run s3 or s0ix if system is configured properly
  drm/amdgpu: fix logic inversion in check
  x86/Xen: streamline (and fix) PV CPU enumeration
  Revert "module, async: async_synchronize_full() on module init iff async is used"
  gcc-plugins/stackleak: Use noinstr in favor of notrace
  random: wake up /dev/random writers after zap
  KVM: x86/xen: Fix runstate updates to be atomic when preempting vCPU
  KVM: x86: nSVM/nVMX: set nested_run_pending on VM entry which is a result of RSM
  KVM: x86: SVM: don't passthrough SMAP/SMEP/PKE bits in !NPT && !gCR0.PG case
  KVM: x86: nSVM: fix potential NULL derefernce on nested migration
  KVM: x86: nSVM: mark vmcb01 as dirty when restoring SMM saved state
  iwlwifi: fix use-after-free
  drm/radeon: Fix backlight control on iMac 12,1
  drm/atomic: Don't pollute crtc_state->mode_blob with error pointers
  drm/amd/pm: correct the sequence of sending gpu reset msg
  drm/amdgpu: skipping SDMA hw_init and hw_fini for S0ix.
  drm/i915/opregion: check port number bounds for SWSCI display power state
  drm/i915: Fix dbuf slice config lookup
  drm/i915: Fix mbus join config lookup
  vsock: remove vsock from connected table when connect is interrupted by a signal
  drm/cma-helper: Set VM_DONTEXPAND for mmap
  drm/i915/gvt: Make DRM_I915_GVT depend on X86
  drm/i915/ttm: tweak priority hint selection
  iwlwifi: pcie: fix locking when "HW not ready"
  iwlwifi: pcie: gen2: fix locking when "HW not ready"
  iwlwifi: mvm: don't send SAR GEO command for 3160 devices
  selftests: netfilter: fix exit value for nft_concat_range
  netfilter: nft_synproxy: unregister hooks on init error path
  selftests: netfilter: disable rp_filter on router
  ipv4: fix data races in fib_alias_hw_flags_set
  ipv6: fix data-race in fib6_info_hw_flags_set / fib6_purge_rt
  ipv6: mcast: use rcu-safe version of ipv6_get_lladdr()
  ipv6: per-netns exclusive flowlabel checks
  Revert "net: ethernet: bgmac: Use devm_platform_ioremap_resource_byname"
  mac80211: mlme: check for null after calling kmemdup
  brcmfmac: firmware: Fix crash in brcm_alt_fw_path
  cfg80211: fix race in netlink owner interface destruction
  net: dsa: lan9303: fix reset on probe
  net: dsa: mv88e6xxx: flush switchdev FDB workqueue before removing VLAN
  net: dsa: lantiq_gswip: fix use after free in gswip_remove()
  net: dsa: lan9303: handle hwaccel VLAN tags
  net: dsa: lan9303: add VLAN IDs to master device
  net: ieee802154: ca8210: Fix lifs/sifs periods
  ping: fix the dif and sdif check in ping_lookup
  bonding: force carrier update when releasing slave
  drop_monitor: fix data-race in dropmon_net_event / trace_napi_poll_hit
  net_sched: add __rcu annotation to netdev->qdisc
  bonding: fix data-races around agg_select_timer
  libsubcmd: Fix use-after-free for realloc(..., 0)
  net/smc: Avoid overwriting the copies of clcsock callback functions
  net: phy: mediatek: remove PHY mode check on MT7531
  atl1c: fix tx timeout after link flap on Mikrotik 10/25G NIC
  tipc: fix wrong publisher node address in link publications
  dpaa2-switch: fix default return of dpaa2_switch_flower_parse_mirror_key
  dpaa2-eth: Initialize mutex used in one step timestamping path
  net: bridge: multicast: notify switchdev driver whenever MC processing gets disabled
  perf bpf: Defer freeing string after possible strlen() on it
  selftests/exec: Add non-regular to TEST_GEN_PROGS
  arm64: Correct wrong label in macro __init_el2_gicv3
  ALSA: usb-audio: revert to IMPLICIT_FB_FIXED_DEV for M-Audio FastTrack Ultra
  ALSA: hda/realtek: Add quirk for Legion Y9000X 2019
  ALSA: hda/realtek: Fix deadlock by COEF mutex
  ALSA: hda: Fix regression on forced probe mask option
  ALSA: hda: Fix missing codec probe on Shenker Dock 15
  ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw()
  ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw_range()
  ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw_sx()
  ASoC: ops: Fix stereo change notifications in snd_soc_put_xr_sx()
  cifs: fix set of group SID via NTSD xattrs
  powerpc/603: Fix boot failure with DEBUG_PAGEALLOC and KFENCE
  powerpc/lib/sstep: fix 'ptesync' build error
  mtd: rawnand: gpmi: don't leak PM reference in error path
  smb3: fix snapshot mount option
  tipc: fix wrong notification node addresses
  scsi: ufs: Remove dead code
  scsi: ufs: Fix a deadlock in the error handler
  ASoC: tas2770: Insert post reset delay
  ASoC: qcom: Actually clear DMA interrupt register for HDMI
  block/wbt: fix negative inflight counter when remove scsi device
  NFS: Remove an incorrect revalidation in nfs4_update_changeattr_locked()
  NFS: LOOKUP_DIRECTORY is also ok with symlinks
  NFS: Do not report writeback errors in nfs_getattr()
  tty: n_tty: do not look ahead for EOL character past the end of the buffer
  block: fix surprise removal for drivers calling blk_set_queue_dying
  mtd: rawnand: qcom: Fix clock sequencing in qcom_nandc_probe()
  mtd: parsers: qcom: Fix kernel panic on skipped partition
  mtd: parsers: qcom: Fix missing free for pparts in cleanup
  mtd: phram: Prevent divide by zero bug in phram_setup()
  mtd: rawnand: brcmnand: Fixed incorrect sub-page ECC status
  HID: elo: fix memory leak in elo_probe
  mtd: rawnand: ingenic: Fix missing put_device in ingenic_ecc_get
  Drivers: hv: vmbus: Fix memory leak in vmbus_add_channel_kobj
  KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
  KVM: x86/pmu: Don't truncate the PerfEvtSeln MSR when creating a perf event
  KVM: x86/pmu: Use AMD64_RAW_EVENT_MASK for PERF_TYPE_RAW
  ARM: OMAP2+: hwmod: Add of_node_put() before break
  ARM: OMAP2+: adjust the location of put_device() call in omapdss_init_of
  phy: usb: Leave some clocks running during suspend
  staging: vc04_services: Fix RCU dereference check
  phy: phy-mtk-tphy: Fix duplicated argument in phy-mtk-tphy
  irqchip/sifive-plic: Add missing thead,c900-plic match string
  x86/bug: Merge annotate_reachable() into _BUG_FLAGS() asm
  netfilter: conntrack: don't refresh sctp entries in closed state
  ksmbd: fix same UniqueId for dot and dotdot entries
  ksmbd: don't align last entry offset in smb2 query directory
  arm64: dts: meson-gx: add ATF BL32 reserved-memory region
  arm64: dts: meson-g12: add ATF BL32 reserved-memory region
  arm64: dts: meson-g12: drop BL32 region from SEI510/SEI610
  pidfd: fix test failure due to stack overflow on some arches
  selftests: fixup build warnings in pidfd / clone3 tests
  mm: io_uring: allow oom-killer from io_uring_setup
  ACPI: PM: Revert "Only mark EC GPE for wakeup on Intel systems"
  kconfig: let 'shell' return enough output for deep path names
  ata: libata-core: Disable TRIM on M88V29
  soc: aspeed: lpc-ctrl: Block error printing on probe defer cases
  xprtrdma: fix pointer derefs in error cases of rpcrdma_ep_create
  drm/rockchip: dw_hdmi: Do not leave clock enabled in error case
  tracing: Fix tp_printk option related with tp_printk_stop_on_boot
  display/amd: decrease message verbosity about watermarks table failure
  drm/amd/display: Cap pflip irqs per max otg number
  drm/amd/display: fix yellow carp wm clamping
  net: usb: qmi_wwan: Add support for Dell DW5829e
  net: macb: Align the dma and coherent dma masks
  kconfig: fix failing to generate auto.conf
  scsi: lpfc: Fix pt2pt NVMe PRLI reject LOGO loop
  EDAC: Fix calculation of returned address and next offset in edac_align_ptr()
  ucounts: Handle wrapping in is_ucounts_overlimit
  ucounts: In set_cred_ucounts assume new->ucounts is non-NULL
  ucounts: Base set_cred_ucounts changes on the real user
  ucounts: Enforce RLIMIT_NPROC not RLIMIT_NPROC+1
  lib/iov_iter: initialize "flags" in new pipe_buffer
  rlimit: Fix RLIMIT_NPROC enforcement failure caused by capability calls in set_user
  ucounts: Move RLIMIT_NPROC handling after set_user
  net: sched: limit TC_ACT_REPEAT loops
  dmaengine: sh: rcar-dmac: Check for error num after setting mask
  dmaengine: stm32-dmamux: Fix PM disable depth imbalance in stm32_dmamux_probe
  dmaengine: sh: rcar-dmac: Check for error num after dma_set_max_seg_size
  tests: fix idmapped mount_setattr test
  i2c: qcom-cci: don't delete an unregistered adapter
  i2c: qcom-cci: don't put a device tree node before i2c_add_adapter()
  dmaengine: ptdma: Fix the error handling path in pt_core_init()
  copy_process(): Move fd_install() out of sighand->siglock critical section
  scsi: qedi: Fix ABBA deadlock in qedi_process_tmf_resp() and qedi_process_cmd_cleanup_resp()
  ice: enable parsing IPSEC SPI headers for RSS
  i2c: brcmstb: fix support for DSL and CM variants
  lockdep: Correct lock_classes index mapping
  Linux 5.15.25

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib129a0e11f5e82d67563329a5de1b0aef1d87928
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 24
+SUBLEVEL = 25
 EXTRAVERSION =
 NAME = Trick or Treat
 

@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
 	}
 
 	r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	put_device(&pdev->dev);
 	if (r) {
 		pr_err("Unable to populate DSS submodule devices\n");
-		put_device(&pdev->dev);
 		return r;
 	}
 

@@ -752,9 +752,11 @@ static int __init _init_clkctrl_providers(void)
 
 	for_each_matching_node(np, ti_clkctrl_match_table) {
 		ret = _setup_clkctrl_provider(np);
-		if (ret)
+		if (ret) {
+			of_node_put(np);
 			break;
+		}
 	}
 
 	return ret;
 }

@@ -107,6 +107,12 @@
 		no-map;
 	};
 
+	/* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+	secmon_reserved_bl32: secmon@5300000 {
+		reg = <0x0 0x05300000 0x0 0x2000000>;
+		no-map;
+	};
+
 	linux,cma {
 		compatible = "shared-dma-pool";
 		reusable;

@@ -157,14 +157,6 @@
 		regulator-always-on;
 	};
 
-	reserved-memory {
-		/* TEE Reserved Memory */
-		bl32_reserved: bl32@5000000 {
-			reg = <0x0 0x05300000 0x0 0x2000000>;
-			no-map;
-		};
-	};
-
 	sdio_pwrseq: sdio-pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;

@@ -49,6 +49,12 @@
 		no-map;
 	};
 
+	/* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+	secmon_reserved_bl32: secmon@5300000 {
+		reg = <0x0 0x05300000 0x0 0x2000000>;
+		no-map;
+	};
+
 	linux,cma {
 		compatible = "shared-dma-pool";
 		reusable;

@@ -203,14 +203,6 @@
 		regulator-always-on;
 	};
 
-	reserved-memory {
-		/* TEE Reserved Memory */
-		bl32_reserved: bl32@5000000 {
-			reg = <0x0 0x05300000 0x0 0x2000000>;
-			no-map;
-		};
-	};
-
 	sdio_pwrseq: sdio-pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;

@@ -106,7 +106,7 @@
 	msr_s	SYS_ICC_SRE_EL2, x0
 	isb				// Make sure SRE is now set
 	mrs_s	x0, SYS_ICC_SRE_EL2	// Read SRE back,
-	tbz	x0, #0, 1f		// and check that it sticks
+	tbz	x0, #0, .Lskip_gicv3_\@	// and check that it sticks
 	msr_s	SYS_ICH_HCR_EL2, xzr	// Reset ICC_HCR_EL2 to defaults
 .Lskip_gicv3_\@:
 .endm

@@ -12,6 +12,14 @@
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 
+/* compiler build environment sanity checks: */
+#if !defined(CONFIG_64BIT) && defined(__LP64__)
+#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+#endif
+#if defined(CONFIG_64BIT) && !defined(__LP64__)
+#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+#endif
+
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  * *_bit() want use of volatile.

@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
 	return *((u64 *)addr);
 }
 
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+	u32 low, high;
+
+	low = ioread32(addr);
+	high = ioread32(addr + sizeof(u32));
+
+	return low + ((u64)high << 32);
+}
+
 u64 ioread64_hi_lo(const void __iomem *addr)
 {
 	u32 low, high;
@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
 	}
 }
 
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+	iowrite32(val >> 32, addr + sizeof(u32));
+}
+
 void iowrite64_hi_lo(u64 val, void __iomem *addr)
 {
 	iowrite32(val >> 32, addr + sizeof(u32));
@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 EXPORT_SYMBOL(ioread64);
 EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
 EXPORT_SYMBOL(ioread64_hi_lo);
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
 EXPORT_SYMBOL(iowrite32be);
 EXPORT_SYMBOL(iowrite64);
 EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
 EXPORT_SYMBOL(iowrite64_hi_lo);
 EXPORT_SYMBOL(ioread8_rep);
 EXPORT_SYMBOL(ioread16_rep);

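The hunks above add the missing lo_hi variants next to the existing hi_lo ones: a 64-bit MMIO register is emulated as two 32-bit accesses, low word first. A minimal sketch of the same recombination outside the kernel, with mmio_read32() as a hypothetical stand-in for ioread32():

    #include <stdint.h>

    /* Hypothetical stand-in for the kernel's 32-bit MMIO read. */
    static uint32_t mmio_read32(const volatile void *addr)
    {
            return *(const volatile uint32_t *)addr;
    }

    /* Emulate a 64-bit read as two 32-bit halves, low word first. */
    static uint64_t mmio_read64_lo_hi(const volatile void *addr)
    {
            uint32_t low  = mmio_read32(addr);
            uint32_t high = mmio_read32((const volatile uint8_t *)addr + sizeof(uint32_t));

            return low | ((uint64_t)high << 32);
    }

The lo_hi/hi_lo split matters on hardware where reading one half has side effects (such as latching the other half), so callers pick the ordering the device requires.
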
@@ -341,7 +341,7 @@ static void __init setup_bootmem(void)
 
 static bool kernel_set_to_readonly;
 
-static void __init map_pages(unsigned long start_vaddr,
+static void __ref map_pages(unsigned long start_vaddr,
 			unsigned long start_paddr, unsigned long size,
 			pgprot_t pgprot, int force)
 {
@@ -453,7 +453,7 @@ void __init set_kernel_text_rw(int enable_read_write)
 	flush_tlb_all();
 }
 
-void __ref free_initmem(void)
+void free_initmem(void)
 {
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
@@ -467,7 +467,6 @@ void __ref free_initmem(void)
 	/* The init text pages are marked R-X. We have to
 	 * flush the icache and mark them RW-
 	 *
-	 * This is tricky, because map_pages is in the init section.
 	 * Do a dummy remap of the data section first (the data
 	 * section is already PAGE_KERNEL) to pull in the TLB entries
 	 * for map_kernel */

@@ -421,14 +421,14 @@ InstructionTLBMiss:
 	 */
 	/* Get PTE (linux-style) and check access */
 	mfspr	r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 #endif
 	mfspr	r2, SPRN_SDR1
 	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
 	rlwinm	r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
 	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

@@ -3181,12 +3181,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
 		case BARRIER_EIEIO:
 			eieio();
 			break;
+#ifdef CONFIG_PPC64
 		case BARRIER_LWSYNC:
 			asm volatile("lwsync" : : : "memory");
 			break;
 		case BARRIER_PTESYNC:
 			asm volatile("ptesync" : : : "memory");
 			break;
+#endif
 		}
 		break;
 

@@ -22,7 +22,7 @@
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 
-#define _BUG_FLAGS(ins, flags)					\
+#define _BUG_FLAGS(ins, flags, extra)				\
 do {								\
 	asm_inline volatile("1:\t" ins "\n"			\
 		     ".pushsection __bug_table,\"aw\"\n"	\
@@ -31,7 +31,8 @@ do {								\
 		     "\t.word %c1" "\t# bug_entry::line\n"	\
 		     "\t.word %c2" "\t# bug_entry::flags\n"	\
 		     "\t.org 2b+%c3\n"				\
-		     ".popsection"				\
+		     ".popsection\n"				\
+		     extra					\
 		     : : "i" (__FILE__), "i" (__LINE__),	\
 			 "i" (flags),				\
 			 "i" (sizeof(struct bug_entry)));	\
@@ -39,14 +40,15 @@ do {								\
 
 #else /* !CONFIG_DEBUG_BUGVERBOSE */
 
-#define _BUG_FLAGS(ins, flags)					\
+#define _BUG_FLAGS(ins, flags, extra)				\
 do {								\
 	asm_inline volatile("1:\t" ins "\n"			\
 		     ".pushsection __bug_table,\"aw\"\n"	\
 		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
 		     "\t.word %c0" "\t# bug_entry::flags\n"	\
 		     "\t.org 2b+%c1\n"				\
-		     ".popsection"				\
+		     ".popsection\n"				\
+		     extra					\
 		     : : "i" (flags),				\
 			 "i" (sizeof(struct bug_entry)));	\
 } while (0)
@@ -55,7 +57,7 @@ do {								\
 
 #else
 
-#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+#define _BUG_FLAGS(ins, flags, extra)  asm volatile(ins)
 
 #endif /* CONFIG_GENERIC_BUG */
 
@@ -63,8 +65,8 @@ do {								\
 #define BUG()							\
 do {								\
 	instrumentation_begin();				\
-	_BUG_FLAGS(ASM_UD2, 0);					\
-	unreachable();						\
+	_BUG_FLAGS(ASM_UD2, 0, "");				\
+	__builtin_unreachable();				\
 } while (0)
 
 /*
@@ -75,9 +77,9 @@ do {								\
  */
 #define __WARN_FLAGS(flags)					\
 do {								\
+	__auto_type f = BUGFLAG_WARNING|(flags);		\
 	instrumentation_begin();				\
-	_BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags));		\
-	annotate_reachable();					\
+	_BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE);			\
 	instrumentation_end();					\
 } while (0)
 

@@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-				  unsigned config, bool exclude_user,
+				  u64 config, bool exclude_user,
 				  bool exclude_kernel, bool intr,
 				  bool in_tx, bool in_tx_cp)
 {
@@ -173,8 +173,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-	unsigned config, type = PERF_TYPE_RAW;
-	u8 event_select, unit_mask;
+	u64 config;
+	u32 type = PERF_TYPE_RAW;
 	struct kvm *kvm = pmc->vcpu->kvm;
 	struct kvm_pmu_event_filter *filter;
 	int i;
@@ -206,23 +206,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	if (!allow_event)
 		return;
 
-	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
 			  ARCH_PERFMON_EVENTSEL_INV |
 			  ARCH_PERFMON_EVENTSEL_CMASK |
 			  HSW_IN_TX |
 			  HSW_IN_TX_CHECKPOINTED))) {
-		config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
-							      event_select,
-							      unit_mask);
+		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
 		if (config != PERF_COUNT_HW_MAX)
 			type = PERF_TYPE_HARDWARE;
 	}
 
 	if (type == PERF_TYPE_RAW)
-		config = eventsel & X86_RAW_EVENT_MASK;
+		config = eventsel & AMD64_RAW_EVENT_MASK;
 
 	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
 		return;

@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
 };
 
 struct kvm_pmu_ops {
-	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-				    u8 unit_mask);
+	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
 	unsigned (*find_fixed_event)(int idx);
 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);

@@ -342,8 +342,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
 		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
 		break;
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
-		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
-			  index, vcpu->vcpu_id, icrh, icrl);
 		break;
 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
 		WARN_ONCE(1, "Invalid backing page\n");

@@ -1357,18 +1357,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	    !nested_vmcb_valid_sregs(vcpu, save))
 		goto out_free;
 
-	/*
-	 * While the nested guest CR3 is already checked and set by
-	 * KVM_SET_SREGS, it was set when nested state was yet loaded,
-	 * thus MMU might not be initialized correctly.
-	 * Set it again to fix this.
-	 */
-
-	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
-				  nested_npt_enabled(svm), false);
-	if (WARN_ON_ONCE(ret))
-		goto out_free;
-
 
 	/*
 	 * All checks done, we can enter guest mode. Userspace provides
@@ -1394,6 +1382,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
 	svm_switch_vmcb(svm, &svm->nested.vmcb02);
 	nested_vmcb02_prepare_control(svm);
+
+	/*
+	 * While the nested guest CR3 is already checked and set by
+	 * KVM_SET_SREGS, it was set when nested state was yet loaded,
+	 * thus MMU might not be initialized correctly.
+	 * Set it again to fix this.
+	 */
+
+	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+				  nested_npt_enabled(svm), false);
+	if (WARN_ON_ONCE(ret))
+		goto out_free;
+
+
 	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
 	ret = 0;
 out_free:

@@ -134,10 +134,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 	return &pmu->gp_counters[msr_to_index(msr)];
 }
 
-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
-				    u8 event_select,
-				    u8 unit_mask)
+static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
@@ -320,7 +320,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 }
 
 struct kvm_pmu_ops amd_pmu_ops = {
-	.find_arch_event = amd_find_arch_event,
+	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
 	.find_fixed_event = amd_find_fixed_event,
 	.pmc_is_enabled = amd_pmc_is_enabled,
 	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,

@@ -1727,6 +1727,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 hcr0 = cr0;
+	bool old_paging = is_paging(vcpu);
 
 #ifdef CONFIG_X86_64
 	if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1743,8 +1744,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 #endif
 	vcpu->arch.cr0 = cr0;
 
-	if (!npt_enabled)
+	if (!npt_enabled) {
 		hcr0 |= X86_CR0_PG | X86_CR0_WP;
+		if (old_paging != is_paging(vcpu))
+			svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
+	}
 
 	/*
 	 * re-enable caching here because the QEMU bios
@@ -1788,8 +1792,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		svm_flush_tlb(vcpu);
 
 	vcpu->arch.cr4 = cr4;
-	if (!npt_enabled)
+	if (!npt_enabled) {
 		cr4 |= X86_CR4_PAE;
+
+		if (!is_paging(vcpu))
+			cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+	}
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
 	vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -4384,10 +4392,17 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		 * Enter the nested guest now
 		 */
 
+		vmcb_mark_all_dirty(svm->vmcb01.ptr);
+
 		vmcb12 = map.hva;
 		nested_load_control_from_vmcb12(svm, &vmcb12->control);
 		ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
+		if (ret)
+			goto unmap_save;
+
+		svm->nested.nested_run_pending = 1;
+
 unmap_save:
 	kvm_vcpu_unmap(vcpu, &map_save, true);
 unmap_map:

@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }
 
-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
-				      u8 event_select,
-				      u8 unit_mask)
+static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
@@ -706,7 +707,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 }
 
 struct kvm_pmu_ops intel_pmu_ops = {
-	.find_arch_event = intel_find_arch_event,
+	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
 	.find_fixed_event = intel_find_fixed_event,
 	.pmc_is_enabled = intel_pmc_is_enabled,
 	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,

@@ -7532,6 +7532,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		if (ret)
 			return ret;
 
+		vmx->nested.nested_run_pending = 1;
 		vmx->nested.smm.guest_mode = false;
 	}
 	return 0;

@@ -93,32 +93,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
 void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 {
 	struct kvm_vcpu_xen *vx = &v->arch.xen;
+	struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
+	struct kvm_memslots *slots = kvm_memslots(v->kvm);
+	bool atomic = (state == RUNSTATE_runnable);
 	uint64_t state_entry_time;
-	unsigned int offset;
+	int __user *user_state;
+	uint64_t __user *user_times;
 
 	kvm_xen_update_runstate(v, state);
 
 	if (!vx->runstate_set)
 		return;
 
+	if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
+	    kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
+		return;
+
+	/* We made sure it fits in a single page */
+	BUG_ON(!ghc->memslot);
+
+	if (atomic)
+		pagefault_disable();
+
+	/*
+	 * The only difference between 32-bit and 64-bit versions of the
+	 * runstate struct us the alignment of uint64_t in 32-bit, which
+	 * means that the 64-bit version has an additional 4 bytes of
+	 * padding after the first field 'state'.
+	 *
+	 * So we use 'int __user *user_state' to point to the state field,
+	 * and 'uint64_t __user *user_times' for runstate_entry_time. So
+	 * the actual array of time[] in each state starts at user_times[1].
+	 */
+	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
+	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
+	user_state = (int __user *)ghc->hva;
+
 	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
 
-	offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
+	user_times = (uint64_t __user *)(ghc->hva +
+					 offsetof(struct compat_vcpu_runstate_info,
+						  state_entry_time));
 #ifdef CONFIG_X86_64
-	/*
-	 * The only difference is alignment of uint64_t in 32-bit.
-	 * So the first field 'state' is accessed directly using
-	 * offsetof() (where its offset happens to be zero), while the
-	 * remaining fields which are all uint64_t, start at 'offset'
-	 * which we tweak here by adding 4.
-	 */
 	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
 		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
 	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
 		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
 
 	if (v->kvm->arch.xen.long_mode)
-		offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+		user_times = (uint64_t __user *)(ghc->hva +
+						 offsetof(struct vcpu_runstate_info,
+							  state_entry_time));
 #endif
 	/*
 	 * First write the updated state_entry_time at the appropriate
@@ -132,10 +157,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
 		     sizeof(state_entry_time));
 
-	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-					  &state_entry_time, offset,
-					  sizeof(state_entry_time)))
-		return;
+	if (__put_user(state_entry_time, user_times))
+		goto out;
 	smp_wmb();
 
 	/*
@@ -149,11 +172,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
 		     sizeof(vx->current_runstate));
 
-	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-					  &vx->current_runstate,
-					  offsetof(struct vcpu_runstate_info, state),
-					  sizeof(vx->current_runstate)))
-		return;
+	if (__put_user(vx->current_runstate, user_state))
+		goto out;
 
 	/*
 	 * Write the actual runstate times immediately after the
@@ -168,24 +188,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
 		     sizeof(vx->runstate_times));
 
-	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-					  &vx->runstate_times[0],
-					  offset + sizeof(u64),
-					  sizeof(vx->runstate_times)))
-		return;
-
+	if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
+		goto out;
 	smp_wmb();
 
 	/*
 	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
 	 * runstate_entry_time field.
 	 */
-
 	state_entry_time &= ~XEN_RUNSTATE_UPDATE;
-	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
-					  &state_entry_time, offset,
-					  sizeof(state_entry_time)))
-		return;
+	__put_user(state_entry_time, user_times);
+	smp_wmb();
+
+ out:
+	mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+	if (atomic)
+		pagefault_enable();
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
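
The ordering in the runstate hunks above is the point of the fix: the XEN_RUNSTATE_UPDATE bit is set in state_entry_time before the payload is rewritten and cleared only afterwards, with smp_wmb() between the steps, so a guest that sees the bit clear also sees a consistent snapshot. A compressed sketch of that publish protocol (user-space C11 analogue; the bit value and the fence macro are stand-ins for the kernel primitives):

    #include <stdint.h>

    #define UPDATE_BIT (1ULL << 63)   /* stand-in for XEN_RUNSTATE_UPDATE */
    #define wmb() __atomic_thread_fence(__ATOMIC_SEQ_CST)

    struct runstate { uint64_t entry_time; uint64_t time[4]; };

    /* Writer: flag "update in progress", write payload, clear the flag. */
    static void publish(struct runstate *rs, uint64_t now, const uint64_t t[4])
    {
            rs->entry_time = now | UPDATE_BIT;
            wmb();                    /* flag visible before payload */
            for (int i = 0; i < 4; i++)
                    rs->time[i] = t[i];
            wmb();                    /* payload visible before clear */
            rs->entry_time = now & ~UPDATE_BIT;
    }
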
@@ -337,6 +356,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 			break;
 		}
 
+		/* It must fit within a single page */
+		if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
+			r = -EINVAL;
+			break;
+		}
+
 		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
 					      &vcpu->arch.xen.vcpu_info_cache,
 					      data->u.gpa,
@@ -354,6 +379,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 			break;
 		}
 
+		/* It must fit within a single page */
+		if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
+			r = -EINVAL;
+			break;
+		}
+
 		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
 					      &vcpu->arch.xen.vcpu_time_info_cache,
 					      data->u.gpa,
@@ -375,6 +406,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 			break;
 		}
 
+		/* It must fit within a single page */
+		if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
+			r = -EINVAL;
+			break;
+		}
+
 		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
 					      &vcpu->arch.xen.runstate_cache,
 					      data->u.gpa,

@@ -1364,10 +1364,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
 	xen_acpi_sleep_register();
 
-	/* Avoid searching for BIOS MP tables */
-	x86_init.mpparse.find_smp_config = x86_init_noop;
-	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
 	xen_boot_params_init_edd();
 
 #ifdef CONFIG_ACPI

@@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
 	return rc;
 }
 
-static void __init xen_fill_possible_map(void)
-{
-	int i, rc;
-
-	if (xen_initial_domain())
-		return;
-
-	for (i = 0; i < nr_cpu_ids; i++) {
-		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0) {
-			num_processors++;
-			set_cpu_possible(i, true);
-		}
-	}
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
 {
 	int i, rc;
 	unsigned int subtract = 0;
 
-	if (!xen_initial_domain())
+	if (early)
 		return;
 
 	num_processors = 0;
@@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
 	 * sure the old memory can be recycled. */
 	make_lowmem_page_readwrite(xen_initial_gdt);
 
-	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
 
 	/*
@@ -486,5 +469,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
 void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
-	xen_fill_possible_map();
+
+	/* Avoid searching for BIOS MP tables */
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = _get_smp_config;
 }

@@ -6878,6 +6878,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
 	spin_unlock_irq(&bfqd->lock);
 #endif
 
+	wbt_enable_default(bfqd->queue);
+
 	kfree(bfqd);
 }
 

@@ -350,13 +350,6 @@ void blk_queue_start_drain(struct request_queue *q)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-void blk_set_queue_dying(struct request_queue *q)
-{
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-	blk_queue_start_drain(q);
-}
-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -374,7 +367,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	WARN_ON_ONCE(blk_queue_registered(q));
 
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
-	blk_set_queue_dying(q);
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
 
 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

@@ -523,8 +523,6 @@ void elv_unregister_queue(struct request_queue *q)
 		kobject_del(&e->kobj);
 
 		e->registered = 0;
-		/* Re-enable throttling in case elevator disabled it */
-		wbt_enable_default(q);
 	}
 }
 

@@ -544,6 +544,20 @@ out_free_ext_minor:
 }
 EXPORT_SYMBOL(device_add_disk);
 
+/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+	set_bit(GD_DEAD, &disk->state);
+	blk_queue_start_drain(disk->queue);
+}
+EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
+
 /**
  * del_gendisk - remove the gendisk
  * @disk: the struct gendisk to remove

@@ -424,14 +424,10 @@ static int lps0_device_attach(struct acpi_device *adev,
 		mem_sleep_current = PM_SUSPEND_TO_IDLE;
 
 	/*
-	 * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
-	 * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
-	 * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
-	 *
-	 * Only enable on !AMD as enabling this universally causes problems for a number
-	 * of AMD based systems.
+	 * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+	 * EC GPE to be enabled while suspended for certain wakeup devices to
+	 * work, so mark it as wakeup-capable.
 	 */
-	if (!acpi_s2idle_vendor_amd())
-		acpi_ec_mark_gpe_for_wake();
+	acpi_ec_mark_gpe_for_wake();
 
 	return 0;

@@ -4014,6 +4014,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 	/* devices that don't properly handle TRIM commands */
 	{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+	{ "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
 
 	/*
 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT

@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 			"Completion workers still active!\n");
 	}
 
-	blk_set_queue_dying(dd->queue);
+	blk_mark_disk_dead(dd->disk);
 	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
 
 	/* Clean up the block layer. */

@@ -7182,7 +7182,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
 		 * IO to complete/fail.
 		 */
 		blk_mq_freeze_queue(rbd_dev->disk->queue);
-		blk_set_queue_dying(rbd_dev->disk->queue);
+		blk_mark_disk_dead(rbd_dev->disk);
 	}
 
 	del_gendisk(rbd_dev->disk);

@@ -2128,7 +2128,7 @@ static void blkfront_closing(struct blkfront_info *info)
 
 	/* No more blkif_request(). */
 	blk_mq_stop_hw_queues(info->rq);
-	blk_set_queue_dying(info->rq);
+	blk_mark_disk_dead(info->gd);
 	set_capacity(info->gd, 0);
 
 	for_each_rinfo(info, rinfo, i) {

@@ -1963,7 +1963,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		 */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		input_pool.entropy_count = 0;
+		if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+			wake_up_interruptible(&random_write_wait);
+			kill_fasync(&fasync, SIGIO, POLL_OUT);
+		}
 		return 0;
 	case RNDRESEEDCRNG:
 		if (!capable(CAP_SYS_ADMIN))

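The RNDZAPENTCNT change above also shows why xchg() is used rather than a plain store: it zeroes the counter and returns the old value in one atomic step, so the wakeup only happens when there was entropy to zap. A user-space analogue with C11 atomics, wake_writers() being a hypothetical stand-in for the wake_up_interruptible()/kill_fasync() pair:

    #include <stdatomic.h>

    static _Atomic int entropy_count;

    void wake_writers(void);   /* hypothetical wakeup hook */

    /* Zap the entropy estimate; wake writers only if it was nonzero. */
    static void zap_entropy(void)
    {
            if (atomic_exchange(&entropy_count, 0) != 0)
                    wake_writers();
    }
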
@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
 	if (!cmd_q->qbase) {
 		dev_err(dev, "unable to allocate command queue\n");
 		ret = -ENOMEM;
-		goto e_dma_alloc;
+		goto e_destroy_pool;
 	}
 
 	cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
 
 	/* Request an irq */
 	ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
-	if (ret)
-		goto e_pool;
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_free_dma;
+	}
 
 	/* Update the device registers with queue information. */
 	cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
 	/* Register the DMA engine support */
 	ret = pt_dmaengine_register(pt);
 	if (ret)
-		goto e_dmaengine;
+		goto e_free_irq;
 
 	/* Set up debugfs entries */
 	ptdma_debugfs_setup(pt);
 
 	return 0;
 
-e_dmaengine:
+e_free_irq:
 	free_irq(pt->pt_irq, pt);
 
-e_dma_alloc:
+e_free_dma:
 	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
 
-e_pool:
-	dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
 	dma_pool_destroy(pt->cmd_q.dma_pool);
 
 	return ret;

@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
 	dmac->dev = &pdev->dev;
 	platform_set_drvdata(pdev, dmac);
-	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+	ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+	if (ret)
+		return ret;
+
+	ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
 
 	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
 	if (ret < 0)

@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
 				     &stm32_dmamux->dmarouter);
 	if (ret)
-		goto err_clk;
+		goto pm_disable;
 
 	return 0;
 
+pm_disable:
+	pm_runtime_disable(&pdev->dev);
 err_clk:
 	clk_disable_unprepare(stm32_dmamux->clk);
 

@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
 	else
 		return (char *)ptr;
 
-	r = (unsigned long)p % align;
+	r = (unsigned long)ptr % align;
 
 	if (r == 0)
 		return (char *)ptr;

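The one-character EDAC fix above is easy to misread: the remainder must be computed on the candidate pointer itself (ptr), not on the address of the caller's pointer variable (p), otherwise the function aligns the wrong value and both the returned address and the next offset drift. A standalone sketch of the intended round-up, assuming align is a power of two as in edac_align_ptr():

    #include <stdint.h>
    #include <stddef.h>

    /* Round addr up to the next multiple of align. */
    static void *align_up(void *addr, size_t align)
    {
            uintptr_t r = (uintptr_t)addr % align;   /* on the value, not &addr */

            if (r == 0)
                    return addr;
            return (uint8_t *)addr + (align - r);
    }
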
@@ -1397,12 +1397,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 
 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
 static inline void amdgpu_acpi_detect(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1411,6 +1409,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
|
|||||||
enum amdgpu_ss ss_state) { return 0; }
|
enum amdgpu_ss ss_state) { return 0; }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
|
||||||
|
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
|
||||||
|
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
|
||||||
|
#else
|
||||||
|
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
|
||||||
|
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
|
||||||
|
#endif
|
||||||
|
|
||||||
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
|
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
|
||||||
uint64_t addr, struct amdgpu_bo **bo,
|
uint64_t addr, struct amdgpu_bo **bo,
|
||||||
struct amdgpu_bo_va_mapping **mapping);
|
struct amdgpu_bo_va_mapping **mapping);
|
||||||
|
|||||||
@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
 	}
 }
 
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+	return !(adev->flags & AMD_IS_APU) ||
+		(pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
 /**
  * amdgpu_acpi_is_s0ix_active
  *
@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
-	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
-		if (adev->flags & AMD_IS_APU)
-			return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
-	}
-#endif
+	if (!(adev->flags & AMD_IS_APU) ||
+	    (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+		return false;
+
+	if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+		dev_warn_once(adev->dev,
+			      "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+			      "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
 		return false;
 	}
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+	dev_warn_once(adev->dev,
+		      "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
+	return false;
+#else
+	return true;
+#endif /* CONFIG_AMD_PMC */
+}
+
+#endif /* CONFIG_SUSPEND */
@@ -1499,6 +1499,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 static int amdgpu_pmops_prepare(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
 	/* Return a positive number here so
 	 * DPM_FLAG_SMART_SUSPEND works properly
@@ -1506,6 +1507,13 @@ static int amdgpu_pmops_prepare(struct device *dev)
 	if (amdgpu_device_supports_boco(drm_dev))
 		return pm_runtime_suspended(dev);
 
+	/* if we will not support s3 or s2i for the device
+	 * then skip suspend
+	 */
+	if (!amdgpu_acpi_is_s0ix_active(adev) &&
+	    !amdgpu_acpi_is_s3_active(adev))
+		return 1;
+
 	return 0;
 }
 
@@ -1892,7 +1892,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	unsigned i;
 	int r;
 
-	if (direct_submit && !ring->sched.ready) {
+	if (!direct_submit && !ring->sched.ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
@@ -2062,6 +2062,10 @@ static int sdma_v4_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* SMU saves SDMA state for us */
+	if (adev->in_s0ix)
+		return 0;
+
 	return sdma_v4_0_hw_fini(adev);
 }
 
@@ -2069,6 +2073,10 @@ static int sdma_v4_0_resume(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* SMU restores SDMA state for us */
+	if (adev->in_s0ix)
+		return 0;
+
 	return sdma_v4_0_hw_init(adev);
 }
 
@@ -3230,7 +3230,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
 	/* Use GRPH_PFLIP interrupt */
 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
-			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
 			i++) {
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
 		if (r) {
@@ -120,6 +120,10 @@ int dcn31_smu_send_msg_with_param(
 	result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
 
 	if (result == VBIOSSMC_Result_Failed) {
+		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+		    param == TABLE_WATERMARKS)
+			DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+		else
 			ASSERT(0);
 		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
 		return -1;
@@ -1118,6 +1118,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
 		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
 
+		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
 		if (dc->res_pool->dmcu != NULL)
 			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
 	}
@@ -185,6 +185,7 @@ struct dc_caps {
 	struct dc_color_caps color;
 	bool vbios_lttpr_aware;
 	bool vbios_lttpr_enable;
+	uint32_t max_otg_num;
 };
 
 struct dc_bug_wa {
@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
 	ret_val = wm_ns * refclk_mhz;
 	ret_val /= 1000;
 
-	if (ret_val > clamp_value)
+	if (ret_val > clamp_value) {
+		/* clamping WMs is abnormal, unexpected and may lead to underflow*/
+		ASSERT(0);
 		ret_val = clamp_value;
+	}
 
 	return ret_val;
 }
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
 		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
 
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
 		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
 	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
 		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
 
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
 		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
 	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
 		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
 
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
 		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
 	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
 		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
 
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
 		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
 	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->a.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->b.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->c.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->d.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
@@ -291,14 +291,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
 
 static int yellow_carp_mode_reset(struct smu_context *smu, int type)
 {
-	int ret = 0, index = 0;
+	int ret = 0;
 
-	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
-				SMU_MSG_GfxDeviceDriverReset);
-	if (index < 0)
-		return index == -EACCES ? 0 : index;
-
-	ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
 	if (ret)
 		dev_err(smu->adev->dev, "Failed to mode reset!\n");
 
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
 	state->mode_blob = NULL;
 
 	if (mode) {
+		struct drm_property_blob *blob;
+
 		drm_mode_convert_to_umode(&umode, mode);
-		state->mode_blob =
-			drm_property_create_blob(state->crtc->dev,
-						 sizeof(umode),
-						 &umode);
-		if (IS_ERR(state->mode_blob))
-			return PTR_ERR(state->mode_blob);
+		blob = drm_property_create_blob(crtc->dev,
+						sizeof(umode), &umode);
+		if (IS_ERR(blob))
+			return PTR_ERR(blob);
 
 		drm_mode_copy(&state->mode, mode);
+
+		state->mode_blob = blob;
 		state->enable = true;
 		drm_dbg_atomic(crtc->dev,
 			       "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
@@ -515,6 +515,7 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	 */
 	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
 	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_DONTEXPAND;
 
 	cma_obj = to_drm_gem_cma_obj(obj);
 
@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
 config DRM_I915_GVT
 	bool "Enable Intel GVT-g graphics virtualization host support"
 	depends on DRM_I915
+	depends on X86
 	depends on 64BIT
 	default n
 	help
@@ -361,6 +361,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 		port++;
 	}
 
+	/*
+	 * The port numbering and mapping here is bizarre. The now-obsolete
+	 * swsci spec supports ports numbered [0..4]. Port E is handled as a
+	 * special case, but port F and beyond are not. The functionality is
+	 * supposed to be obsolete for new platforms. Just bail out if the port
+	 * number is out of bounds after mapping.
+	 */
+	if (port > 4) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+			    intel_encoder->base.base.id, intel_encoder->base.name,
+			    port_name(intel_encoder->port), port);
+		return -EINVAL;
+	}
+
 	if (!enable)
 		parm |= 4 << 8;
 
@@ -759,11 +759,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
 	if (obj->mm.madv != I915_MADV_WILLNEED) {
 		bo->priority = I915_TTM_PRIO_PURGE;
 	} else if (!i915_gem_object_has_pages(obj)) {
-		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
-			bo->priority = I915_TTM_PRIO_HAS_PAGES;
+		bo->priority = I915_TTM_PRIO_NO_PAGES;
 	} else {
-		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
-			bo->priority = I915_TTM_PRIO_NO_PAGES;
+		bo->priority = I915_TTM_PRIO_HAS_PAGES;
 	}
 
 	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
@@ -4844,7 +4844,7 @@ static bool check_mbus_joined(u8 active_pipes,
 {
 	int i;
 
-	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
 		if (dbuf_slices[i].active_pipes == active_pipes)
 			return dbuf_slices[i].join_mbus;
 	}
@@ -4861,7 +4861,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
 {
 	int i;
 
-	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
 		if (dbuf_slices[i].active_pipes == active_pipes &&
 		    dbuf_slices[i].join_mbus == join_mbus)
 			return dbuf_slices[i].dbuf_mask[pipe];
@@ -117,10 +117,14 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon)
 int
 nvkm_falcon_reset(struct nvkm_falcon *falcon)
 {
-	nvkm_falcon_disable(falcon);
-	return nvkm_falcon_enable(falcon);
+	if (!falcon->func->reset) {
+		nvkm_falcon_disable(falcon);
+		return nvkm_falcon_enable(falcon);
+	}
+
+	return falcon->func->reset(falcon);
 }
 
 int
 nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
 {
@@ -23,9 +23,38 @@
  */
 #include "priv.h"
 
+static int
+gm200_pmu_flcn_reset(struct nvkm_falcon *falcon)
+{
+	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+
+	nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff);
+	pmu->func->reset(pmu);
+	return nvkm_falcon_enable(falcon);
+}
+
+const struct nvkm_falcon_func
+gm200_pmu_flcn = {
+	.debug = 0xc08,
+	.fbif = 0xe00,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+	.reset = gm200_pmu_flcn_reset,
+	.cmdq = { 0x4a0, 0x4b0, 4 },
+	.msgq = { 0x4c8, 0x4cc, 0 },
+};
+
 static const struct nvkm_pmu_func
 gm200_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.reset = gf100_pmu_reset,
 };
@@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gm20b_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gp102_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gp102_pmu_enabled,
 	.reset = gp102_pmu_reset,
 };
@@ -78,7 +78,7 @@ gp10b_pmu_acr = {
 
 static const struct nvkm_pmu_func
 gp10b_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
@@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *);
 
 void gk110_pmu_pgob(struct nvkm_pmu *, bool);
 
+extern const struct nvkm_falcon_func gm200_pmu_flcn;
+
 void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
 void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
 int gm20b_pmu_acr_boot(struct nvkm_falcon *);
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 	 * so don't register a backlight device
 	 */
 	if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
-	    (rdev->pdev->device == 0x6741))
+	    (rdev->pdev->device == 0x6741) &&
+	    !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
 		return;
 
 	if (!radeon_encoder->enc_priv)
@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
-	ret = clk_prepare_enable(hdmi->vpll_clk);
-	if (ret) {
-		DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
-			      ret);
-		return ret;
-	}
-
 	hdmi->phy = devm_phy_optional_get(dev, "hdmi");
 	if (IS_ERR(hdmi->phy)) {
 		ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
+	ret = clk_prepare_enable(hdmi->vpll_clk);
+	if (ret) {
+		DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+			      ret);
+		return ret;
+	}
+
 	drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
@@ -36,11 +36,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
 {
 	union cmd_response cmd_resp;
 
-	/* Get response with status within a max of 800 ms timeout */
+	/* Get response with status within a max of 1600 ms timeout */
 	if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
 				(cmd_resp.response_v2.response == sensor_sts &&
 				cmd_resp.response_v2.status == 0 && (sid == 0xff ||
-				cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+				cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
 		return cmd_resp.response_v2.response;
 
 	return SENSOR_DISABLED;
@@ -48,7 +48,7 @@ union sfh_cmd_base {
 	} s;
 	struct {
 		u32 cmd_id : 4;
-		u32 intr_enable : 1;
+		u32 intr_disable : 1;
 		u32 rsvd1 : 3;
 		u32 length : 7;
 		u32 mem_type : 1;
@@ -26,6 +26,7 @@
 #define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
 #define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
 #define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+#define ILLUMINANCE_MASK GENMASK(14, 0)
 
 int get_report_descriptor(int sensor_idx, u8 *rep_desc)
 {
@@ -245,7 +246,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
 		get_common_inputs(&als_input.common_property, report_id);
 		/* For ALS ,V2 Platforms uses C2P_MSG5 register instead of DRAM access method */
 		if (supported_input == V2_STATUS)
-			als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+			als_input.illuminance_value =
+				readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
 		else
 			als_input.illuminance_value =
 				(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
 	return 0;
 err_free:
+	usb_put_dev(udev);
 	kfree(priv);
 	return ret;
 }
@@ -1356,6 +1356,7 @@
 #define USB_VENDOR_ID_UGTIZER 0x2179
 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
 
 #define USB_VENDOR_ID_VIEWSONIC 0x0543
 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
 
 	struct regulator *vdd;
 	struct notifier_block nb;
-	struct mutex regulator_mutex;
 	struct gpio_desc *reset_gpio;
 	const struct goodix_i2c_hid_timing_data *timings;
 };
@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
 		container_of(nb, struct i2c_hid_of_goodix, nb);
 	int ret = NOTIFY_OK;
 
-	mutex_lock(&ihid_goodix->regulator_mutex);
-
 	switch (event) {
 	case REGULATOR_EVENT_PRE_DISABLE:
 		gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
 		break;
 	}
 
-	mutex_unlock(&ihid_goodix->regulator_mutex);
-
 	return ret;
 }
 
@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
 	if (!ihid_goodix)
 		return -ENOMEM;
 
-	mutex_init(&ihid_goodix->regulator_mutex);
-
 	ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
 	ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
 
@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
 	 * long. Holding the controller in reset apparently draws extra
 	 * power.
 	 */
-	mutex_lock(&ihid_goodix->regulator_mutex);
 	ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
 	ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
-	if (ret) {
-		mutex_unlock(&ihid_goodix->regulator_mutex);
+	if (ret)
 		return dev_err_probe(&client->dev, ret,
 			"regulator notifier request failed\n");
-	}
 
 	/*
 	 * If someone else is holding the regulator on (or the regulator is
 	 * an always-on one) we might never be told to deassert reset. Do it
-	 * now. Here we'll assume that someone else might have _just
-	 * barely_ turned the regulator on so we'll do the full
-	 * "post_power_delay" just in case.
+	 * now... and temporarily bump the regulator reference count just to
+	 * make sure it is impossible for this to race with our own notifier!
+	 * We also assume that someone else might have _just barely_ turned
+	 * the regulator on so we'll do the full "post_power_delay" just in
+	 * case.
 	 */
-	if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+	if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+		ret = regulator_enable(ihid_goodix->vdd);
+		if (ret)
+			return ret;
 		goodix_i2c_hid_deassert_reset(ihid_goodix, true);
-	mutex_unlock(&ihid_goodix->regulator_mutex);
+		regulator_disable(ihid_goodix->vdd);
+	}
 
 	return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
 }
@@ -2027,8 +2027,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
 	kobj->kset = dev->channels_kset;
 	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
 				   "%u", relid);
-	if (ret)
+	if (ret) {
+		kobject_put(kobj);
 		return ret;
+	}
 
 	ret = sysfs_create_group(kobj, &vmbus_chan_group);
 
@@ -2037,6 +2039,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
 	 * The calling functions' error handling paths will cleanup the
 	 * empty channel directory.
 	 */
+	kobject_put(kobj);
 	dev_err(device, "Unable to set up channel sysfs files\n");
 	return ret;
 }
@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
 
 	/* set the data in/out register size for compatible SoCs */
 	if (of_device_is_compatible(dev->device->of_node,
-				    "brcmstb,brcmper-i2c"))
+				    "brcm,brcmper-i2c"))
 		dev->data_regsz = sizeof(u8);
 	else
 		dev->data_regsz = sizeof(u32);
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
 		cci->master[idx].adap.quirks = &cci->data->quirks;
 		cci->master[idx].adap.algo = &cci_algo;
 		cci->master[idx].adap.dev.parent = dev;
-		cci->master[idx].adap.dev.of_node = child;
+		cci->master[idx].adap.dev.of_node = of_node_get(child);
 		cci->master[idx].master = idx;
 		cci->master[idx].cci = cci;
 
@@ -643,9 +643,11 @@ static int cci_probe(struct platform_device *pdev)
 			continue;
 
 		ret = i2c_add_adapter(&cci->master[i].adap);
-		if (ret < 0)
+		if (ret < 0) {
+			of_node_put(cci->master[i].adap.dev.of_node);
 			goto error_i2c;
+		}
 	}
 
 	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
 	pm_runtime_use_autosuspend(dev);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
 	return 0;
 
 error_i2c:
-	for (; i >= 0; i--) {
-		if (cci->master[i].cci)
+	for (--i ; i >= 0; i--) {
+		if (cci->master[i].cci) {
 			i2c_del_adapter(&cci->master[i].adap);
+			of_node_put(cci->master[i].adap.dev.of_node);
+		}
 	}
 error:
 	disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
 	int i;
 
 	for (i = 0; i < cci->data->num_masters; i++) {
-		if (cci->master[i].cci)
+		if (cci->master[i].cci) {
 			i2c_del_adapter(&cci->master[i].adap);
+			of_node_put(cci->master[i].adap.dev.of_node);
+		}
 		cci_halt(cci, i);
 	}
 
@@ -398,3 +398,4 @@ out_free_priv:
 
 IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
 IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
@@ -2156,7 +2156,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
-	blk_set_queue_dying(md->queue);
+	blk_mark_disk_dead(md->disk);
 
 	/*
 	 * Take suspend_lock so that presuspend and postsuspend methods
@@ -1682,12 +1682,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 	blk_status_t error = BLK_STS_OK;
-	int retries = 0;
 
 	do {
 		u32 status;
 		int err;
+		int retries = 0;
 
+		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
 			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
 
 			mmc_wait_for_req(host, mrq);
@@ -1703,10 +1704,9 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 				goto error_exit;
 			}
 
-		if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
-			continue;
-
-		retries = 0;
+			if (!mrq->cmd->error)
+				break;
+		}
 
 		if (mrq->cmd->error ||
 		    mrq->data->error ||
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
 		}
 	}
 
-	if (erasesize)
-		div_u64_rem(len, (uint32_t)erasesize, &rem);
-
 	if (len == 0 || erasesize == 0 || erasesize > len
-	    || erasesize > UINT_MAX || rem) {
+	    || erasesize > UINT_MAX) {
 		parse_err("illegal erasesize or len\n");
 		ret = -EINVAL;
 		goto error;
 	}
 
+	div_u64_rem(len, (uint32_t)erasesize, &rem);
+	if (rem) {
+		parse_err("len is not multiple of erasesize\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
 	ret = register_device(name, start, len, (uint32_t)erasesize);
 	if (ret)
 		goto error;
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
 					     mtd->oobsize / trans,
 					     host->hwcfg.sector_size_1k);
 
-		if (!ret) {
+		if (ret != -EBADMSG) {
 			*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
 
 			if (*err_addr)
@@ -2293,7 +2293,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
 		this->hw.must_apply_timings = false;
 		ret = gpmi_nfc_apply_timings(this);
 		if (ret)
-			return ret;
+			goto out_pm;
 	}
 
 	dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2422,6 +2422,7 @@ unmap:
 
 	this->bch = false;
 
+out_pm:
 	pm_runtime_mark_last_busy(this->dev);
 	pm_runtime_put_autosuspend(this->dev);
 
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
 	struct ingenic_ecc *ecc;
 
 	pdev = of_find_device_by_node(np);
-	if (!pdev || !platform_get_drvdata(pdev))
+	if (!pdev)
 		return ERR_PTR(-EPROBE_DEFER);
 
+	if (!platform_get_drvdata(pdev)) {
+		put_device(&pdev->dev);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
 	ecc = platform_get_drvdata(pdev);
 	clk_prepare_enable(ecc->clk);
 
@@ -2,7 +2,6 @@
 /*
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
-
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -3063,10 +3062,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
 	if (dma_mapping_error(dev, nandc->base_dma))
 		return -ENXIO;
 
-	ret = qcom_nandc_alloc(nandc);
-	if (ret)
-		goto err_nandc_alloc;
-
 	ret = clk_prepare_enable(nandc->core_clk);
 	if (ret)
 		goto err_core_clk;
@@ -3075,6 +3070,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_aon_clk;
 
+	ret = qcom_nandc_alloc(nandc);
+	if (ret)
+		goto err_nandc_alloc;
+
 	ret = qcom_nandc_setup(nandc);
 	if (ret)
 		goto err_setup;
@@ -3086,15 +3085,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
 	return 0;
 
 err_setup:
+	qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
 	clk_disable_unprepare(nandc->aon_clk);
 err_aon_clk:
 	clk_disable_unprepare(nandc->core_clk);
 err_core_clk:
-	qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
 	dma_unmap_resource(dev, res->start, resource_size(res),
 			   DMA_BIDIRECTIONAL, 0);
-
 	return ret;
 }
 
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
 			       const struct mtd_partition **pparts,
 			       struct mtd_part_parser_data *data)
 {
+	size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+	int ret, i, j, tmpparts, numparts = 0;
 	struct smem_flash_pentry *pentry;
 	struct smem_flash_ptable *ptable;
-	size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
 	struct mtd_partition *parts;
-	int ret, i, numparts;
 	char *name, *c;
 
 	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -87,8 +87,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
 	}
 
 	/* Ensure that # of partitions is less than the max we have allocated */
-	numparts = le32_to_cpu(ptable->numparts);
-	if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+	tmpparts = le32_to_cpu(ptable->numparts);
+	if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
 		pr_err("Partition numbers exceed the max limit\n");
 		return -EINVAL;
 	}
@@ -116,11 +116,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
 		return PTR_ERR(ptable);
 	}
 
+	for (i = 0; i < tmpparts; i++) {
+		pentry = &ptable->pentry[i];
+		if (pentry->name[0] != '\0')
+			numparts++;
+	}
+
 	parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
 	if (!parts)
 		return -ENOMEM;
 
-	for (i = 0; i < numparts; i++) {
+	for (i = 0, j = 0; i < tmpparts; i++) {
 		pentry = &ptable->pentry[i];
 		if (pentry->name[0] == '\0')
 			continue;
@@ -135,24 +141,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
 		for (c = name; *c != '\0'; c++)
 			*c = tolower(*c);
 
-		parts[i].name = name;
-		parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
-		parts[i].mask_flags = pentry->attr;
-		parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+		parts[j].name = name;
+		parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+		parts[j].mask_flags = pentry->attr;
+		parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
 		pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
 			 i, pentry->name, le32_to_cpu(pentry->offset),
 			 le32_to_cpu(pentry->length), pentry->attr);
+		j++;
 	}
 
 	pr_debug("SMEM partition table found: ver: %d len: %d\n",
-		 le32_to_cpu(ptable->version), numparts);
+		 le32_to_cpu(ptable->version), tmpparts);
 	*pparts = parts;
 
 	return numparts;
 
 out_free_parts:
-	while (--i >= 0)
-		kfree(parts[i].name);
+	while (--j >= 0)
+		kfree(parts[j].name);
 	kfree(parts);
 	*pparts = NULL;
 
@@ -166,6 +173,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
 
 	for (i = 0; i < nr_parts; i++)
 		kfree(pparts[i].name);
+
+	kfree(pparts);
 }
 
 static const struct of_device_id qcomsmem_of_match_table[] = {
@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
 	if (bond == NULL)
 		return 0;
 
-	return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+	return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
 }
 
 /**
@@ -1995,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
  */
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
 {
-	BOND_AD_INFO(bond).agg_select_timer = timeout;
+	atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
 }
 
 /**
@@ -2278,6 +2278,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
 	spin_unlock_bh(&bond->mode_lock);
 }
 
+/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+	int val, nval;
+
+	while (1) {
+		val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+		if (!val)
+			return false;
+		nval = val - 1;
+		if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+				   val, nval) == val)
+			break;
+	}
+	return nval == 0;
+}
+
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
  * @work: work context to fetch bonding struct to work on from
@@ -2313,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	if (!bond_has_slaves(bond))
 		goto re_arm;
 
-	/* check if agg_select_timer timer after initialize is timed out */
-	if (BOND_AD_INFO(bond).agg_select_timer &&
-	    !(--BOND_AD_INFO(bond).agg_select_timer)) {
+	if (bond_agg_timer_advance(bond)) {
 		slave = bond_first_slave_rcu(bond);
 		port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
 
@@ -2377,10 +2377,9 @@ static int __bond_release_one(struct net_device *bond_dev,
 		bond_select_active_slave(bond);
 	}
 
-	if (!bond_has_slaves(bond)) {
-		bond_set_carrier(bond);
+	bond_set_carrier(bond);
+	if (!bond_has_slaves(bond))
 		eth_hw_addr_random(bond_dev);
-	}
 
 	unblock_netpoll_tx();
 	synchronize_rcu();
@@ -81,6 +81,7 @@ config NET_DSA_REALTEK_SMI
 
 config NET_DSA_SMSC_LAN9303
 	tristate
+	depends on VLAN_8021Q || VLAN_8021Q=n
 	select NET_DSA_TAG_LAN9303
 	select REGMAP
 	help
@@ -10,6 +10,7 @@
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 
 #include "lan9303.h"
@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
 static int lan9303_port_enable(struct dsa_switch *ds, int port,
 				struct phy_device *phy)
 {
+	struct dsa_port *dp = dsa_to_port(ds, port);
 	struct lan9303 *chip = ds->priv;
 
-	if (!dsa_is_user_port(ds, port))
+	if (!dsa_port_is_user(dp))
 		return 0;
 
+	vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
 	return lan9303_enable_processing_port(chip, port);
 }
 
 static void lan9303_port_disable(struct dsa_switch *ds, int port)
 {
+	struct dsa_port *dp = dsa_to_port(ds, port);
 	struct lan9303 *chip = ds->priv;
 
-	if (!dsa_is_user_port(ds, port))
+	if (!dsa_port_is_user(dp))
 		return;
 
+	vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
 	lan9303_disable_processing_port(chip, port);
 	lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
 }
@@ -1309,7 +1316,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
 				     struct device_node *np)
 {
 	chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
-						   GPIOD_OUT_LOW);
+						   GPIOD_OUT_HIGH);
 	if (IS_ERR(chip->reset_gpio))
 		return PTR_ERR(chip->reset_gpio);
 
@@ -2201,8 +2201,8 @@ static int gswip_remove(struct platform_device *pdev)
 
 	if (priv->ds->slave_mii_bus) {
 		mdiobus_unregister(priv->ds->slave_mii_bus);
-		mdiobus_free(priv->ds->slave_mii_bus);
 		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+		mdiobus_free(priv->ds->slave_mii_bus);
 	}
 
 	for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2291,6 +2291,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
 	if (!mv88e6xxx_max_vid(chip))
 		return -EOPNOTSUPP;
 
+	/* The ATU removal procedure needs the FID to be mapped in the VTU,
+	 * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+	 * switchdev workqueue to ensure that all FDB entries are deleted
+	 * before we remove the VLAN.
+	 */
+	dsa_flush_workqueue();
+
 	mv88e6xxx_reg_lock(chip);
 
 	err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
 		atl1c_clean_buffer(pdev, buffer_info);
 	}
 
-	netdev_reset_queue(adapter->netdev);
+	netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
 
 	/* Zero out Tx-buffers */
 	memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct bgmac *bgmac;
+	struct resource *regs;
 	int ret;
 
 	bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
 	if (IS_ERR(bgmac->plat.base))
 		return PTR_ERR(bgmac->plat.base);
 
-	bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
-	if (IS_ERR(bgmac->plat.idm_base))
-		return PTR_ERR(bgmac->plat.idm_base);
-	else
+	/* The idm_base resource is optional for some platforms */
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+	if (regs) {
+		bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+		if (IS_ERR(bgmac->plat.idm_base))
+			return PTR_ERR(bgmac->plat.idm_base);
 		bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+	}
 
-	bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
-	if (IS_ERR(bgmac->plat.nicpm_base))
-		return PTR_ERR(bgmac->plat.nicpm_base);
+	/* The nicpm_base resource is optional for some platforms */
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+	if (regs) {
+		bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+							       regs);
+		if (IS_ERR(bgmac->plat.nicpm_base))
+			return PTR_ERR(bgmac->plat.nicpm_base);
+	}
 
 	bgmac->read = platform_bgmac_read;
 	bgmac->write = platform_bgmac_write;
@@ -4739,7 +4739,7 @@ static int macb_probe(struct platform_device *pdev)
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
-		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
 		bp->hw_dma_cap |= HW_DMA_CAP_64B;
 	}
 #endif
@@ -4329,7 +4329,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	}
 
 	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+	mutex_init(&priv->onestep_tstamp_lock);
 	skb_queue_head_init(&priv->tx_skbs);
 
 	priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 	struct flow_dissector *dissector = rule->match.dissector;
 	struct netlink_ext_ack *extack = cls->common.extack;
+	int ret = -EOPNOTSUPP;
 
 	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
 		}
 
 		*vlan = (u16)match.key->vlan_id;
+		ret = 0;
 	}
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -1521,6 +1521,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
 	if (status)
 		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
 			vsi_num, ice_stat_str(status));
+
+	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+				 ICE_FLOW_SEG_HDR_ESP);
+	if (status)
+		dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+			vsi_num, status);
 }
 
 /**
@@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
 	skb_put(skb, byte_cnt - ETH_FCS_LEN);
 	eth_skb_pad(skb);
 	skb->protocol = eth_type_trans(skb, netdev);
-	netif_rx(skb);
 	netdev->stats.rx_bytes += skb->len;
 	netdev->stats.rx_packets++;
+	netif_rx(skb);
 }
 
 static int sparx5_inject(struct sparx5 *sparx5,
@@ -100,6 +100,7 @@ struct at86rf230_local {
 	unsigned long cal_timeout;
 	bool is_tx;
 	bool is_tx_from_off;
+	bool was_tx;
 	u8 tx_retry;
 	struct sk_buff *tx_skb;
 	struct at86rf230_state_change tx;
@@ -343,8 +344,12 @@ at86rf230_async_error_recover_complete(void *context)
 	if (ctx->free)
 		kfree(ctx);
 
-	ieee802154_wake_queue(lp->hw);
+	if (lp->was_tx) {
+		lp->was_tx = 0;
+		dev_kfree_skb_any(lp->tx_skb);
+		ieee802154_wake_queue(lp->hw);
+	}
 }
 
 static void
 at86rf230_async_error_recover(void *context)
@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 
-	lp->is_tx = 0;
+	if (lp->is_tx) {
+		lp->was_tx = 1;
+		lp->is_tx = 0;
+	}
+
 	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
 				     at86rf230_async_error_recover_complete);
 }
@@ -2977,8 +2977,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
 	ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
 	ca8210_hw->phy->cca_ed_level = -9800;
 	ca8210_hw->phy->symbol_duration = 16;
-	ca8210_hw->phy->lifs_period = 40;
-	ca8210_hw->phy->sifs_period = 12;
+	ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+	ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
 	ca8210_hw->flags =
 		IEEE802154_HW_AFILT |
 		IEEE802154_HW_OMIT_CKSUM |
@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
 		if (err)
 			goto err_fib6_rt_nh_del;
 
-		fib6_event->rt_arr[i]->trap = true;
+		WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
 	}
 
 	return 0;
 
 err_fib6_rt_nh_del:
 	for (i--; i >= 0; i--) {
-		fib6_event->rt_arr[i]->trap = false;
+		WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
 		nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
 	}
 	return err;
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
 
 static int mt7531_phy_config_init(struct phy_device *phydev)
 {
-	if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
-		return -EINVAL;
-
 	mtk_gephy_config_init(phydev);
 
 	/* PHY link down power saving enable */