Merge 5.15.71 into android14-5.15

Changes in 5.15.71
	drm/amdgpu: Separate vf2pf work item init from virt data exchange
	drm/amdgpu: make sure to init common IP before gmc
	staging: r8188eu: Remove support for devices with 8188FU chipset (0bda:f179)
	staging: r8188eu: Add Rosewill USB-N150 Nano to device tables
	usb: dwc3: gadget: Avoid starting DWC3 gadget during UDC unbind
	usb: dwc3: Issue core soft reset before enabling run/stop
	usb: dwc3: gadget: Prevent repeat pullup()
	usb: dwc3: gadget: Refactor pullup()
	usb: dwc3: gadget: Don't modify GEVNTCOUNT in pullup()
	usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
	usb: add quirks for Lenovo OneLink+ Dock
	usb: gadget: udc-xilinx: replace memcpy with memcpy_toio
	Revert "usb: add quirks for Lenovo OneLink+ Dock"
	Revert "usb: gadget: udc-xilinx: replace memcpy with memcpy_toio"
	drivers/base: Fix unsigned comparison to -1 in CPUMAP_FILE_MAX_BYTES
	USB: core: Fix RST error in hub.c
	USB: serial: option: add Quectel BG95 0x0203 composition
	USB: serial: option: add Quectel RM520N
	Revert "ALSA: usb-audio: Split endpoint setups for hw_params and prepare"
	ALSA: core: Fix double-free at snd_card_new()
	ALSA: hda/tegra: set depop delay for tegra
	ALSA: hda: add Intel 5 Series / 3400 PCI DID
	ALSA: hda/realtek: Add quirk for Huawei WRT-WX9
	ALSA: hda/realtek: Enable 4-speaker output Dell Precision 5570 laptop
	ALSA: hda/realtek: Re-arrange quirk table entries
	ALSA: hda/realtek: Add pincfg for ASUS G513 HP jack
	ALSA: hda/realtek: Add pincfg for ASUS G533Z HP jack
	ALSA: hda/realtek: Add quirk for ASUS GA503R laptop
	ALSA: hda/realtek: Enable 4-speaker output Dell Precision 5530 laptop
	iommu/vt-d: Check correct capability for sagaw determination
	btrfs: fix hang during unmount when stopping block group reclaim worker
	btrfs: fix hang during unmount when stopping a space reclaim worker
	media: flexcop-usb: fix endpoint type check
	usb: dwc3: core: leave default DMA if the controller does not support 64-bit DMA
	thunderbolt: Add support for Intel Maple Ridge single port controller
	efi: x86: Wipe setup_data on pure EFI boot
	efi: libstub: check Shim mode using MokSBStateRT
	wifi: mt76: fix reading current per-tid starting sequence number for aggregation
	gpio: mockup: fix NULL pointer dereference when removing debugfs
	gpio: mockup: Fix potential resource leakage when register a chip
	gpiolib: cdev: Set lineevent_state::irq after IRQ register successfully
	riscv: fix a nasty sigreturn bug...
	kasan: call kasan_malloc() from __kmalloc_*track_caller()
	can: flexcan: flexcan_mailbox_read() fix return value for drop = true
	net: mana: Add rmb after checking owner bits
	mm/slub: fix to return errno if kmalloc() fails
	mm: slub: fix flush_cpu_slab()/__free_slab() invocations in task context.
	KVM: x86: Inject #UD on emulated XSETBV if XSAVES isn't enabled
	arm64: topology: fix possible overflow in amu_fie_setup()
	vmlinux.lds.h: CFI: Reduce alignment of jump-table to function alignment
	xfs: reorder iunlink remove operation in xfs_ifree
	xfs: fix xfs_ifree() error handling to not leak perag ref
	xfs: validate inode fork size against fork format
	firmware: arm_scmi: Harden accesses to the reset domains
	firmware: arm_scmi: Fix the asynchronous reset requests
	arm64: dts: rockchip: Pull up wlan wake# on Gru-Bob
	arm64: dts: rockchip: Fix typo in lisense text for PX30.Core
	drm/mediatek: dsi: Add atomic {destroy,duplicate}_state, reset callbacks
	arm64: dts: rockchip: Set RK3399-Gru PCLK_EDP to 24 MHz
	dmaengine: ti: k3-udma-private: Fix refcount leak bug in of_xudma_dev_get()
	arm64: dts: rockchip: Remove 'enable-active-low' from rk3399-puma
	netfilter: nf_conntrack_sip: fix ct_sip_walk_headers
	netfilter: nf_conntrack_irc: Tighten matching on DCC message
	netfilter: nfnetlink_osf: fix possible bogus match in nf_osf_find()
	ice: Don't double unplug aux on peer initiated reset
	iavf: Fix cached head and tail value for iavf_get_tx_pending
	ipvlan: Fix out-of-bound bugs caused by unset skb->mac_header
	net: core: fix flow symmetric hash
	net: phy: aquantia: wait for the suspend/resume operations to finish
	scsi: qla2xxx: Fix memory leak in __qlt_24xx_handle_abts()
	scsi: mpt3sas: Fix return value check of dma_get_required_mask()
	net: bonding: Share lacpdu_mcast_addr definition
	net: bonding: Unsync device addresses on ndo_stop
	net: team: Unsync device addresses on ndo_stop
	drm/panel: simple: Fix innolux_g121i1_l01 bus_format
	MIPS: lantiq: export clk_get_io() for lantiq_wdt.ko
	MIPS: Loongson32: Fix PHY-mode being left unspecified
	um: fix default console kernel parameter
	iavf: Fix bad page state
	mlxbf_gige: clear MDIO gateway lock after read
	iavf: Fix set max MTU size with port VLAN and jumbo frames
	i40e: Fix VF set max MTU size
	i40e: Fix set max_tx_rate when it is lower than 1 Mbps
	sfc: fix TX channel offset when using legacy interrupts
	sfc: fix null pointer dereference in efx_hard_start_xmit
	drm/hisilicon/hibmc: Allow to be built if COMPILE_TEST is enabled
	drm/hisilicon: Add depends on MMU
	of: mdio: Add of_node_put() when breaking out of for_each_xx
	net: ipa: properly limit modem routing table use
	wireguard: ratelimiter: disable timings test by default
	wireguard: netlink: avoid variable-sized memcpy on sockaddr
	net: enetc: move enetc_set_psfp() out of the common enetc_set_features()
	net: enetc: deny offload of tc-based TSN features on VF interfaces
	net/sched: taprio: avoid disabling offload when it was never enabled
	net/sched: taprio: make qdisc_leaf() see the per-netdev-queue pfifo child qdiscs
	netfilter: nf_tables: fix nft_counters_enabled underflow at nf_tables_addchain()
	netfilter: nf_tables: fix percpu memory leak at nf_tables_addchain()
	netfilter: ebtables: fix memory leak when blob is malformed
	net: ravb: Fix PHY state warning splat during system resume
	net: sh_eth: Fix PHY state warning splat during system resume
	can: gs_usb: gs_can_open(): fix race dev->can.state condition
	perf stat: Fix BPF program section name
	perf jit: Include program header in ELF files
	perf kcore_copy: Do not check /proc/modules is unchanged
	perf tools: Honor namespace when synthesizing build-ids
	drm/mediatek: dsi: Move mtk_dsi_stop() call back to mtk_dsi_poweroff()
	net/smc: Stop the CLC flow if no link to map buffers on
	bonding: fix NULL deref in bond_rr_gen_slave_id
	net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
	net: sched: fix possible refcount leak in tc_new_tfilter()
	bnxt: prevent skb UAF after handing over to PTP worker
	selftests: forwarding: add shebang for sch_red.sh
	KVM: x86/mmu: Fold rmap_recycle into rmap_add
	serial: fsl_lpuart: Reset prior to registration
	serial: Create uart_xmit_advance()
	serial: tegra: Use uart_xmit_advance(), fixes icount.tx accounting
	serial: tegra-tcu: Use uart_xmit_advance(), fixes icount.tx accounting
	s390/dasd: fix Oops in dasd_alias_get_start_dev due to missing pavgroup
	drm/amd/amdgpu: fixing read wrong pf2vf data in SRIOV
	Drivers: hv: Never allocate anything besides framebuffer from framebuffer memory region
	drm/gma500: Fix BUG: sleeping function called from invalid context errors
	drm/amd/pm: disable BACO entry/exit completely on several sienna cichlid cards
	drm/amdgpu: use dirty framebuffer helper
	drm/amd/display: Limit user regamma to a valid value
	drm/amd/display: Reduce number of arguments of dml31's CalculateWatermarksAndDRAMSpeedChangeSupport()
	drm/amd/display: Reduce number of arguments of dml31's CalculateFlipSchedule()
	drm/amd/display: Mark dml30's UseMinimumDCFCLK() as noinline for stack usage
	drm/rockchip: Fix return type of cdn_dp_connector_mode_valid
	fsdax: Fix infinite loop in dax_iomap_rw()
	workqueue: don't skip lockdep work dependency in cancel_work_sync()
	i2c: imx: If pm_runtime_get_sync() returned 1 device access is possible
	i2c: mlxbf: incorrect base address passed during io write
	i2c: mlxbf: prevent stack overflow in mlxbf_i2c_smbus_start_transaction()
	i2c: mlxbf: Fix frequency calculation
	drm/amdgpu: don't register a dirty callback for non-atomic
	NFSv4: Fixes for nfs4_inode_return_delegation()
	devdax: Fix soft-reservation memory description
	ext4: make directory inode spreading reflect flexbg size
	ext4: fix bug in extents parsing when eh_entries == 0 and eh_depth > 0
	ext4: limit the number of retries after discarding preallocations blocks
	ext4: make mballoc try target group first even with mb_optimize_scan
	ext4: avoid unnecessary spreading of allocations among groups
	ext4: use locality group preallocation for small closed files
	Linux 5.15.71

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie66ba67f788b7ce6ffd766544f9ec0286bec5d9f
Greg Kroah-Hartman committed on 2022-09-28 13:32:32 +02:00
120 changed files with 1017 additions and 822 deletions


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 70
SUBLEVEL = 71
EXTRAVERSION =
NAME = Trick or Treat


@@ -2,8 +2,8 @@
/*
* Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
* Copyright (c) 2020 Engicam srl
* Copyright (c) 2020 Amarula Solutons
* Copyright (c) 2020 Amarula Solutons(India)
* Copyright (c) 2020 Amarula Solutions
* Copyright (c) 2020 Amarula Solutions(India)
*/
#include <dt-bindings/gpio/gpio.h>


@@ -87,3 +87,8 @@
};
};
};
&wlan_host_wake_l {
/* Kevin has an external pull up, but Bob does not. */
rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};


@@ -237,6 +237,14 @@
&edp {
status = "okay";
/*
* eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
* set this here, because rk3399-gru.dtsi ensures we can generate this
* off GPLL=600MHz, whereas some other RK3399 boards may not.
*/
assigned-clocks = <&cru PCLK_EDP>;
assigned-clock-rates = <24000000>;
ports {
edp_out: port@1 {
reg = <1>;
@@ -395,6 +403,7 @@ ap_i2c_tp: &i2c5 {
};
wlan_host_wake_l: wlan-host-wake-l {
/* Kevin has an external pull up, but Bob does not */
rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};


@@ -62,7 +62,6 @@
vcc5v0_host: vcc5v0-host-regulator {
compatible = "regulator-fixed";
gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
enable-active-low;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_host_en>;
regulator-name = "vcc5v0_host";


@@ -249,7 +249,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
for_each_cpu(cpu, cpus) {
if (!freq_counters_valid(cpu) ||
freq_inv_set_max_ratio(cpu,
cpufreq_get_hw_max_freq(cpu) * 1000,
cpufreq_get_hw_max_freq(cpu) * 1000ULL,
arch_timer_get_rate()))
return;
}


@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
{
return &cpu_clk_generic[2];
}
EXPORT_SYMBOL_GPL(clk_get_io);
struct clk *clk_get_ppe(void)
{


@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
if (plat_dat->bus_id) {
__raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
GMAC1_USE_UART0, LS1X_MUX_CTRL0);
switch (plat_dat->interface) {
switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
break;
default:
pr_err("unsupported mii mode %d\n",
plat_dat->interface);
plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC1_SHUT;
} else {
switch (plat_dat->interface) {
switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
break;
default:
pr_err("unsupported mii mode %d\n",
plat_dat->interface);
plat_dat->phy_interface);
return -ENOTSUPP;
}
val &= ~GMAC0_SHUT;
@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
plat_dat = dev_get_platdata(&pdev->dev);
val &= ~PHY_INTF_SELI;
if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
val |= 0x4 << PHY_INTF_SELI_SHIFT;
__raw_writel(val, LS1X_MUX_CTRL1);
@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
.bus_id = 0,
.phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
.interface = PHY_INTERFACE_MODE_MII,
.phy_interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
.interface = PHY_INTERFACE_MODE_RMII,
.phy_interface = PHY_INTERFACE_MODE_RMII,
#endif
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
.bus_id = 1,
.phy_addr = -1,
.interface = PHY_INTERFACE_MODE_MII,
.phy_interface = PHY_INTERFACE_MODE_MII,
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,


@@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
regs->cause = -1UL;
return regs->a0;
badframe:


@@ -31,7 +31,7 @@
#include <os.h>
#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
/* Changed in add_arg and setup_arch, which run before SMP is started */
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };


@@ -4122,6 +4122,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ecx, edx;
if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
return emulate_ud(ctxt);
eax = reg_read(ctxt, VCPU_REGS_RAX);
edx = reg_read(ctxt, VCPU_REGS_RDX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);


@@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
return kvm_mmu_memory_cache_nr_free_objects(mc);
}
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_memory_slot *slot;
struct kvm_mmu_page *sp;
struct kvm_rmap_head *rmap_head;
sp = sptep_to_sp(spte);
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
return pte_list_add(vcpu, spte, rmap_head);
}
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
struct kvm_memslots *slots;
@@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
/*
* Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
* context of a vCPU so have to determine which memslots to use based
* on context information in sp->role.
* Unlike rmap_add, rmap_remove does not run in the context of a vCPU
* so we have to determine which memslots to use based on context
* information in sp->role.
*/
slots = kvm_memslots_for_spte_role(kvm, sp->role);
@@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
#define RMAP_RECYCLE_THRESHOLD 1000
static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_memory_slot *slot;
struct kvm_rmap_head *rmap_head;
struct kvm_mmu_page *sp;
struct kvm_rmap_head *rmap_head;
int rmap_count;
sp = sptep_to_sp(spte);
kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
rmap_count = pte_list_add(vcpu, spte, rmap_head);
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
KVM_PAGES_PER_HPAGE(sp->role.level));
if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
kvm_flush_remote_tlbs_with_address(
vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
}
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -2718,7 +2709,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
bool host_writable)
{
int was_rmapped = 0;
int rmap_count;
int set_spte_ret;
int ret = RET_PF_FIXED;
bool flush = false;
@@ -2778,9 +2768,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!was_rmapped) {
kvm_update_page_stats(vcpu->kvm, level, 1);
rmap_count = rmap_add(vcpu, sptep, gfn);
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
rmap_add(vcpu, sptep, gfn);
}
return ret;


@@ -1021,6 +1021,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
__kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
kvm_inject_gp(vcpu, 0);


@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
.start = r->start,
.end = r->end,
.flags = IORESOURCE_MEM,
.desc = IORES_DESC_SOFT_RESERVED,
};
struct platform_device *pdev;
struct memregion_info info;


@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
pdev = of_find_device_by_node(udma_node);
if (np != udma_node)
of_node_put(udma_node);
if (!pdev) {
pr_debug("UDMA device not found\n");
return ERR_PTR(-EPROBE_DEFER);
}
if (np != udma_node)
of_node_put(udma_node);
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");


@@ -152,9 +152,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *rdom = pi->dom_info + domain;
struct reset_dom_info *rdom;
if (rdom->async_reset)
if (domain >= pi->num_domains)
return -EINVAL;
rdom = pi->dom_info + domain;
if (rdom->async_reset && flags & AUTONOMOUS_RESET)
flags |= ASYNCHRONOUS_RESET;
ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -166,7 +170,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
if (rdom->async_reset)
if (flags & ASYNCHRONOUS_RESET)
ret = ph->xops->do_xfer_with_response(ph, t);
else
ret = ph->xops->do_xfer(ph, t);


@@ -14,7 +14,7 @@
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/*
* See if a user has put the shim into insecure mode. If so, and if the
* variable doesn't have the runtime attribute set, we might as well
* honor that.
* variable doesn't have the non-volatile attribute set, we might as
* well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:


@@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
/*
* Disregard any setup data that was provided by the bootloader:
* setup_data could be pointing anywhere, and we have no way of
* authenticating or validating the payload.
*/
hdr->setup_data = 0;
efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */


@@ -554,8 +554,10 @@ static int __init gpio_mockup_register_chip(int idx)
}
fwnode = fwnode_create_software_node(properties, NULL);
if (IS_ERR(fwnode))
if (IS_ERR(fwnode)) {
kfree_strarray(line_names, ngpio);
return PTR_ERR(fwnode);
}
pdevinfo.name = "gpio-mockup";
pdevinfo.id = idx;
@@ -618,9 +620,9 @@ static int __init gpio_mockup_init(void)
static void __exit gpio_mockup_exit(void)
{
gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
gpio_mockup_unregister_pdevs();
}
module_init(gpio_mockup_init);


@@ -1784,7 +1784,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
ret = -ENODEV;
goto out_free_le;
}
le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -1798,7 +1797,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
init_waitqueue_head(&le->wait);
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
@@ -1807,6 +1806,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
le->irq = irq;
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;


@@ -2388,8 +2388,20 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
/* need to do common hw init early so everything is set up for gmc */
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
/* need to do gmc hw init early so we can allocate gpu mem */
/* Try to reserve bad pages early */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
@@ -3033,8 +3045,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
int i, r;
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};


@@ -35,6 +35,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -492,6 +494,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.dirty = drm_atomic_helper_dirtyfb,
};
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1109,7 +1117,10 @@ int amdgpu_display_gem_fb_verify_and_init(
if (ret)
goto err;
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (drm_drv_uses_atomic_modeset(dev))
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
else
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;


@@ -614,16 +614,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
uint64_t bp_block_offset = 0;
uint32_t bp_block_size = 0;
struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
if (adev->mman.fw_vram_usage_va != NULL) {
adev->virt.vf2pf_update_interval_ms = 2000;
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
} else if (adev->bios != NULL) {
/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
amdgpu_virt_read_pf2vf_data(adev);
}
}
void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
uint64_t bp_block_offset = 0;
uint32_t bp_block_size = 0;
struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
if (adev->mman.fw_vram_usage_va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
@@ -649,22 +667,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
} else if (adev->bios != NULL) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
amdgpu_virt_read_pf2vf_data(adev);
return;
}
if (adev->virt.vf2pf_update_interval_ms != 0) {
INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
}
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;


@@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);


@@ -6658,8 +6658,7 @@ static double CalculateUrgentLatency(
return ret;
}
static void UseMinimumDCFCLK(
static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,


@@ -259,33 +259,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe);
double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -319,64 +299,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool SynchronizedVBlank,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double SRExitZ8Time,
double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported);
double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -2959,33 +2903,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
v->GPUVMMaxPageTableLevels,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMEnable,
v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->BandwidthAvailableForImmediateFlip,
v->TotImmediateFlipBytes,
v->SourcePixelFormat[k],
v->HTotal[k] / v->PixelClock[k],
v->VRatio[k],
v->VRatioChroma[k],
v->Tno_bw[k],
v->DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3072,64 +2996,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
v->SynchronizedVBlank,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLK,
v->DRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->SRExitZ8Time,
v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
&v->Z8StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
&v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3741,61 +3629,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
double DPTEBytesPerRow)
{
struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
if (GPUVMEnable == true || DCCEnable == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
if (GPUVMEnable == true) {
if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
*DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3804,54 +3674,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
if (GPUVMEnable == true) {
*final_flip_bw = dml_max(
PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
} else if ((GPUVMEnable == true || DCCEnable == true)) {
*final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
if (v->GPUVMEnable == true) {
v->final_flip_bw[k] = dml_max(
PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
} else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
*final_flip_bw = 0;
v->final_flip_bw[k] = 0;
}
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
dpte_row_height * LineTime / VRatio,
meta_row_height * LineTime / VRatio,
dpte_row_height_chroma * LineTime / VRatioChroma,
meta_row_height_chroma * LineTime / VRatioChroma);
v->dpte_row_height[k] * LineTime / v->VRatio[k],
v->meta_row_height[k] * LineTime / v->VRatio[k],
v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dpte_row_height * LineTime / VRatio;
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = meta_row_height * LineTime / VRatio;
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
*ImmediateFlipSupportedForPipe = false;
v->ImmediateFlipSupportedForPipe[k] = false;
} else {
*ImmediateFlipSupportedForPipe = true;
v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -5477,33 +5347,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
v->GPUVMMaxPageTableLevels,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMEnable,
v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->BandwidthAvailableForImmediateFlip,
v->TotImmediateFlipBytes,
v->SourcePixelFormat[k],
v->HTotal[k] / v->PixelClock[k],
v->VRatio[k],
v->VRatioChroma[k],
v->Tno_bw[k],
v->DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5561,64 +5411,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->SynchronizedVBlank,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLKPerState[i],
v->DRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->SRExitZ8Time,
v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&dummy,
&dummy,
&dummy,
&dummy,
&v->MinActiveDRAMClockChangeLatencySupported);
&dummy);
}
}
@@ -5743,64 +5557,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool SynchronizedVBlank,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double SRExitZ8Time,
double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5820,103 +5598,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
*UrgentWatermark = UrgentLatency + ExtraLatency;
v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
*WritebackUrgentWatermark = WritebackLatency;
v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
*WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
+ DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
/ (HTotal[k] / PixelClock[k]);
+ DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
/ (v->HTotal[k] / v->PixelClock[k]);
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
(double) MaxLineBufferLines,
dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
(double) MaxLineBufferLines,
dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
/ (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64) {
if (v->WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
/ (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5926,14 +5704,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
if (BlendingAndTiming[k] == k) {
if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5941,11 +5719,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
*MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5953,25 +5731,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (BlendingAndTiming[k] == k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
} else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
*StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
*Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
*Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);


@@ -1597,6 +1597,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1625,6 +1626,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
if (dc_fixpt_le(one, hw_x))
hw_x = one;
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)


@@ -358,6 +358,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
false;
/*
* Disable BACO entry/exit completely on below SKUs to
* avoid hardware intermittent failures.
*/
if (((adev->pdev->device == 0x73A1) &&
(adev->pdev->revision == 0x00)) ||
((adev->pdev->device == 0x73BF) &&
(adev->pdev->revision == 0xCF)))
smu_baco->platform_support = false;
}
}


@@ -529,15 +529,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
gma_crtc->page_flip_event = NULL;
drm_crtc_vblank_put(crtc);
spin_lock_irqsave(&dev->event_lock, flags);
if (gma_crtc->page_flip_event) {
gma_crtc->page_flip_event = NULL;
drm_crtc_vblank_put(crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}

@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && ARM64
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM

@@ -673,6 +673,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
/*
* mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
* mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
* which needs irq for vblank, and mtk_dsi_stop() will disable irq.
* mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
* after dsi is fully set.
*/
mtk_dsi_stop(dsi);
mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -723,17 +733,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
/*
* mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
* mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
* which needs irq for vblank, and mtk_dsi_stop() will disable irq.
* mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
* after dsi is fully set.
*/
mtk_dsi_stop(dsi);
mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
dsi->enabled = false;
}
@@ -796,10 +795,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
.atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};

@@ -2579,7 +2579,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

@@ -277,8 +277,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
static enum drm_mode_status
cdn_dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;

@@ -2340,7 +2340,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
resource_size_t range_min, range_max, start;
resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2375,6 +2375,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
end = start + size - 1;
/* Skip the whole fb_mmio region if not fb_overlap_ok */
if (!fb_overlap_ok && fb_mmio &&
(((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
((end >= fb_mmio->start) && (end <= fb_mmio->end))))
continue;
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)

@@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
if (ret == 0) {
if (ret >= 0) {
/* setup chip registers to defaults */
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);

@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -63,13 +64,14 @@
*/
#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
/* Reference clock for Bluefield - 156 MHz. */
#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
/* Constant used to determine the PLL frequency. */
#define MLNXBF_I2C_COREPLL_CONST 16384
#define MLNXBF_I2C_COREPLL_CONST 16384ULL
#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
/* PLL registers. */
#define MLXBF_I2C_CORE_PLL_REG0 0x0
#define MLXBF_I2C_CORE_PLL_REG1 0x4
#define MLXBF_I2C_CORE_PLL_REG2 0x8
@@ -181,22 +183,15 @@
#define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
/* Core PLL TYU configuration. */
#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
/* Core PLL YU configuration. */
#define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
#define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
/* Core PLL frequency. */
static u64 mlxbf_i2c_corepll_frequency;
@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock;
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
/*
* Function to poll a set of bits at a specific address; it checks whether
* the bits are equal to zero when eq_zero is set to 'true', and not equal
@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
/* Clear status bits. */
writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
/* Set the cause data. */
writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
/* Zero PEC byte. */
writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
/* Zero byte count. */
@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (flags & MLXBF_I2C_F_WRITE) {
write_en = 1;
write_len += operation->length;
if (data_idx + operation->length >
MLXBF_I2C_MASTER_DATA_DESC_SIZE)
return -ENOBUFS;
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
return 0;
}
static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
{
u64 core_frequency, pad_frequency;
u64 core_frequency;
u8 core_od, core_r;
u32 corepll_val;
u16 core_f;
pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */
core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
/*
* Compute PLL output frequency as follow:
@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
core_frequency = pad_frequency * (++core_f);
core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
core_frequency /= (++core_r) * (++core_od);
return core_frequency;
}
static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
{
u32 corepll_reg1_val, corepll_reg2_val;
u64 corepll_frequency, pad_frequency;
u64 corepll_frequency;
u8 core_od, core_r;
u32 core_f;
pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */
core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
/*
* Compute PLL output frequency as follow:
@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
corepll_frequency /= (++core_r) * (++core_od);
return corepll_frequency;
@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
[1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
[2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
},
.calculate_freq = mlxbf_calculate_freq_from_tyu
.calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
},
[MLXBF_I2C_CHIP_TYPE_2] = {
.type = MLXBF_I2C_CHIP_TYPE_2,
.shared_res = {
[0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
},
.calculate_freq = mlxbf_calculate_freq_from_yu
.calculate_freq = mlxbf_i2c_calculate_freq_from_yu
}
};

@@ -539,7 +539,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{
unsigned long fl_sagaw, sl_sagaw;
fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */

@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {

@@ -87,8 +87,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
MULTICAST_LACPDU_ADDR;
const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);

@@ -862,12 +862,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
dev_mc_del(slave_dev, lacpdu_multicast);
}
if (BOND_MODE(bond) == BOND_MODE_8023AD)
dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -887,7 +883,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
bond_hw_addr_flush(bond->dev, old_active->dev);
if (bond->dev->flags & IFF_UP)
bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -898,10 +895,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
netif_addr_lock_bh(bond->dev);
dev_uc_sync(new_active->dev, bond->dev);
dev_mc_sync(new_active->dev, bond->dev);
netif_addr_unlock_bh(bond->dev);
if (bond->dev->flags & IFF_UP) {
netif_addr_lock_bh(bond->dev);
dev_uc_sync(new_active->dev, bond->dev);
dev_mc_sync(new_active->dev, bond->dev);
netif_addr_unlock_bh(bond->dev);
}
}
}
@@ -2134,16 +2133,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
netif_addr_lock_bh(bond_dev);
dev_mc_sync_multiple(slave_dev, bond_dev);
dev_uc_sync_multiple(slave_dev, bond_dev);
netif_addr_unlock_bh(bond_dev);
if (bond_dev->flags & IFF_UP) {
netif_addr_lock_bh(bond_dev);
dev_mc_sync_multiple(slave_dev, bond_dev);
dev_uc_sync_multiple(slave_dev, bond_dev);
netif_addr_unlock_bh(bond_dev);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
dev_mc_add(slave_dev, lacpdu_multicast);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2415,7 +2412,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
bond_hw_addr_flush(bond_dev, slave_dev);
if (old_flags & IFF_UP)
bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -3932,6 +3930,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
bond->rr_tx_counter = alloc_percpu(u32);
if (!bond->rr_tx_counter)
return -ENOMEM;
}
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -3969,6 +3973,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
bond_for_each_slave(bond, slave, iter)
dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -3980,6 +3987,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -3987,6 +3995,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
if (bond_uses_primary(bond)) {
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
bond_hw_addr_flush(bond_dev, slave->dev);
rcu_read_unlock();
} else {
struct list_head *iter;
bond_for_each_slave(bond, slave, iter)
bond_hw_addr_flush(bond_dev, slave->dev);
}
return 0;
}
@@ -5892,15 +5913,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
bond->rr_tx_counter = alloc_percpu(u32);
if (!bond->rr_tx_counter) {
destroy_workqueue(bond->wq);
bond->wq = NULL;
return -ENOMEM;
}
}
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);

@@ -1036,11 +1036,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
if (unlikely(drop)) {
skb = ERR_PTR(-ENOBUFS);
goto mark_as_read;
}
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -1069,6 +1064,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
if (unlikely(drop)) {
skb = ERR_PTR(-ENOBUFS);
goto mark_as_read;
}
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else

@@ -680,6 +680,7 @@ static int gs_can_open(struct net_device *netdev)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
/* finally start device */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
dm->mode = cpu_to_le32(GS_CAN_MODE_START);
dm->flags = cpu_to_le32(flags);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
@@ -696,13 +697,12 @@ static int gs_can_open(struct net_device *netdev)
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
kfree(dm);
dev->can.state = CAN_STATE_STOPPED;
return rc;
}
kfree(dm);
dev->can.state = CAN_STATE_ERROR_ACTIVE;
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);

@@ -709,7 +709,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -718,6 +717,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
tx_bytes += skb->len;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -738,8 +739,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
/* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
compl_deferred = true;
skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -748,9 +750,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
tx_bytes += skb->len;
if (!compl_deferred)
dev_kfree_skb_any(skb);
dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);

@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o

@@ -2142,7 +2142,7 @@ int enetc_close(struct net_device *ndev)
return 0;
}
static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
@@ -2196,25 +2196,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return enetc_setup_tc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TAPRIO:
return enetc_setup_tc_taprio(ndev, type_data);
case TC_SETUP_QDISC_CBS:
return enetc_setup_tc_cbs(ndev, type_data);
case TC_SETUP_QDISC_ETF:
return enetc_setup_tc_txtime(ndev, type_data);
case TC_SETUP_BLOCK:
return enetc_setup_tc_psfp(ndev, type_data);
default:
return -EOPNOTSUPP;
}
}
static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2307,29 +2288,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
static int enetc_set_psfp(struct net_device *ndev, int en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err;
if (en) {
err = enetc_psfp_enable(priv);
if (err)
return err;
priv->active_offloads |= ENETC_F_QCI;
return 0;
}
err = enetc_psfp_disable(priv);
if (err)
return err;
priv->active_offloads &= ~ENETC_F_QCI;
return 0;
}
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2348,11 +2306,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
}
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2364,11 +2320,6 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
if (changed & NETIF_F_HW_TC)
err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK

@@ -385,11 +385,9 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
int enetc_set_features(struct net_device *ndev,
netdev_features_t features);
void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -421,6 +419,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
@@ -496,4 +495,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
static inline int enetc_set_psfp(struct net_device *ndev, bool en)
{
return 0;
}
#endif

@@ -708,6 +708,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err;
if (changed & NETIF_F_HW_TC) {
err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
if (err)
return err;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -721,7 +728,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
return enetc_set_features(ndev, features);
enetc_set_features(ndev, features);
return 0;
}
static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return enetc_setup_tc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TAPRIO:
return enetc_setup_tc_taprio(ndev, type_data);
case TC_SETUP_QDISC_CBS:
return enetc_setup_tc_cbs(ndev, type_data);
case TC_SETUP_QDISC_ETF:
return enetc_setup_tc_txtime(ndev, type_data);
case TC_SETUP_BLOCK:
return enetc_setup_tc_psfp(ndev, type_data);
default:
return -EOPNOTSUPP;
}
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -738,7 +766,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
.ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};

@@ -1529,6 +1529,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
int enetc_set_psfp(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err;
if (en) {
err = enetc_psfp_enable(priv);
if (err)
return err;
priv->active_offloads |= ENETC_F_QCI;
return 0;
}
err = enetc_psfp_disable(priv);
if (err)
return err;
priv->active_offloads &= ~ENETC_F_QCI;
return 0;
}
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)

@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
return enetc_set_features(ndev, features);
enetc_set_features(ndev, features);
return 0;
}
static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
return enetc_setup_tc_mqprio(ndev, type_data);
default:
return -EOPNOTSUPP;
}
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
.ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,

@@ -5766,6 +5766,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
}
/**
* i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
* @vsi: Pointer to vsi structure
* @max_tx_rate: max TX rate in bytes to be converted into Mbits
*
* Helper function to convert units before send to set BW limit
**/
static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
{
if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
dev_warn(&vsi->back->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
max_tx_rate = I40E_BW_CREDIT_DIVISOR;
} else {
do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
}
return max_tx_rate;
}
/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
@@ -5788,10 +5808,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
if (max_tx_rate && max_tx_rate < 50) {
if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
max_tx_rate = 50;
max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -8082,9 +8102,9 @@ config_tc:
if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
vsi->mqprio_qopt.max_rate[0]);
do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -10829,10 +10849,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;

@@ -2038,6 +2038,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
}
/**
* i40e_vc_get_max_frame_size
* @vf: pointer to the VF
*
* Max frame size is determined based on the current port's max frame size and
* whether a port VLAN is configured on this VF. The VF is not aware whether
* it's in a port VLAN so the PF needs to account for this in max frame size
* checks and sending the max frame size to the VF.
**/
static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
{
u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
if (vf->port_vlan_id)
max_frame_size -= VLAN_HLEN;
return max_frame_size;
}
/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;

@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
/* underlying hardware might not allow access and/or always return
* 0 for the head/tail registers so just use the cached values
*/
head = ring->next_to_clean;
tail = readl(ring->tail);
tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1355,7 +1358,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
if (!rx_buffer)
if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1513,7 +1516,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
if (rx_buffer)
if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}

@@ -244,11 +244,14 @@ out:
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
int i, max_frame = IAVF_MAX_RXBUFFER;
struct virtchnl_queue_pair_info *vqpi;
size_t len;
if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
max_frame = IAVF_MAX_RXBUFFER;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",

@@ -2255,8 +2255,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
ice_unplug_aux_dev(pf);
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);

@@ -178,6 +178,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
/* The MDIO lock is set on read. To release it, clear gw register */
writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
return ret;
}
@@ -201,6 +204,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
/* The MDIO lock is set on read. To release it, clear gw register */
writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
return ret;
}

@@ -368,6 +368,11 @@ static void mana_gd_process_eq_events(void *arg)
break;
}
/* Per GDMA spec, rmb is necessary after checking owner_bits, before
* reading eqe.
*/
rmb();
mana_gd_process_eqe(eq);
eq->head++;
@@ -1096,6 +1101,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;
/* Per GDMA spec, rmb is necessary after checking owner_bits, before
* reading completion info
*/
rmb();
comp->wq_num = cqe->cqe_info.wq_num;
comp->is_sq = cqe->cqe_info.is_sq;
memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

View File

@@ -1115,6 +1115,8 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
/* Indicate that the MAC is responsible for managing PHY PM */
phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;

@@ -2033,6 +2033,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
}
}
/* Indicate that the MAC is responsible for managing PHY PM */
phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;

@@ -329,7 +329,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->tx_channel_offset = 1;
efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;

@@ -548,7 +548,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
efx_tx_send_pending(tx_queue->channel);
efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}

@@ -2039,9 +2039,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),

@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);

@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
.ei_array = ipa_mem_array_ei,
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
.ei_array = ipa_mem_array_ei,
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
.ei_array = ipa_mem_array_ei,
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
.ei_array = ipa_mem_array_ei,
.ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,

@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
/* This defines the start and end offset of a range of memory. Both
* fields are offsets relative to the start of IPA shared memory.
* The end value is the last addressable byte *within* the range.
/* This defines the start and end offset of a range of memory. The start
* value is a byte offset relative to the start of IPA shared memory. The
* end value is the last addressable unit *within* the range. Typically
* the end value is in units of bytes, however it can also be a maximum
* array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
/* Routing table information. These define the location and size of
* non-hashable IPv4 and IPv6 filter tables. The start values are
* offsets relative to the start of IPA shared memory.
/* Routing table information. These define the location and maximum
* *index* (not byte) for the modem portion of non-hashable IPv4 and
* IPv6 routing tables. The start values are byte offsets relative
* to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
struct ipa_mem_array v4_route_tbl_info;
struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
struct ipa_mem_array v6_route_tbl_info;
struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
* offsets relative to the start of IPA shared memory.
* byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
/* Routing table information. These define the location and size
* of hashable IPv4 and IPv6 filter tables. The start values are
* offsets relative to the start of IPA shared memory.
/* Routing table information. These define the location and maximum
* *index* (not byte) for the modem portion of hashable IPv4 and IPv6
* routing tables (if supported by hardware). The start values are
* byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
struct ipa_mem_array v4_hash_route_tbl_info;
struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
struct ipa_mem_array v6_hash_route_tbl_info;
struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
* of hashable IPv4 and IPv6 filter tables. The start values are
* offsets relative to the start of IPA shared memory.
* of hashable IPv4 and IPv6 filter tables (if supported by hardware).
* The start values are byte offsets relative to the start of IPA
* shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;

@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
#define IPA_ROUTE_MODEM_COUNT 8
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)

@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
/* The number of route table entries allotted to the modem */
#define IPA_ROUTE_MODEM_COUNT 8
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15

@@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we dont care about
* multicast and broadcast traffic */
struct ethhdr *ethh = eth_hdr(skb);
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -590,7 +591,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ethhdr *eth = eth_hdr(skb);
struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;

@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}

@@ -90,6 +90,9 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
#define VEND1_GLOBAL_GEN_STAT2 0xc831
#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -124,6 +127,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
/* Sleep and timeout for checking if the Processor-Intensive
* MDIO operation is finished
*/
#define AQR107_OP_IN_PROG_SLEEP 1000
#define AQR107_OP_IN_PROG_TIMEOUT 100000
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -598,16 +607,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
{
int val, err;
/* The datasheet notes to wait at least 1ms after issuing a
* processor intensive operation before checking.
* We cannot use the 'sleep_before_read' parameter of read_poll_timeout
* because that just determines the maximum time slept, not the minimum.
*/
usleep_range(1000, 5000);
err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
VEND1_GLOBAL_GEN_STAT2, val,
!(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
AQR107_OP_IN_PROG_SLEEP,
AQR107_OP_IN_PROG_TIMEOUT, false);
if (err) {
phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
return err;
}
return 0;
}
static int aqr107_suspend(struct phy_device *phydev)
{
return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
MDIO_CTRL1_LPOWER);
int err;
err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
MDIO_CTRL1_LPOWER);
if (err)
return err;
return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
MDIO_CTRL1_LPOWER);
int err;
err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
MDIO_CTRL1_LPOWER);
if (err)
return err;
return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)

@@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
netif_addr_unlock_bh(dev);
if (dev->flags & IFF_UP) {
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
netif_addr_unlock_bh(dev);
}
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
dev_uc_unsync(port_dev, dev);
dev_mc_unsync(port_dev, dev);
if (dev->flags & IFF_UP) {
dev_uc_unsync(port_dev, dev);
dev_mc_unsync(port_dev, dev);
}
dev_close(port_dev);
team_port_leave(team, port);
@@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
list_for_each_entry(port, &team->port_list, list) {
dev_uc_unsync(port->dev, dev);
dev_mc_unsync(port->dev, dev);
}
return 0;
}

@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
struct endpoint endpoint = { { { 0 } } };
if ((len == sizeof(struct sockaddr_in) &&
addr->sa_family == AF_INET) ||
(len == sizeof(struct sockaddr_in6) &&
addr->sa_family == AF_INET6)) {
struct endpoint endpoint = { { { 0 } } };
memcpy(&endpoint.addr, addr, len);
if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
endpoint.addr4 = *(struct sockaddr_in *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
} else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}

@@ -6,29 +6,28 @@
#ifdef DEBUG
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
static const struct {
bool result;
u64 nsec_to_sleep_before;
unsigned int msec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
[PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
[PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
static __init unsigned int maximum_jiffies_at_index(int index)
{
u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
total_nsecs += expected_results[i].nsec_to_sleep_before;
return nsecs_to_jiffies(total_nsecs);
total_msecs += expected_results[i].msec_to_sleep_before;
return msecs_to_jiffies(total_msecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
if (expected_results[i].nsec_to_sleep_before) {
ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
ns_to_ktime(expected_results[i].nsec_to_sleep_before));
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
}
if (expected_results[i].msec_to_sleep_before)
msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
++test;
#endif
for (trials = TRIALS_BEFORE_GIVING_UP;;) {
for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);

@@ -1038,7 +1038,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
val >>= (tid % 32);
val >>= offset;
if (offset > 20) {
addr += 4;

@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
struct alias_pav_group *group = private->pavgroup;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
struct alias_pav_group *group;
unsigned long flags;
if (!group || !lcu)
if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
}
spin_lock_irqsave(&lcu->lock, flags);
group = private->pavgroup;
if (!group) {
spin_unlock_irqrestore(&lcu->lock, flags);
return NULL;
}
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {

@@ -3005,7 +3005,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
if (ioc->is_mcpu_endpoint ||
sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
dma_get_required_mask(&pdev->dev) <= 32)
dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
ioc->dma_mask = 32;
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)

@@ -2166,8 +2166,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
if (!abort_cmd)
if (!abort_cmd) {
mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EIO;
}
mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
if (abort_cmd->qpair) {

@@ -30,7 +30,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xf179)}, /* 8188FU */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */

@@ -2522,6 +2522,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
tb->cm_ops = &icm_icl_ops;
break;
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;

@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e

@@ -2726,14 +2726,15 @@ static int lpuart_probe(struct platform_device *pdev)
lpuart_reg.cons = LPUART_CONSOLE;
handler = lpuart_int;
}
ret = uart_add_one_port(&lpuart_reg, &sport->port);
if (ret)
goto failed_attach_port;
ret = lpuart_global_reset(sport);
if (ret)
goto failed_reset;
ret = uart_add_one_port(&lpuart_reg, &sport->port);
if (ret)
goto failed_attach_port;
ret = uart_get_rs485_mode(&sport->port);
if (ret)
goto failed_get_rs485;
@@ -2756,9 +2757,9 @@ static int lpuart_probe(struct platform_device *pdev)
failed_irq_request:
failed_get_rs485:
failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
failed_reset:
lpuart_disable_clks(sport);
return ret;
}

@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned int count;
@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}

@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
uart_xmit_advance(port, count);
}
uart_write_wakeup(port);

@@ -6050,7 +6050,7 @@ re_enumerate_no_bos:
*
* Return: The same as for usb_reset_and_verify_device().
* However, if a reset is already in progress (for instance, if a
* driver doesn't have pre_ or post_reset() callbacks, and while
* driver doesn't have pre_reset() or post_reset() callbacks, and while
* being unbound or re-bound during the ongoing reset its disconnect()
* or probe() routine tries to perform a second, nested reset), the
* routine returns -EINPROGRESS.

View File

@@ -1612,12 +1612,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
if (!dwc->sysdev_is_parent) {
ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
}
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
@@ -1654,6 +1648,13 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
if (!dwc->sysdev_is_parent &&
DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
if (ret)
goto disable_clks;
}
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);

@@ -2445,13 +2445,41 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
dwc->connected = false;
/*
* In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
* Section 4.1.8 Table 4-7, it states that for a device-initiated
* disconnect, the SW needs to ensure that it sends "a DEPENDXFER
* command for any active transfers" before clearing the RunStop
* bit.
*/
dwc3_stop_active_transfers(dwc);
__dwc3_gadget_stop(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
/*
* Note: if the GEVNTCOUNT indicates events in the event buffer, the
* driver needs to acknowledge them before the controller can halt.
* Simply let the interrupt handler acknowledges and handle the
* remaining event generated by the controller while polling for
* DSTS.DEVCTLHLT.
*/
return dwc3_gadget_run_stop(dwc, false, false);
}
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
int ret;
is_on = !!is_on;
dwc->softconnect = is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
@@ -2488,42 +2516,13 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
/*
* Synchronize and disable any further event handling while controller
* is being enabled/disabled.
*/
disable_irq(dwc->irq_gadget);
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->pullups_connected == is_on) {
pm_runtime_put(dwc->dev);
return 0;
}
if (!is_on) {
u32 count;
dwc->connected = false;
/*
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
* Section 4.1.8 Table 4-7, it states that for a device-initiated
* disconnect, the SW needs to ensure that it sends "a DEPENDXFER
* command for any active transfers" before clearing the RunStop
* bit.
*/
dwc3_stop_active_transfers(dwc);
__dwc3_gadget_stop(dwc);
/*
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
* Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
* "software needs to acknowledge the events that are generated
* (by writing to GEVNTCOUNTn) while it is waiting for this bit
* to be set to '1'."
*/
count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
count &= DWC3_GEVNTCOUNT_MASK;
if (count > 0) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
ret = dwc3_gadget_soft_disconnect(dwc);
} else {
/*
* In the Synopsys DWC_usb31 1.90a programming guide section
@@ -2531,18 +2530,13 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
* device-initiated disconnect requires a core soft reset
* (DCTL.CSftRst) before enabling the run/stop bit.
*/
spin_unlock_irqrestore(&dwc->lock, flags);
dwc3_core_soft_reset(dwc);
spin_lock_irqsave(&dwc->lock, flags);
dwc3_event_buffers_setup(dwc);
__dwc3_gadget_start(dwc);
ret = dwc3_gadget_run_stop(dwc, true, false);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
enable_irq(dwc->irq_gadget);
pm_runtime_put(dwc->dev);
return ret;

@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
.driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },


@@ -4297,6 +4297,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
/*
* If we had UNFINISHED_DROPS we could still be processing them, so
* clear that bit and wake up relocation so it can stop.
* We must do this before stopping the block group reclaim task, because
* at btrfs_relocate_block_group() we wait for this bit, and after the
* wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
* have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
* return 1.
*/
btrfs_wake_unfinished_drop(fs_info);
/*
* We may have the reclaim task running and relocating a data block group,
* in which case it may create delayed iputs. So stop it before we park
@@ -4315,12 +4326,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
*/
kthread_park(fs_info->cleaner_kthread);
/*
* If we had UNFINISHED_DROPS we could still be processing them, so
* clear that bit and wake up relocation so it can stop.
*/
btrfs_wake_unfinished_drop(fs_info);
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -4343,6 +4348,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
/* clear out the rbtree of defraggable inodes */
btrfs_cleanup_defrag_inodes(fs_info);
/*
* After we parked the cleaner kthread, ordered extents may have
* completed and created new delayed iputs. If one of the async reclaim
* tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
* can hang forever trying to stop it, because if a delayed iput is
* added after it ran btrfs_run_delayed_iputs() and before it called
* btrfs_wait_on_delayed_iputs(), it will hang forever since there is
* no one else to run iputs.
*
* So wait for all ongoing ordered extents to complete and then run
* delayed iputs. This works because once we reach this point no one
* can neither create new ordered extents nor create delayed iputs
* through some other means.
*
* Also note that btrfs_wait_ordered_roots() is not safe here, because
* it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
* but the delayed iput for the respective inode is made only when doing
* the final btrfs_put_ordered_extent() (which must happen at
* btrfs_finish_ordered_io() when we are unmounting).
*/
btrfs_flush_workqueue(fs_info->endio_write_workers);
/* Ordered extents for free space inodes. */
btrfs_flush_workqueue(fs_info->endio_freespace_worker);
btrfs_run_delayed_iputs(fs_info);
cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
cancel_work_sync(&fs_info->preempt_reclaim_work);
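
Both btrfs hunks are ordering fixes: relocation must be woken before the block group reclaim task is stopped, and ordered extents must be flushed (and the delayed iputs they create run) before the async reclaim workers are cancelled. A condensed, non-compilable outline of the resulting close_ctree() sequence, keeping only calls that appear in the hunks (the outline's function name is made up):

	/* Sketch of the unmount ordering after the two fixes above. */
	static void close_ctree_ordering_sketch(struct btrfs_fs_info *fs_info)
	{
		set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);

		/* 1. Wake relocation so it can see fs_closing and stop with -EINTR. */
		btrfs_wake_unfinished_drop(fs_info);

		/* 2. Only now stop the block group reclaim task (call elided here)
		 *    and park the cleaner kthread.
		 */
		kthread_park(fs_info->cleaner_kthread);

		/* 3. Flush ordered extent completion and run the delayed iputs it
		 *    may create, so a reclaim worker stuck in RUN_DELAYED_IPUTS
		 *    can make progress.
		 */
		btrfs_flush_workqueue(fs_info->endio_write_workers);
		btrfs_flush_workqueue(fs_info->endio_freespace_worker);
		btrfs_run_delayed_iputs(fs_info);

		/* 4. Only then cancel the async reclaim workers. */
		cancel_work_sync(&fs_info->async_reclaim_work);
		cancel_work_sync(&fs_info->async_data_reclaim_work);
		cancel_work_sync(&fs_info->preempt_reclaim_work);
	}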


@@ -1279,6 +1279,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
loff_t done = 0;
int ret;
if (!iomi.len)
return 0;
if (iov_iter_rw(iter) == WRITE) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;


@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid eh_entries";
goto corrupted;
}
if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
error_msg = "eh_entries is 0 but eh_depth is > 0";
goto corrupted;
}
if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
error_msg = "invalid extent entries";
goto corrupted;


@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
goto fallback;
}
max_dirs = ndirs / ngroups + inodes_per_group / 16;
max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
min_inodes = avefreei - inodes_per_group*flex_size / 4;
if (min_inodes < 1)
min_inodes = 1;
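
A quick worked example of the changed term, using made-up but plausible values (inodes_per_group = 8192, flex_size = 16):

	old: max_dirs = ndirs/ngroups + 8192 / 16        = ndirs/ngroups + 512
	new: max_dirs = ndirs/ngroups + 8192 * 16 / 16   = ndirs/ngroups + 8192

so the directory budget per candidate group now scales with the whole flex group rather than a single block group.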


@@ -1052,8 +1052,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
{
*new_cr = ac->ac_criteria;
if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
*group = next_linear_group(ac, *group, ngroups);
return;
}
if (*new_cr == 0) {
ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
@@ -1078,23 +1080,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
struct ext4_sb_info *sbi = EXT4_SB(sb);
int i;
if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
if (grp->bb_counters[i] > 0)
break;
/* No need to move between order lists? */
if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
i == grp->bb_largest_free_order) {
grp->bb_largest_free_order = i;
return;
}
if (grp->bb_largest_free_order >= 0) {
write_lock(&sbi->s_mb_largest_free_orders_locks[
grp->bb_largest_free_order]);
list_del_init(&grp->bb_largest_free_order_node);
write_unlock(&sbi->s_mb_largest_free_orders_locks[
grp->bb_largest_free_order]);
}
grp->bb_largest_free_order = -1; /* uninit */
for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
if (grp->bb_counters[i] > 0) {
grp->bb_largest_free_order = i;
break;
}
}
if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
grp->bb_largest_free_order >= 0 && grp->bb_free) {
grp->bb_largest_free_order = i;
if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
write_lock(&sbi->s_mb_largest_free_orders_locks[
grp->bb_largest_free_order]);
list_add_tail(&grp->bb_largest_free_order_node,
@@ -2633,7 +2637,7 @@ static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
ext4_group_t prefetch_grp = 0, ngroups, group, i;
int cr = -1;
int cr = -1, new_cr;
int err = 0, first_err = 0;
unsigned int nr = 0, prefetch_ios = 0;
struct ext4_sb_info *sbi;
@@ -2708,13 +2712,11 @@ repeat:
ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
prefetch_grp = group;
for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
i++) {
int ret = 0, new_cr;
for (i = 0, new_cr = cr; i < ngroups; i++,
ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
int ret = 0;
cond_resched();
ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
if (new_cr != cr) {
cr = new_cr;
goto repeat;
@@ -5167,6 +5169,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
int bsbits = ac->ac_sb->s_blocksize_bits;
loff_t size, isize;
bool inode_pa_eligible, group_pa_eligible;
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
return;
@@ -5174,25 +5177,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
return;
group_pa_eligible = sbi->s_mb_group_prealloc > 0;
inode_pa_eligible = true;
size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;
/* No point in using inode preallocation for closed files */
if ((size == isize) && !ext4_fs_is_busy(sbi) &&
!inode_is_open_for_write(ac->ac_inode)) {
ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
return;
}
!inode_is_open_for_write(ac->ac_inode))
inode_pa_eligible = false;
if (sbi->s_mb_group_prealloc <= 0) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
return;
}
/* don't use group allocation for large files */
size = max(size, isize);
if (size > sbi->s_mb_stream_request) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
/* Don't use group allocation for large files */
if (size > sbi->s_mb_stream_request)
group_pa_eligible = false;
if (!group_pa_eligible) {
if (inode_pa_eligible)
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
else
ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
return;
}
@@ -5539,6 +5544,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ext4_fsblk_t block = 0;
unsigned int inquota = 0;
unsigned int reserv_clstrs = 0;
int retries = 0;
u64 seq;
might_sleep();
@@ -5641,7 +5647,8 @@ repeat:
ar->len = ac->ac_b_ex.fe_len;
}
} else {
if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
if (++retries < 3 &&
ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
goto repeat;
/*
* If block allocation fails then the pa allocated above
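
The ext4_mb_group_or_file() hunk above replaces the early returns with two eligibility flags and only then picks a hint. A small self-contained C sketch of that decision, with hypothetical names for everything that is not an allocation-context concept (in particular the enum and helper are not ext4 APIs):

	#include <stdbool.h>
	#include <stdio.h>

	enum alloc_hint { HINT_USE_GROUP_PA, HINT_STREAM_ALLOC, HINT_NOPREALLOC };

	/* Distilled from the flag logic in the hunk above; sizes in bytes. */
	static enum alloc_hint choose_prealloc(bool file_is_closed,
					       bool group_prealloc_enabled,
					       unsigned long size,
					       unsigned long stream_request)
	{
		bool inode_pa_eligible = !file_is_closed;	/* closed files skip inode PA */
		bool group_pa_eligible = group_prealloc_enabled &&
					 size <= stream_request; /* large files skip group PA */

		if (group_pa_eligible)
			return HINT_USE_GROUP_PA;
		if (inode_pa_eligible)
			return HINT_STREAM_ALLOC;
		return HINT_NOPREALLOC;
	}

	int main(void)
	{
		/* Small closed file: now served by group (locality) preallocation. */
		printf("%d\n", choose_prealloc(true, true, 4096, 65536));
		/* Large open file: too big for group PA, use the stream/inode path. */
		printf("%d\n", choose_prealloc(false, true, 1UL << 20, 65536));
		return 0;
	}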


@@ -755,11 +755,13 @@ int nfs4_inode_return_delegation(struct inode *inode)
struct nfs_delegation *delegation;
delegation = nfs_start_delegation_return(nfsi);
/* Synchronous recall of any application leases */
break_lease(inode, O_WRONLY | O_RDWR);
nfs_wb_all(inode);
if (delegation != NULL)
if (delegation != NULL) {
/* Synchronous recall of any application leases */
break_lease(inode, O_WRONLY | O_RDWR);
if (S_ISREG(inode->i_mode))
nfs_wb_all(inode);
return nfs_end_delegation_return(inode, delegation, 1);
}
return 0;
}


@@ -337,19 +337,36 @@ xfs_dinode_verify_fork(
int whichfork)
{
uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
mode_t mode = be16_to_cpu(dip->di_mode);
uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork);
switch (XFS_DFORK_FORMAT(dip, whichfork)) {
case XFS_DINODE_FMT_LOCAL:
/*
* no local regular files yet
*/
if (whichfork == XFS_DATA_FORK) {
if (S_ISREG(be16_to_cpu(dip->di_mode)))
return __this_address;
if (be64_to_cpu(dip->di_size) >
XFS_DFORK_SIZE(dip, mp, whichfork))
/*
* For fork types that can contain local data, check that the fork
* format matches the size of local data contained within the fork.
*
* For all types, check that when the size says the fork should be in extent
* or btree format, the inode isn't claiming it is in local format.
*/
if (whichfork == XFS_DATA_FORK) {
if (S_ISDIR(mode) || S_ISLNK(mode)) {
if (be64_to_cpu(dip->di_size) <= fork_size &&
fork_format != XFS_DINODE_FMT_LOCAL)
return __this_address;
}
if (be64_to_cpu(dip->di_size) > fork_size &&
fork_format == XFS_DINODE_FMT_LOCAL)
return __this_address;
}
switch (fork_format) {
case XFS_DINODE_FMT_LOCAL:
/*
* No local regular files yet.
*/
if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
return __this_address;
if (di_nextents)
return __this_address;
break;
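
The rewritten verifier above couples the data fork format to di_size for directories and symlinks. A standalone C sketch of that predicate (host-endian, with hypothetical names; the real check operates on the on-disk dinode fields shown in the hunk):

	#include <stdbool.h>
	#include <stdint.h>

	enum fork_fmt { FMT_LOCAL, FMT_EXTENTS, FMT_BTREE };

	/* Returns true when the size/format combination looks corrupt. */
	bool data_fork_size_format_mismatch(bool is_dir_or_symlink, uint64_t di_size,
					    uint32_t fork_size, enum fork_fmt fmt)
	{
		/* Dirs/symlinks that fit in the fork must use the local format... */
		if (is_dir_or_symlink && di_size <= fork_size && fmt != FMT_LOCAL)
			return true;
		/* ...and data larger than the fork must not claim local format. */
		if (di_size > fork_size && fmt == FMT_LOCAL)
			return true;
		return false;
	}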


@@ -2599,14 +2599,13 @@ xfs_ifree_cluster(
}
/*
* This is called to return an inode to the inode free list.
* The inode should already be truncated to 0 length and have
* no pages associated with it. This routine also assumes that
* the inode is already a part of the transaction.
* This is called to return an inode to the inode free list. The inode should
* already be truncated to 0 length and have no pages associated with it. This
* routine also assumes that the inode is already a part of the transaction.
*
* The on-disk copy of the inode will have been added to the list
* of unlinked inodes in the AGI. We need to remove the inode from
* that list atomically with respect to freeing it here.
* The on-disk copy of the inode will have been added to the list of unlinked
* inodes in the AGI. We need to remove the inode from that list atomically with
* respect to freeing it here.
*/
int
xfs_ifree(
@@ -2628,13 +2627,16 @@ xfs_ifree(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
/*
* Pull the on-disk inode from the AGI unlinked list.
* Free the inode first so that we guarantee that the AGI lock is going
* to be taken before we remove the inode from the unlinked list. This
* makes the AGI lock -> unlinked list modification order the same as
* used in O_TMPFILE creation.
*/
error = xfs_iunlink_remove(tp, pag, ip);
error = xfs_difree(tp, pag, ip->i_ino, &xic);
if (error)
goto out;
error = xfs_difree(tp, pag, ip->i_ino, &xic);
error = xfs_iunlink_remove(tp, pag, ip);
if (error)
goto out;
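
In short, the swap makes the free path take the AGI lock (inside xfs_difree()) before touching the unlinked list, so both directions modify them in the same order:

	create (O_TMPFILE): lock AGI, then add the inode to the AGI unlinked list
	free (xfs_ifree):   lock AGI via xfs_difree(), then xfs_iunlink_remove()

Keeping that order identical on both paths is what the comment in the hunk refers to.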


@@ -549,10 +549,9 @@
*/
#ifdef CONFIG_CFI_CLANG
#define TEXT_CFI_JT \
. = ALIGN(PMD_SIZE); \
ALIGN_FUNCTION(); \
__cfi_jt_start = .; \
*(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
. = ALIGN(PMD_SIZE); \
__cfi_jt_end = .;
#else
#define TEXT_CFI_JT


@@ -1057,9 +1057,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
* cover a worst-case of every other cpu being on one of two nodes for a
* very large NR_CPUS.
*
* Use PAGE_SIZE as a minimum for smaller configurations.
* Use PAGE_SIZE as a minimum for smaller configurations while avoiding
* unsigned comparison to -1.
*/
#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
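
The macro fix above matters because PAGE_SIZE is an unsigned constant, so in the old form the left-hand side (NR_CPUS * 9)/32 - 1 is converted to unsigned for the comparison; on small configurations it evaluates to -1, wraps to a huge value, and PAGE_SIZE is never chosen. A standalone illustration, with illustrative constants rather than any real kernel config:

	#include <stdio.h>

	#define NR_CPUS   1
	#define PAGE_SIZE 4096UL	/* unsigned, as in the kernel */

	/* Old form: the "- 1" inside the comparison wraps for tiny NR_CPUS. */
	#define OLD_MAX ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
	/* Fixed form: compare without "- 1", so small configs get PAGE_SIZE. */
	#define NEW_MAX (((NR_CPUS * 9)/32 > PAGE_SIZE) ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)

	int main(void)
	{
		/* On a typical LP64 host this prints "old: -1  new: 4096". */
		printf("old: %ld  new: %ld\n", (long)OLD_MAX, (long)NEW_MAX);
		return 0;
	}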


@@ -307,6 +307,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
/**
* uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
* @up: uart_port structure describing the port
* @chars: number of characters sent
*
* This function advances the tail of circular xmit buffer by the number of
* @chars transmitted and handles accounting of transmitted bytes (into
* @up's icount.tx).
*/
static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
{
struct circ_buf *xmit = &up->state->xmit;
xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
up->icount.tx += chars;
}
struct module;
struct tty_driver;
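
uart_xmit_advance() gives drivers one helper for the two things they previously open-coded after pushing bytes to the FIFO: moving the circular buffer tail and bumping icount.tx. A hedged sketch of a TX path using it; the example_fifo_* helpers are hypothetical stand-ins for hardware access, everything else is the serial core API:

	/* Hypothetical driver TX routine built on uart_xmit_advance(). */
	static void example_tx_chars(struct uart_port *port)
	{
		struct circ_buf *xmit = &port->state->xmit;

		while (!uart_circ_empty(xmit) && example_fifo_has_space(port)) {
			example_fifo_write(port, xmit->buf[xmit->tail]);
			/* One call replaces the open-coded tail update and
			 * port->icount.tx++ that drivers used to carry.
			 */
			uart_xmit_advance(port, 1);
		}

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(port);
	}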


@@ -15,8 +15,6 @@
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
#define AD_LACP_SLOW 0
#define AD_LACP_FAST 1


@@ -757,6 +757,9 @@ extern struct rtnl_link_ops bond_link_ops;
/* exported from bond_sysfs_slave.c */
extern const struct sysfs_ops slave_sysfs_ops;
/* exported from bond_3ad.c */
extern const u8 lacpdu_mcast_addr[];
static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
atomic_long_inc(&dev->tx_dropped);


@@ -3090,10 +3090,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
if (WARN_ON(!work->func))
return false;
if (!from_cancel) {
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
}
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
if (start_flush_work(work, &barr, from_cancel)) {
wait_for_completion(&barr.done);
