Merge bec4c2968f ("Merge tag 'ecryptfs-5.11-rc6-setxattr-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs") into android-mainline
Steps on the way to 5.11-rc6 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: Ia363f5845035c40bea3f034b258054827ed56701
This commit is contained in:
@@ -1807,12 +1807,24 @@ seg6_flowlabel - INTEGER
|
||||
``conf/default/*``:
|
||||
Change the interface-specific default settings.
|
||||
|
||||
These settings would be used during creating new interfaces.
|
||||
|
||||
|
||||
``conf/all/*``:
|
||||
Change all the interface-specific settings.
|
||||
|
||||
[XXX: Other special features than forwarding?]
|
||||
|
||||
conf/all/disable_ipv6 - BOOLEAN
|
||||
Changing this value is same as changing ``conf/default/disable_ipv6``
|
||||
setting and also all per-interface ``disable_ipv6`` settings to the same
|
||||
value.
|
||||
|
||||
Reading this value does not have any particular meaning. It does not say
|
||||
whether IPv6 support is enabled or disabled. Returned value can be 1
|
||||
also in the case when some interface has ``disable_ipv6`` set to 0 and
|
||||
has configured IPv6 addresses.
|
||||
|
||||
conf/all/forwarding - BOOLEAN
|
||||
Enable global IPv6 forwarding between all interfaces.
|
||||
|
||||
|
||||
@@ -3239,6 +3239,7 @@ L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
W: http://sourceforge.net/projects/bonding/
|
||||
F: drivers/net/bonding/
|
||||
F: include/net/bonding.h
|
||||
F: include/uapi/linux/if_bonding.h
|
||||
|
||||
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
|
||||
@@ -3411,7 +3412,7 @@ F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
|
||||
F: drivers/pci/controller/pcie-brcmstb.c
|
||||
F: drivers/staging/vc04_services
|
||||
N: bcm2711
|
||||
N: bcm2835
|
||||
N: bcm283*
|
||||
|
||||
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
|
||||
M: Florian Fainelli <f.fainelli@gmail.com>
|
||||
@@ -8433,11 +8434,8 @@ F: drivers/i3c/
|
||||
F: include/linux/i3c/
|
||||
|
||||
IA64 (Itanium) PLATFORM
|
||||
M: Tony Luck <tony.luck@intel.com>
|
||||
M: Fenghua Yu <fenghua.yu@intel.com>
|
||||
L: linux-ia64@vger.kernel.org
|
||||
S: Odd Fixes
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
|
||||
S: Orphan
|
||||
F: Documentation/ia64/
|
||||
F: arch/ia64/
|
||||
|
||||
@@ -12422,6 +12420,7 @@ F: tools/testing/selftests/net/ipsec.c
|
||||
NETWORKING [IPv4/IPv6]
|
||||
M: "David S. Miller" <davem@davemloft.net>
|
||||
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
|
||||
M: David Ahern <dsahern@kernel.org>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
|
||||
|
||||
@@ -16,6 +16,13 @@
|
||||
stdout-path = &uart1;
|
||||
};
|
||||
|
||||
aliases {
|
||||
mmc0 = &usdhc2;
|
||||
mmc1 = &usdhc3;
|
||||
mmc2 = &usdhc4;
|
||||
/delete-property/ mmc3;
|
||||
};
|
||||
|
||||
memory@10000000 {
|
||||
device_type = "memory";
|
||||
reg = <0x10000000 0x80000000>;
|
||||
|
||||
@@ -418,7 +418,7 @@
|
||||
|
||||
/* VDD_AUD_1P8: Audio codec */
|
||||
reg_aud_1p8v: ldo3 {
|
||||
regulator-name = "vdd1p8";
|
||||
regulator-name = "vdd1p8a";
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-boot-on;
|
||||
|
||||
@@ -137,7 +137,7 @@
|
||||
|
||||
lcd_backlight: lcd-backlight {
|
||||
compatible = "pwm-backlight";
|
||||
pwms = <&pwm4 0 5000000>;
|
||||
pwms = <&pwm4 0 5000000 0>;
|
||||
pwm-names = "LCD_BKLT_PWM";
|
||||
|
||||
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
|
||||
@@ -167,7 +167,7 @@
|
||||
i2c-gpio,delay-us = <2>; /* ~100 kHz */
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
status = "disabld";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
i2c_cam: i2c-gpio-cam {
|
||||
@@ -179,7 +179,7 @@
|
||||
i2c-gpio,delay-us = <2>; /* ~100 kHz */
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
status = "disabld";
|
||||
status = "disabled";
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -53,7 +53,6 @@
|
||||
&fec {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
|
||||
phy-handle = <&phy>;
|
||||
phy-mode = "rgmii-id";
|
||||
phy-reset-duration = <2>;
|
||||
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
|
||||
@@ -63,10 +62,19 @@
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
phy: ethernet-phy@0 {
|
||||
/*
|
||||
* The PHY can appear at either address 0 or 4 due to the
|
||||
* configuration (LED) pin not being pulled sufficiently.
|
||||
*/
|
||||
ethernet-phy@0 {
|
||||
reg = <0>;
|
||||
qca,clk-out-frequency = <125000000>;
|
||||
};
|
||||
|
||||
ethernet-phy@4 {
|
||||
reg = <4>;
|
||||
qca,clk-out-frequency = <125000000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -115,6 +115,7 @@
|
||||
compatible = "nxp,pcf2127";
|
||||
reg = <0>;
|
||||
spi-max-frequency = <2000000>;
|
||||
reset-source;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -12,4 +12,42 @@
|
||||
200000 0>;
|
||||
};
|
||||
};
|
||||
|
||||
reserved-memory {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
/* Modem trace memory */
|
||||
ram@06000000 {
|
||||
reg = <0x06000000 0x00f00000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* Modem shared memory */
|
||||
ram@06f00000 {
|
||||
reg = <0x06f00000 0x00100000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* Modem private memory */
|
||||
ram@07000000 {
|
||||
reg = <0x07000000 0x01000000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
/*
|
||||
* Initial Secure Software ISSW memory
|
||||
*
|
||||
* This is probably only used if the kernel tries
|
||||
* to actually call into trustzone to run secure
|
||||
* applications, which the mainline kernel probably
|
||||
* will not do on this old chipset. But you can never
|
||||
* be too careful, so reserve this memory anyway.
|
||||
*/
|
||||
ram@17f00000 {
|
||||
reg = <0x17f00000 0x00100000>;
|
||||
no-map;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -12,4 +12,42 @@
|
||||
200000 0>;
|
||||
};
|
||||
};
|
||||
|
||||
reserved-memory {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
/* Modem trace memory */
|
||||
ram@06000000 {
|
||||
reg = <0x06000000 0x00f00000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* Modem shared memory */
|
||||
ram@06f00000 {
|
||||
reg = <0x06f00000 0x00100000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* Modem private memory */
|
||||
ram@07000000 {
|
||||
reg = <0x07000000 0x01000000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
/*
|
||||
* Initial Secure Software ISSW memory
|
||||
*
|
||||
* This is probably only used if the kernel tries
|
||||
* to actually call into trustzone to run secure
|
||||
* applications, which the mainline kernel probably
|
||||
* will not do on this old chipset. But you can never
|
||||
* be too careful, so reserve this memory anyway.
|
||||
*/
|
||||
ram@17f00000 {
|
||||
reg = <0x17f00000 0x00100000>;
|
||||
no-map;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
35
arch/arm/boot/dts/ste-db9500.dtsi
Normal file
35
arch/arm/boot/dts/ste-db9500.dtsi
Normal file
@@ -0,0 +1,35 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include "ste-dbx5x0.dtsi"
|
||||
|
||||
/ {
|
||||
cpus {
|
||||
cpu@300 {
|
||||
/* cpufreq controls */
|
||||
operating-points = <1152000 0
|
||||
800000 0
|
||||
400000 0
|
||||
200000 0>;
|
||||
};
|
||||
};
|
||||
|
||||
reserved-memory {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
/*
|
||||
* Initial Secure Software ISSW memory
|
||||
*
|
||||
* This is probably only used if the kernel tries
|
||||
* to actually call into trustzone to run secure
|
||||
* applications, which the mainline kernel probably
|
||||
* will not do on this old chipset. But you can never
|
||||
* be too careful, so reserve this memory anyway.
|
||||
*/
|
||||
ram@17f00000 {
|
||||
reg = <0x17f00000 0x00100000>;
|
||||
no-map;
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -4,7 +4,7 @@
|
||||
*/
|
||||
|
||||
/dts-v1/;
|
||||
#include "ste-db8500.dtsi"
|
||||
#include "ste-db9500.dtsi"
|
||||
#include "ste-href-ab8500.dtsi"
|
||||
#include "ste-href-family-pinctrl.dtsi"
|
||||
|
||||
|
||||
@@ -67,6 +67,7 @@
|
||||
#define MX6Q_CCM_CCR 0x0
|
||||
|
||||
.align 3
|
||||
.arm
|
||||
|
||||
.macro sync_l2_cache
|
||||
|
||||
|
||||
@@ -4,11 +4,16 @@
|
||||
*/
|
||||
usb {
|
||||
compatible = "simple-bus";
|
||||
dma-ranges;
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
|
||||
|
||||
/*
|
||||
* Internally, USB bus to the interconnect can only address up
|
||||
* to 40-bit
|
||||
*/
|
||||
dma-ranges = <0 0 0 0 0x100 0x0>;
|
||||
|
||||
usbphy0: usb-phy@0 {
|
||||
compatible = "brcm,sr-usb-combo-phy";
|
||||
reg = <0x0 0x00000000 0x0 0x100>;
|
||||
|
||||
@@ -101,7 +101,7 @@
|
||||
reboot {
|
||||
compatible ="syscon-reboot";
|
||||
regmap = <&rst>;
|
||||
offset = <0xb0>;
|
||||
offset = <0>;
|
||||
mask = <0x02>;
|
||||
};
|
||||
|
||||
|
||||
@@ -253,7 +253,7 @@
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
spba: bus@30000000 {
|
||||
spba: spba-bus@30000000 {
|
||||
compatible = "fsl,spba-bus", "simple-bus";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
@@ -266,7 +266,7 @@
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
|
||||
gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
|
||||
};
|
||||
|
||||
gpio4: gpio@30230000 {
|
||||
|
||||
@@ -991,8 +991,6 @@ CONFIG_ARCH_TEGRA_210_SOC=y
|
||||
CONFIG_ARCH_TEGRA_186_SOC=y
|
||||
CONFIG_ARCH_TEGRA_194_SOC=y
|
||||
CONFIG_ARCH_TEGRA_234_SOC=y
|
||||
CONFIG_ARCH_K3_AM6_SOC=y
|
||||
CONFIG_ARCH_K3_J721E_SOC=y
|
||||
CONFIG_TI_SCI_PM_DOMAINS=y
|
||||
CONFIG_EXTCON_PTN5150=m
|
||||
CONFIG_EXTCON_USB_GPIO=y
|
||||
|
||||
@@ -54,7 +54,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
|
||||
})
|
||||
|
||||
#define xchg(ptr, x) \
|
||||
((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
|
||||
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
|
||||
|
||||
/*
|
||||
* Atomic compare and exchange. Compare OLD with MEM, if identical,
|
||||
|
||||
@@ -171,30 +171,35 @@ void vtime_account_hardirq(struct task_struct *tsk)
|
||||
static irqreturn_t
|
||||
timer_interrupt (int irq, void *dev_id)
|
||||
{
|
||||
unsigned long cur_itm, new_itm, ticks;
|
||||
unsigned long new_itm;
|
||||
|
||||
if (cpu_is_offline(smp_processor_id())) {
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
new_itm = local_cpu_data->itm_next;
|
||||
cur_itm = ia64_get_itc();
|
||||
|
||||
if (!time_after(cur_itm, new_itm)) {
|
||||
if (!time_after(ia64_get_itc(), new_itm))
|
||||
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
|
||||
cur_itm, new_itm);
|
||||
ticks = 1;
|
||||
} else {
|
||||
ticks = DIV_ROUND_UP(cur_itm - new_itm,
|
||||
local_cpu_data->itm_delta);
|
||||
new_itm += ticks * local_cpu_data->itm_delta;
|
||||
ia64_get_itc(), new_itm);
|
||||
|
||||
while (1) {
|
||||
new_itm += local_cpu_data->itm_delta;
|
||||
|
||||
legacy_timer_tick(smp_processor_id() == time_keeper_id);
|
||||
|
||||
local_cpu_data->itm_next = new_itm;
|
||||
|
||||
if (time_after(new_itm, ia64_get_itc()))
|
||||
break;
|
||||
|
||||
/*
|
||||
* Allow IPIs to interrupt the timer loop.
|
||||
*/
|
||||
local_irq_enable();
|
||||
local_irq_disable();
|
||||
}
|
||||
|
||||
if (smp_processor_id() != time_keeper_id)
|
||||
ticks = 0;
|
||||
|
||||
legacy_timer_tick(ticks);
|
||||
|
||||
do {
|
||||
/*
|
||||
* If we're too close to the next clock tick for
|
||||
|
||||
@@ -202,9 +202,8 @@ config PREFETCH
|
||||
depends on PA8X00 || PA7200
|
||||
|
||||
config MLONGCALLS
|
||||
bool "Enable the -mlong-calls compiler option for big kernels"
|
||||
default y if !MODULES || UBSAN || FTRACE
|
||||
default n
|
||||
def_bool y if !MODULES || UBSAN || FTRACE
|
||||
bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
|
||||
depends on PA8X00
|
||||
help
|
||||
If you configure the kernel to include many drivers built-in instead
|
||||
|
||||
@@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
|
||||
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
|
||||
extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
|
||||
|
||||
/* soft power switch support (power.c) */
|
||||
extern struct tasklet_struct power_tasklet;
|
||||
|
||||
#endif /* _ASM_PARISC_IRQ_H */
|
||||
|
||||
@@ -997,10 +997,17 @@ intr_do_preempt:
|
||||
bb,<,n %r20, 31 - PSW_SM_I, intr_restore
|
||||
nop
|
||||
|
||||
/* ssm PSW_SM_I done later in intr_restore */
|
||||
#ifdef CONFIG_MLONGCALLS
|
||||
ldil L%intr_restore, %r2
|
||||
load32 preempt_schedule_irq, %r1
|
||||
bv %r0(%r1)
|
||||
ldo R%intr_restore(%r2), %r2
|
||||
#else
|
||||
ldil L%intr_restore, %r1
|
||||
BL preempt_schedule_irq, %r2
|
||||
nop
|
||||
|
||||
b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
|
||||
ldo R%intr_restore(%r1), %r2
|
||||
#endif
|
||||
#endif /* CONFIG_PREEMPTION */
|
||||
|
||||
/*
|
||||
|
||||
@@ -613,6 +613,7 @@ DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication);
|
||||
|
||||
#ifdef CONFIG_XEN_PV
|
||||
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
|
||||
DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER, exc_xen_unknown_trap);
|
||||
#endif
|
||||
|
||||
/* Device interrupts common/spurious */
|
||||
|
||||
@@ -583,6 +583,13 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
|
||||
exc_debug(regs);
|
||||
}
|
||||
|
||||
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
|
||||
{
|
||||
/* This should never happen and there is no way to handle it. */
|
||||
pr_err("Unknown trap in Xen PV mode.");
|
||||
BUG();
|
||||
}
|
||||
|
||||
struct trap_array_entry {
|
||||
void (*orig)(void);
|
||||
void (*xen)(void);
|
||||
@@ -631,6 +638,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
|
||||
{
|
||||
unsigned int nr;
|
||||
bool ist_okay = false;
|
||||
bool found = false;
|
||||
|
||||
/*
|
||||
* Replace trap handler addresses by Xen specific ones.
|
||||
@@ -645,6 +653,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
|
||||
if (*addr == entry->orig) {
|
||||
*addr = entry->xen;
|
||||
ist_okay = entry->ist_okay;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -655,9 +664,13 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
|
||||
nr = (*addr - (void *)early_idt_handler_array[0]) /
|
||||
EARLY_IDT_HANDLER_SIZE;
|
||||
*addr = (void *)xen_early_idt_handler_array[nr];
|
||||
found = true;
|
||||
}
|
||||
|
||||
if (WARN_ON(ist != 0 && !ist_okay))
|
||||
if (!found)
|
||||
*addr = (void *)xen_asm_exc_xen_unknown_trap;
|
||||
|
||||
if (WARN_ON(found && ist != 0 && !ist_okay))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
||||
@@ -178,6 +178,7 @@ xen_pv_trap asm_exc_simd_coprocessor_error
|
||||
#ifdef CONFIG_IA32_EMULATION
|
||||
xen_pv_trap entry_INT80_compat
|
||||
#endif
|
||||
xen_pv_trap asm_exc_xen_unknown_trap
|
||||
xen_pv_trap asm_exc_xen_hypervisor_callback
|
||||
|
||||
__INIT
|
||||
|
||||
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
|
||||
if (info->feature_discard) {
|
||||
blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
|
||||
blk_queue_max_discard_sectors(rq, get_capacity(gd));
|
||||
rq->limits.discard_granularity = info->discard_granularity;
|
||||
rq->limits.discard_granularity = info->discard_granularity ?:
|
||||
info->physical_sector_size;
|
||||
rq->limits.discard_alignment = info->discard_alignment;
|
||||
if (info->feature_secdiscard)
|
||||
blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
|
||||
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
|
||||
|
||||
static void blkfront_setup_discard(struct blkfront_info *info)
|
||||
{
|
||||
int err;
|
||||
unsigned int discard_granularity;
|
||||
unsigned int discard_alignment;
|
||||
|
||||
info->feature_discard = 1;
|
||||
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
|
||||
"discard-granularity", "%u", &discard_granularity,
|
||||
"discard-alignment", "%u", &discard_alignment,
|
||||
NULL);
|
||||
if (!err) {
|
||||
info->discard_granularity = discard_granularity;
|
||||
info->discard_alignment = discard_alignment;
|
||||
}
|
||||
info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
|
||||
"discard-granularity",
|
||||
0);
|
||||
info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
|
||||
"discard-alignment", 0);
|
||||
info->feature_secdiscard =
|
||||
!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
|
||||
0);
|
||||
|
||||
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
|
||||
ret = of_platform_default_populate(child, NULL, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to populate module\n");
|
||||
of_node_put(child);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,8 +6,6 @@ config MXC_CLK
|
||||
|
||||
config MXC_CLK_SCU
|
||||
tristate
|
||||
depends on ARCH_MXC
|
||||
depends on IMX_SCU && HAVE_ARM_SMCCC
|
||||
|
||||
config CLK_IMX1
|
||||
def_bool SOC_IMX1
|
||||
|
||||
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
|
||||
#ifdef CONFIG_PM
|
||||
static int mmp2_audio_clk_suspend(struct device *dev)
|
||||
{
|
||||
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
|
||||
|
||||
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
|
||||
static int mmp2_audio_clk_resume(struct device *dev)
|
||||
{
|
||||
struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
|
||||
|
||||
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
|
||||
SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
|
||||
|
||||
@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
|
||||
},
|
||||
};
|
||||
|
||||
static struct clk_branch gcc_camera_ahb_clk = {
|
||||
.halt_reg = 0xb008,
|
||||
.halt_check = BRANCH_HALT,
|
||||
.hwcg_reg = 0xb008,
|
||||
.hwcg_bit = 1,
|
||||
.clkr = {
|
||||
.enable_reg = 0xb008,
|
||||
.enable_mask = BIT(0),
|
||||
.hw.init = &(struct clk_init_data){
|
||||
.name = "gcc_camera_ahb_clk",
|
||||
.ops = &clk_branch2_ops,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static struct clk_branch gcc_camera_hf_axi_clk = {
|
||||
.halt_reg = 0xb020,
|
||||
.halt_check = BRANCH_HALT,
|
||||
@@ -2317,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
|
||||
[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
|
||||
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
|
||||
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
|
||||
[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
|
||||
[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
|
||||
[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
|
||||
[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
|
||||
@@ -2519,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
|
||||
|
||||
/*
|
||||
* Keep the clocks always-ON
|
||||
* GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
|
||||
* GCC_GPU_CFG_AHB_CLK
|
||||
* GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
|
||||
* GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
|
||||
*/
|
||||
regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
|
||||
regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
|
||||
regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
|
||||
regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
|
||||
regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
|
||||
|
||||
|
||||
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
|
||||
.name = "gcc_sdcc2_apps_clk_src",
|
||||
.parent_data = gcc_parent_data_4,
|
||||
.num_parents = 5,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_floor_ops,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
|
||||
.name = "gcc_sdcc4_apps_clk_src",
|
||||
.parent_data = gcc_parent_data_0,
|
||||
.num_parents = 3,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_floor_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ config IMX_DSP
|
||||
config IMX_SCU
|
||||
bool "IMX SCU Protocol driver"
|
||||
depends on IMX_MBOX
|
||||
select SOC_BUS
|
||||
help
|
||||
The System Controller Firmware (SCFW) is a low-level system function
|
||||
which runs on a dedicated Cortex-M core to provide power, clock, and
|
||||
|
||||
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
||||
MT_STORE_FIELD(inrange_state);
|
||||
return 1;
|
||||
case HID_DG_CONFIDENCE:
|
||||
if (cls->name == MT_CLS_WIN_8 &&
|
||||
if ((cls->name == MT_CLS_WIN_8 ||
|
||||
cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
|
||||
(field->application == HID_DG_TOUCHPAD ||
|
||||
field->application == HID_DG_TOUCHSCREEN))
|
||||
app->quirks |= MT_QUIRK_CONFIDENCE;
|
||||
|
||||
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
|
||||
}
|
||||
|
||||
if (flush)
|
||||
wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
|
||||
wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
|
||||
else if (insert)
|
||||
wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
|
||||
wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
|
||||
raw_data, report_size);
|
||||
|
||||
return insert && !flush;
|
||||
@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
|
||||
static int wacom_devm_kfifo_alloc(struct wacom *wacom)
|
||||
{
|
||||
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
|
||||
struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
|
||||
struct kfifo_rec_ptr_2 *pen_fifo;
|
||||
int error;
|
||||
|
||||
pen_fifo = devres_alloc(wacom_devm_kfifo_release,
|
||||
@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
|
||||
}
|
||||
|
||||
devres_add(&wacom->hdev->dev, pen_fifo);
|
||||
wacom_wac->pen_fifo = pen_fifo;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -342,7 +342,7 @@ struct wacom_wac {
|
||||
struct input_dev *pen_input;
|
||||
struct input_dev *touch_input;
|
||||
struct input_dev *pad_input;
|
||||
struct kfifo_rec_ptr_2 pen_fifo;
|
||||
struct kfifo_rec_ptr_2 *pen_fifo;
|
||||
int pid;
|
||||
int num_contacts_left;
|
||||
u8 bt_features;
|
||||
|
||||
@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
|
||||
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
|
||||
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
|
||||
init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
|
||||
init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
|
||||
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
|
||||
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
|
||||
return 0;
|
||||
|
||||
@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
|
||||
struct hns_roce_hem_table sccc_table;
|
||||
struct mutex scc_mutex;
|
||||
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
|
||||
spinlock_t bank_lock;
|
||||
struct mutex bank_mutex;
|
||||
};
|
||||
|
||||
struct hns_roce_cq_table {
|
||||
|
||||
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
|
||||
|
||||
hr_qp->doorbell_qpn = 1;
|
||||
} else {
|
||||
spin_lock(&qp_table->bank_lock);
|
||||
mutex_lock(&qp_table->bank_mutex);
|
||||
bankid = get_least_load_bankid_for_qp(qp_table->bank);
|
||||
|
||||
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
|
||||
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
|
||||
if (ret) {
|
||||
ibdev_err(&hr_dev->ib_dev,
|
||||
"failed to alloc QPN, ret = %d\n", ret);
|
||||
spin_unlock(&qp_table->bank_lock);
|
||||
mutex_unlock(&qp_table->bank_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
qp_table->bank[bankid].inuse++;
|
||||
spin_unlock(&qp_table->bank_lock);
|
||||
mutex_unlock(&qp_table->bank_mutex);
|
||||
|
||||
hr_qp->doorbell_qpn = (u32)num;
|
||||
}
|
||||
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
|
||||
|
||||
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
|
||||
|
||||
spin_lock(&hr_dev->qp_table.bank_lock);
|
||||
mutex_lock(&hr_dev->qp_table.bank_mutex);
|
||||
hr_dev->qp_table.bank[bankid].inuse--;
|
||||
spin_unlock(&hr_dev->qp_table.bank_lock);
|
||||
mutex_unlock(&hr_dev->qp_table.bank_mutex);
|
||||
}
|
||||
|
||||
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
|
||||
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
|
||||
unsigned int i;
|
||||
|
||||
mutex_init(&qp_table->scc_mutex);
|
||||
mutex_init(&qp_table->bank_mutex);
|
||||
xa_init(&hr_dev->qp_table_xa);
|
||||
|
||||
reserved_from_bot = hr_dev->caps.reserved_qps;
|
||||
|
||||
@@ -3311,8 +3311,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
|
||||
int err;
|
||||
|
||||
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
|
||||
err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
|
||||
&dev->port[port_num].roce.nb);
|
||||
err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
|
||||
if (err) {
|
||||
dev->port[port_num].roce.nb.notifier_call = NULL;
|
||||
return err;
|
||||
@@ -3324,8 +3323,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
|
||||
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
|
||||
{
|
||||
if (dev->port[port_num].roce.nb.notifier_call) {
|
||||
unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
|
||||
&dev->port[port_num].roce.nb);
|
||||
unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
|
||||
dev->port[port_num].roce.nb.notifier_call = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
|
||||
res_chunk = qp_grp->res_chunk_list[i];
|
||||
for (j = 0; j < res_chunk->cnt; j++) {
|
||||
vnic_res = res_chunk->res[j];
|
||||
len += sysfs_emit_at(
|
||||
buf, len, "%s[%d] ",
|
||||
len += sysfs_emit_at(buf, len, " %s[%d]",
|
||||
usnic_vnic_res_type_to_str(vnic_res->type),
|
||||
vnic_res->vnic_idx);
|
||||
}
|
||||
}
|
||||
|
||||
len = sysfs_emit_at(buf, len, "\n");
|
||||
len += sysfs_emit_at(buf, len, "\n");
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
|
||||
return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
|
||||
}
|
||||
|
||||
static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
|
||||
{
|
||||
switch (type) {
|
||||
case PVRDMA_NETWORK_ROCE_V1:
|
||||
return RDMA_NETWORK_ROCE_V1;
|
||||
case PVRDMA_NETWORK_IPV4:
|
||||
return RDMA_NETWORK_IPV4;
|
||||
case PVRDMA_NETWORK_IPV6:
|
||||
return RDMA_NETWORK_IPV6;
|
||||
default:
|
||||
return RDMA_NETWORK_IPV6;
|
||||
}
|
||||
}
|
||||
|
||||
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
|
||||
const struct pvrdma_qp_cap *src);
|
||||
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
|
||||
|
||||
@@ -367,7 +367,7 @@ retry:
|
||||
wc->dlid_path_bits = cqe->dlid_path_bits;
|
||||
wc->port_num = cqe->port_num;
|
||||
wc->vendor_err = cqe->vendor_err;
|
||||
wc->network_hdr_type = cqe->network_hdr_type;
|
||||
wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
|
||||
|
||||
/* Update shared ring state */
|
||||
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/if.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/udp_tunnel.h>
|
||||
#include <net/sch_generic.h>
|
||||
#include <linux/netfilter.h>
|
||||
@@ -153,9 +154,14 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct udphdr *udph;
|
||||
struct net_device *ndev = skb->dev;
|
||||
struct net_device *rdev = ndev;
|
||||
struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
|
||||
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
|
||||
|
||||
if (!rxe && is_vlan_dev(rdev)) {
|
||||
rdev = vlan_dev_real_dev(ndev);
|
||||
rxe = rxe_get_dev_from_net(rdev);
|
||||
}
|
||||
if (!rxe)
|
||||
goto drop;
|
||||
|
||||
|
||||
@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
|
||||
else
|
||||
wc->network_hdr_type = RDMA_NETWORK_IPV6;
|
||||
|
||||
if (is_vlan_dev(skb->dev)) {
|
||||
wc->wc_flags |= IB_WC_WITH_VLAN;
|
||||
wc->vlan_id = vlan_dev_vlan_id(skb->dev);
|
||||
}
|
||||
|
||||
if (pkt->mask & RXE_IMMDT_MASK) {
|
||||
wc->wc_flags |= IB_WC_WITH_IMM;
|
||||
wc->ex.imm_data = immdt_imm(pkt);
|
||||
|
||||
@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO) += meson/
|
||||
obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
|
||||
obj-$(CONFIG_CEC_SECO) += seco/
|
||||
obj-$(CONFIG_CEC_STI) += sti/
|
||||
obj-$(CONFIG_CEC_STM32) += stm32/
|
||||
obj-$(CONFIG_CEC_TEGRA) += tegra/
|
||||
|
||||
|
||||
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
length = (b->memory == VB2_MEMORY_USERPTR ||
|
||||
b->memory == VB2_MEMORY_DMABUF)
|
||||
length = (b->memory == VB2_MEMORY_USERPTR)
|
||||
? b->length : vb->planes[0].length;
|
||||
|
||||
if (b->bytesused > length)
|
||||
|
||||
@@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
|
||||
|
||||
switch (pll->bus_type) {
|
||||
case CCS_PLL_BUS_TYPE_CSI2_DPHY:
|
||||
/* CSI transfers 2 bits per clock per lane; thus times 2 */
|
||||
op_sys_clk_freq_hz_sdr = pll->link_freq * 2
|
||||
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
|
||||
1 : pll->csi2.lanes);
|
||||
break;
|
||||
case CCS_PLL_BUS_TYPE_CSI2_CPHY:
|
||||
op_sys_clk_freq_hz_sdr =
|
||||
pll->link_freq
|
||||
op_sys_clk_freq_hz_sdr = pll->link_freq * 2
|
||||
* (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
|
||||
1 : pll->csi2.lanes);
|
||||
break;
|
||||
|
||||
@@ -152,7 +152,7 @@ static int ccs_data_parse_version(struct bin_container *bin,
|
||||
vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
|
||||
v->static_data_version_major[1];
|
||||
vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
|
||||
v->static_data_version_major[1];
|
||||
v->static_data_version_minor[1];
|
||||
vv->date_year = ((u16)v->year[0] << 8) + v->year[1];
|
||||
vv->date_month = v->month;
|
||||
vv->date_day = v->day;
|
||||
|
||||
@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
|
||||
if (!q->sensor)
|
||||
return -ENODEV;
|
||||
|
||||
freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
|
||||
freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
|
||||
if (freq < 0) {
|
||||
dev_err(dev, "error %lld, invalid link_freq\n", freq);
|
||||
return freq;
|
||||
|
||||
@@ -349,8 +349,10 @@ static void venus_core_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct venus_core *core = platform_get_drvdata(pdev);
|
||||
|
||||
pm_runtime_get_sync(core->dev);
|
||||
venus_shutdown(core);
|
||||
venus_firmware_deinit(core);
|
||||
pm_runtime_put_sync(core->dev);
|
||||
}
|
||||
|
||||
static __maybe_unused int venus_runtime_suspend(struct device *dev)
|
||||
|
||||
@@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
|
||||
out:
|
||||
fwnode_handle_put(fwnode);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int rvin_parallel_init(struct rvin_dev *vin)
|
||||
|
||||
@@ -320,7 +320,7 @@ again:
|
||||
data->body);
|
||||
spin_lock(&data->keylock);
|
||||
if (scancode) {
|
||||
delay = nsecs_to_jiffies(dev->timeout) +
|
||||
delay = usecs_to_jiffies(dev->timeout) +
|
||||
msecs_to_jiffies(100);
|
||||
mod_timer(&data->rx_timeout, jiffies + delay);
|
||||
} else {
|
||||
|
||||
@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
|
||||
rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
|
||||
/* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
|
||||
rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
|
||||
itdev->params.sample_period;
|
||||
itdev->params.sample_period / 1000;
|
||||
rdev->timeout = IR_DEFAULT_TIMEOUT;
|
||||
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
|
||||
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
|
||||
|
||||
@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
|
||||
void rc_repeat(struct rc_dev *dev)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
|
||||
unsigned int timeout = usecs_to_jiffies(dev->timeout) +
|
||||
msecs_to_jiffies(repeat_period(dev->last_protocol));
|
||||
struct lirc_scancode sc = {
|
||||
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
|
||||
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
|
||||
ir_do_keydown(dev, protocol, scancode, keycode, toggle);
|
||||
|
||||
if (dev->keypressed) {
|
||||
dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
|
||||
dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
|
||||
msecs_to_jiffies(repeat_period(protocol));
|
||||
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
|
||||
}
|
||||
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
|
||||
goto out_raw;
|
||||
}
|
||||
|
||||
dev->registered = true;
|
||||
|
||||
rc = device_add(&dev->dev);
|
||||
if (rc)
|
||||
goto out_rx_free;
|
||||
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
|
||||
dev->device_name ?: "Unspecified device", path ?: "N/A");
|
||||
kfree(path);
|
||||
|
||||
dev->registered = true;
|
||||
|
||||
/*
|
||||
* once the the input device is registered in rc_setup_rx_device,
|
||||
* userspace can open the input device and rc_open() will be called
|
||||
|
||||
@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
|
||||
} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
|
||||
|
||||
mod_timer(&serial_ir.timeout_timer,
|
||||
jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
|
||||
jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
|
||||
|
||||
ir_raw_event_handle(serial_ir.rcdev);
|
||||
|
||||
|
||||
@@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
|
||||
|
||||
s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
|
||||
s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
|
||||
unsigned int div)
|
||||
{
|
||||
struct v4l2_ctrl *ctrl;
|
||||
@@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
|
||||
|
||||
return freq > 0 ? freq : -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
|
||||
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
|
||||
|
||||
@@ -1163,7 +1163,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
|
||||
{
|
||||
struct can_priv *priv = netdev_priv(dev);
|
||||
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
|
||||
struct can_berr_counter bec;
|
||||
struct can_berr_counter bec = { };
|
||||
enum can_state state = priv->state;
|
||||
|
||||
if (priv->do_get_state)
|
||||
|
||||
@@ -509,15 +509,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
|
||||
/* Find our integrated MDIO bus node */
|
||||
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
|
||||
priv->master_mii_bus = of_mdio_find_bus(dn);
|
||||
if (!priv->master_mii_bus)
|
||||
if (!priv->master_mii_bus) {
|
||||
of_node_put(dn);
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
get_device(&priv->master_mii_bus->dev);
|
||||
priv->master_mii_dn = dn;
|
||||
|
||||
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
|
||||
if (!priv->slave_mii_bus)
|
||||
if (!priv->slave_mii_bus) {
|
||||
of_node_put(dn);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
priv->slave_mii_bus->priv = priv;
|
||||
priv->slave_mii_bus->name = "sf2 slave mii";
|
||||
|
||||
@@ -1187,6 +1187,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
|
||||
.port_cnt = 5, /* total cpu and user ports */
|
||||
},
|
||||
{
|
||||
/*
|
||||
* WARNING
|
||||
* =======
|
||||
* KSZ8794 is similar to KSZ8795, except the port map
|
||||
* contains a gap between external and CPU ports, the
|
||||
* port map is NOT continuous. The per-port register
|
||||
* map is shifted accordingly too, i.e. registers at
|
||||
* offset 0x40 are NOT used on KSZ8794 and they ARE
|
||||
* used on KSZ8795 for external port 3.
|
||||
* external cpu
|
||||
* KSZ8794 0,1,2 4
|
||||
* KSZ8795 0,1,2,3 4
|
||||
* KSZ8765 0,1,2,3 4
|
||||
*/
|
||||
.chip_id = 0x8794,
|
||||
.dev_name = "KSZ8794",
|
||||
.num_vlans = 4096,
|
||||
@@ -1220,9 +1234,13 @@ static int ksz8795_switch_init(struct ksz_device *dev)
|
||||
dev->num_vlans = chip->num_vlans;
|
||||
dev->num_alus = chip->num_alus;
|
||||
dev->num_statics = chip->num_statics;
|
||||
dev->port_cnt = chip->port_cnt;
|
||||
dev->port_cnt = fls(chip->cpu_ports);
|
||||
dev->cpu_port = fls(chip->cpu_ports) - 1;
|
||||
dev->phy_port_cnt = dev->port_cnt - 1;
|
||||
dev->cpu_ports = chip->cpu_ports;
|
||||
|
||||
dev->host_mask = chip->cpu_ports;
|
||||
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
|
||||
chip->cpu_ports;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -1231,17 +1249,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
|
||||
if (!dev->cpu_ports)
|
||||
return -ENODEV;
|
||||
|
||||
dev->port_mask = BIT(dev->port_cnt) - 1;
|
||||
dev->port_mask |= dev->host_mask;
|
||||
|
||||
dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
|
||||
dev->mib_cnt = ARRAY_SIZE(mib_names);
|
||||
|
||||
dev->phy_port_cnt = dev->port_cnt - 1;
|
||||
|
||||
dev->cpu_port = dev->port_cnt - 1;
|
||||
dev->host_mask = BIT(dev->cpu_port);
|
||||
|
||||
dev->ports = devm_kzalloc(dev->dev,
|
||||
dev->port_cnt * sizeof(struct ksz_port),
|
||||
GFP_KERNEL);
|
||||
|
||||
@@ -400,7 +400,7 @@ int ksz_switch_register(struct ksz_device *dev,
|
||||
gpiod_set_value_cansleep(dev->reset_gpio, 1);
|
||||
usleep_range(10000, 12000);
|
||||
gpiod_set_value_cansleep(dev->reset_gpio, 0);
|
||||
usleep_range(100, 1000);
|
||||
msleep(100);
|
||||
}
|
||||
|
||||
mutex_init(&dev->dev_mutex);
|
||||
@@ -434,7 +434,7 @@ int ksz_switch_register(struct ksz_device *dev,
|
||||
if (of_property_read_u32(port, "reg",
|
||||
&port_num))
|
||||
continue;
|
||||
if (port_num >= dev->port_cnt)
|
||||
if (!(dev->port_mask & BIT(port_num)))
|
||||
return -EINVAL;
|
||||
of_get_phy_mode(port,
|
||||
&dev->ports[port_num].interface);
|
||||
|
||||
@@ -1158,11 +1158,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
|
||||
#endif
|
||||
}
|
||||
if (!n || !n->dev)
|
||||
goto free_sk;
|
||||
goto free_dst;
|
||||
|
||||
ndev = n->dev;
|
||||
if (!ndev)
|
||||
goto free_dst;
|
||||
if (is_vlan_dev(ndev))
|
||||
ndev = vlan_dev_real_dev(ndev);
|
||||
|
||||
@@ -1250,6 +1248,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
|
||||
free_csk:
|
||||
chtls_sock_release(&csk->kref);
|
||||
free_dst:
|
||||
if (n)
|
||||
neigh_release(n);
|
||||
dst_release(dst);
|
||||
free_sk:
|
||||
|
||||
@@ -462,6 +462,11 @@ struct bufdesc_ex {
|
||||
*/
|
||||
#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
|
||||
|
||||
/* Some link partners do not tolerate the momentary reset of the REF_CLK
|
||||
* frequency when the RNCTL register is cleared by hardware reset.
|
||||
*/
|
||||
#define FEC_QUIRK_NO_HARD_RESET (1 << 18)
|
||||
|
||||
struct bufdesc_prop {
|
||||
int qid;
|
||||
/* Address of Rx and Tx buffers */
|
||||
|
||||
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
|
||||
static const struct fec_devinfo fec_imx28_info = {
|
||||
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
|
||||
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
|
||||
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
|
||||
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
|
||||
FEC_QUIRK_NO_HARD_RESET,
|
||||
};
|
||||
|
||||
static const struct fec_devinfo fec_imx6q_info = {
|
||||
@@ -953,7 +954,8 @@ fec_restart(struct net_device *ndev)
|
||||
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
|
||||
* instead of reset MAC itself.
|
||||
*/
|
||||
if (fep->quirks & FEC_QUIRK_HAS_AVB) {
|
||||
if (fep->quirks & FEC_QUIRK_HAS_AVB ||
|
||||
((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
|
||||
writel(0, fep->hwp + FEC_ECNTRL);
|
||||
} else {
|
||||
writel(1, fep->hwp + FEC_ECNTRL);
|
||||
@@ -2165,9 +2167,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
|
||||
fep->mii_bus->parent = &pdev->dev;
|
||||
|
||||
err = of_mdiobus_register(fep->mii_bus, node);
|
||||
of_node_put(node);
|
||||
if (err)
|
||||
goto err_out_free_mdiobus;
|
||||
of_node_put(node);
|
||||
|
||||
mii_cnt++;
|
||||
|
||||
@@ -2180,6 +2182,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
|
||||
err_out_free_mdiobus:
|
||||
mdiobus_free(fep->mii_bus);
|
||||
err_out:
|
||||
of_node_put(node);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -5084,6 +5084,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
|
||||
while (!done) {
|
||||
/* Pull all the valid messages off the CRQ */
|
||||
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
|
||||
/* This barrier makes sure ibmvnic_next_crq()'s
|
||||
* crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
|
||||
* before ibmvnic_handle_crq()'s
|
||||
* switch(gen_crq->first) and switch(gen_crq->cmd).
|
||||
*/
|
||||
dma_rmb();
|
||||
ibmvnic_handle_crq(crq, adapter);
|
||||
crq->generic.first = 0;
|
||||
}
|
||||
|
||||
@@ -4046,20 +4046,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
|
||||
goto error_param;
|
||||
|
||||
vf = &pf->vf[vf_id];
|
||||
vsi = pf->vsi[vf->lan_vsi_idx];
|
||||
|
||||
/* When the VF is resetting wait until it is done.
|
||||
* It can take up to 200 milliseconds,
|
||||
* but wait for up to 300 milliseconds to be safe.
|
||||
* If the VF is indeed in reset, the vsi pointer has
|
||||
* to show on the newly loaded vsi under pf->vsi[id].
|
||||
* Acquire the VSI pointer only after the VF has been
|
||||
* properly initialized.
|
||||
*/
|
||||
for (i = 0; i < 15; i++) {
|
||||
if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
|
||||
if (i > 0)
|
||||
vsi = pf->vsi[vf->lan_vsi_idx];
|
||||
if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
|
||||
break;
|
||||
}
|
||||
msleep(20);
|
||||
}
|
||||
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
|
||||
@@ -4068,6 +4064,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
|
||||
ret = -EAGAIN;
|
||||
goto error_param;
|
||||
}
|
||||
vsi = pf->vsi[vf->lan_vsi_idx];
|
||||
|
||||
if (is_multicast_ether_addr(mac)) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
|
||||
@@ -68,7 +68,9 @@
|
||||
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
|
||||
#define ICE_AQ_LEN 64
|
||||
#define ICE_MBXSQ_LEN 64
|
||||
#define ICE_MIN_MSIX 2
|
||||
#define ICE_MIN_LAN_TXRX_MSIX 1
|
||||
#define ICE_MIN_LAN_OICR_MSIX 1
|
||||
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
|
||||
#define ICE_FDIR_MSIX 1
|
||||
#define ICE_NO_VSI 0xffff
|
||||
#define ICE_VSI_MAP_CONTIG 0
|
||||
|
||||
@@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
|
||||
*/
|
||||
static int ice_get_max_txq(struct ice_pf *pf)
|
||||
{
|
||||
return min_t(int, num_online_cpus(),
|
||||
pf->hw.func_caps.common_cap.num_txq);
|
||||
return min3(pf->num_lan_msix, (u16)num_online_cpus(),
|
||||
(u16)pf->hw.func_caps.common_cap.num_txq);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
|
||||
*/
|
||||
static int ice_get_max_rxq(struct ice_pf *pf)
|
||||
{
|
||||
return min_t(int, num_online_cpus(),
|
||||
pf->hw.func_caps.common_cap.num_rxq);
|
||||
return min3(pf->num_lan_msix, (u16)num_online_cpus(),
|
||||
(u16)pf->hw.func_caps.common_cap.num_rxq);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
|
||||
sizeof(struct in6_addr));
|
||||
input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
|
||||
input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
|
||||
|
||||
/* if no protocol requested, use IPPROTO_NONE */
|
||||
if (!fsp->m_u.usr_ip6_spec.l4_proto)
|
||||
input->ip.v6.proto = IPPROTO_NONE;
|
||||
else
|
||||
input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
|
||||
|
||||
memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
|
||||
sizeof(struct in6_addr));
|
||||
memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
|
||||
|
||||
@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
|
||||
|
||||
switch (vsi->type) {
|
||||
case ICE_VSI_PF:
|
||||
vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
|
||||
num_online_cpus());
|
||||
vsi->alloc_txq = min3(pf->num_lan_msix,
|
||||
ice_get_avail_txq_count(pf),
|
||||
(u16)num_online_cpus());
|
||||
if (vsi->req_txq) {
|
||||
vsi->alloc_txq = vsi->req_txq;
|
||||
vsi->num_txq = vsi->req_txq;
|
||||
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
|
||||
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
|
||||
vsi->alloc_rxq = 1;
|
||||
} else {
|
||||
vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
|
||||
num_online_cpus());
|
||||
vsi->alloc_rxq = min3(pf->num_lan_msix,
|
||||
ice_get_avail_rxq_count(pf),
|
||||
(u16)num_online_cpus());
|
||||
if (vsi->req_rxq) {
|
||||
vsi->alloc_rxq = vsi->req_rxq;
|
||||
vsi->num_rxq = vsi->req_rxq;
|
||||
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
|
||||
|
||||
pf->num_lan_rx = vsi->alloc_rxq;
|
||||
|
||||
vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
|
||||
vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
|
||||
max_t(int, vsi->alloc_rxq,
|
||||
vsi->alloc_txq));
|
||||
break;
|
||||
case ICE_VSI_VF:
|
||||
vf = &pf->vf[vsi->vf_id];
|
||||
|
||||
@@ -3430,18 +3430,14 @@ static int ice_ena_msix_range(struct ice_pf *pf)
|
||||
if (v_actual < v_budget) {
|
||||
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
|
||||
v_budget, v_actual);
|
||||
/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
|
||||
#define ICE_MIN_LAN_VECS 2
|
||||
#define ICE_MIN_RDMA_VECS 2
|
||||
#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
|
||||
|
||||
if (v_actual < ICE_MIN_LAN_VECS) {
|
||||
if (v_actual < ICE_MIN_MSIX) {
|
||||
/* error if we can't get minimum vectors */
|
||||
pci_disable_msix(pf->pdev);
|
||||
err = -ERANGE;
|
||||
goto msix_err;
|
||||
} else {
|
||||
pf->num_lan_msix = ICE_MIN_LAN_VECS;
|
||||
pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4884,9 +4880,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
|
||||
goto err_update_filters;
|
||||
}
|
||||
|
||||
/* Add filter for new MAC. If filter exists, just return success */
|
||||
/* Add filter for new MAC. If filter exists, return success */
|
||||
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
|
||||
if (status == ICE_ERR_ALREADY_EXISTS) {
|
||||
/* Although this MAC filter is already present in hardware it's
|
||||
* possible in some cases (e.g. bonding) that dev_addr was
|
||||
* modified outside of the driver and needs to be restored back
|
||||
* to this value.
|
||||
*/
|
||||
memcpy(netdev->dev_addr, mac, netdev->addr_len);
|
||||
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1923,12 +1923,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
|
||||
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
|
||||
l4_proto = ip.v4->protocol;
|
||||
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
|
||||
int ret;
|
||||
|
||||
tunnel |= ICE_TX_CTX_EIPT_IPV6;
|
||||
exthdr = ip.hdr + sizeof(*ip.v6);
|
||||
l4_proto = ip.v6->nexthdr;
|
||||
if (l4.hdr != exthdr)
|
||||
ipv6_skip_exthdr(skb, exthdr - skb->data,
|
||||
ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
|
||||
&l4_proto, &frag_off);
|
||||
if (ret < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* define outer transport */
|
||||
|
||||
@@ -1675,11 +1675,17 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
|
||||
cmd->base.phy_address = hw->phy.addr;
|
||||
|
||||
/* advertising link modes */
|
||||
if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
|
||||
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
|
||||
if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
|
||||
ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
|
||||
if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
|
||||
ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
|
||||
if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
|
||||
ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
|
||||
if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
|
||||
ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
|
||||
if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
|
||||
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
|
||||
|
||||
/* set autoneg settings */
|
||||
@@ -1792,6 +1798,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
|
||||
|
||||
ethtool_convert_link_mode_to_legacy_u32(&advertising,
|
||||
cmd->link_modes.advertising);
|
||||
/* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
|
||||
* We have to check this and convert it to ADVERTISE_2500_FULL
|
||||
* (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
|
||||
*/
|
||||
if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
|
||||
advertising |= ADVERTISE_2500_FULL;
|
||||
|
||||
if (cmd->base.autoneg == AUTONEG_ENABLE) {
|
||||
hw->mac.autoneg = 1;
|
||||
|
||||
@@ -478,10 +478,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
|
||||
dma_addr_t iova;
|
||||
u8 *buf;
|
||||
|
||||
buf = napi_alloc_frag(pool->rbsize);
|
||||
buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
|
||||
if (unlikely(!buf))
|
||||
return -ENOMEM;
|
||||
|
||||
buf = PTR_ALIGN(buf, OTX2_ALIGN);
|
||||
iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
|
||||
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
|
||||
if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
|
||||
|
||||
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
|
||||
|
||||
err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
|
||||
if (err)
|
||||
return err;
|
||||
goto free_page;
|
||||
|
||||
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
|
||||
if (IS_ERR(cmd)) {
|
||||
|
||||
@@ -167,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
|
||||
.min_size = 16 * 1024,
|
||||
};
|
||||
|
||||
static bool
|
||||
mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
|
||||
{
|
||||
return !!(entry->tuple_nat_node.next);
|
||||
}
|
||||
|
||||
static int
|
||||
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
|
||||
{
|
||||
@@ -911,10 +917,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
|
||||
err_insert:
|
||||
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
|
||||
err_rules:
|
||||
if (mlx5_tc_ct_entry_has_nat(entry))
|
||||
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
|
||||
&entry->tuple_nat_node, tuples_nat_ht_params);
|
||||
err_tuple_nat:
|
||||
if (entry->tuple_node.next)
|
||||
rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
|
||||
&entry->tuple_node,
|
||||
tuples_ht_params);
|
||||
@@ -932,7 +938,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
|
||||
{
|
||||
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
|
||||
mutex_lock(&ct_priv->shared_counter_lock);
|
||||
if (entry->tuple_node.next)
|
||||
if (mlx5_tc_ct_entry_has_nat(entry))
|
||||
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
|
||||
&entry->tuple_nat_node,
|
||||
tuples_nat_ht_params);
|
||||
|
||||
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
|
||||
|
||||
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
|
||||
{
|
||||
return NUM_IPSEC_SW_COUNTERS;
|
||||
return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
|
||||
}
|
||||
|
||||
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
|
||||
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
|
||||
|
||||
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
|
||||
{
|
||||
return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
|
||||
return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
|
||||
}
|
||||
|
||||
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
|
||||
|
||||
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
|
||||
{
|
||||
struct mlx5e_channels new_channels = {};
|
||||
bool reset_channels = true;
|
||||
bool opened;
|
||||
int err = 0;
|
||||
|
||||
mutex_lock(&priv->state_lock);
|
||||
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
|
||||
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
|
||||
trust_state);
|
||||
|
||||
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
|
||||
priv->channels.params = new_channels.params;
|
||||
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
|
||||
if (!opened)
|
||||
reset_channels = false;
|
||||
}
|
||||
|
||||
/* Skip if tx_min_inline is the same */
|
||||
if (new_channels.params.tx_min_inline_mode ==
|
||||
priv->channels.params.tx_min_inline_mode)
|
||||
reset_channels = false;
|
||||
|
||||
if (reset_channels)
|
||||
if (reset_channels) {
|
||||
err = mlx5e_safe_switch_channels(priv, &new_channels,
|
||||
mlx5e_update_trust_state_hw,
|
||||
&trust_state);
|
||||
else
|
||||
} else {
|
||||
err = mlx5e_update_trust_state_hw(priv, &trust_state);
|
||||
if (!err && !opened)
|
||||
priv->channels.params = new_channels.params;
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
|
||||
@@ -447,12 +447,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
|
||||
goto out;
|
||||
}
|
||||
|
||||
new_channels.params = priv->channels.params;
|
||||
new_channels.params = *cur_params;
|
||||
new_channels.params.num_channels = count;
|
||||
|
||||
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
|
||||
struct mlx5e_params old_params;
|
||||
|
||||
old_params = *cur_params;
|
||||
*cur_params = new_channels.params;
|
||||
err = mlx5e_num_channels_changed(priv);
|
||||
if (err)
|
||||
*cur_params = old_params;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
@@ -3614,7 +3614,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
|
||||
new_channels.params.num_tc = tc ? tc : 1;
|
||||
|
||||
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
|
||||
struct mlx5e_params old_params;
|
||||
|
||||
old_params = priv->channels.params;
|
||||
priv->channels.params = new_channels.params;
|
||||
err = mlx5e_num_channels_changed(priv);
|
||||
if (err)
|
||||
priv->channels.params = old_params;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -3757,7 +3764,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
struct mlx5e_channels new_channels = {};
|
||||
struct mlx5e_params *old_params;
|
||||
struct mlx5e_params *cur_params;
|
||||
int err = 0;
|
||||
bool reset;
|
||||
|
||||
@@ -3770,8 +3777,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}

old_params = &priv->channels.params;
if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
cur_params = &priv->channels.params;
if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
netdev_warn(netdev, "can't set LRO with legacy RQ\n");
err = -EINVAL;
goto out;
@@ -3779,18 +3786,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)

reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

new_channels.params = *old_params;
new_channels.params = *cur_params;
new_channels.params.lro_en = enable;

if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
reset = false;
}

if (!reset) {
*old_params = new_channels.params;
struct mlx5e_params old_params;

old_params = *cur_params;
*cur_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
if (err)
*cur_params = old_params;
goto out;
}

@@ -4067,9 +4079,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}

if (!reset) {
unsigned int old_mtu = params->sw_mtu;

params->sw_mtu = new_mtu;
if (preactivate)
preactivate(priv, NULL);
if (preactivate) {
err = preactivate(priv, NULL);
if (err) {
params->sw_mtu = old_mtu;
goto out;
}
}
netdev->mtu = params->sw_mtu;
goto out;
}

@@ -5027,7 +5046,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS

@@ -737,7 +737,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)

netdev->features |= NETIF_F_NETNS_LOCAL;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;

@@ -67,6 +67,7 @@
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>

#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;

if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

@@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);

if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
goto offload_rule_0;

if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
@@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

@@ -2269,7 +2277,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -5007,13 +5015,13 @@ errout:
return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
u32 rate_mbps = 0;
u16 vport_num;
u32 rate_mbps;
int err;

vport_num = rpriv->rep->vport;
@@ -5030,7 +5038,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
* Moreover, if rate is non zero we choose to configure to a minimum of
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
if (rate) {
rate = (rate * BITS_PER_BYTE) + 500000;
rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
}

err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
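
A note on the 64-bit arithmetic above: in kernel code, do_div(x, base) divides a u64 in place (x is left holding the quotient and the macro's return value is the remainder), which is why rate is widened to u64 before converting from bytes/sec to mbit/sec. A small illustrative sketch of such a conversion, not the driver's function; the rounding constant and the 1 mbit/sec floor follow the comment in the hunk:

    #include <linux/types.h>
    #include <asm/div64.h>  /* do_div() */

    static u32 bytes_per_sec_to_mbps(u64 rate)
    {
        if (!rate)
            return 0;                   /* zero keeps the policer unlimited */
        rate = rate * 8 + 500000;       /* bits/sec, pre-rounded */
        do_div(rate, 1000000);          /* rate now holds the quotient */
        return rate ? (u32)rate : 1;    /* program at least 1 mbit/sec */
    }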

@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
destroy_ft:
root->cmds->destroy_flow_table(root, ft);
free_ft:
rhltable_destroy(&ft->fgs_hash);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);

@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);

static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
}

#endif

@@ -58,7 +58,7 @@ struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
u16 func_id;
u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
@@ -74,12 +74,17 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
static u32 get_function(u16 func_id, bool ec_function)
{
return func_id & (ec_function << 16);
}

static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
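
The lookup above keys the page_root_xa xarray by a per-function value and lazily allocates an rb-tree root the first time a key is seen. A compact, self-contained sketch of that lazily-populated xarray-of-rb_root pattern, with illustrative names (page_roots, root_for_key) rather than the driver's:

    #include <linux/err.h>
    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY(page_roots);   /* key -> struct rb_root * */

    /* return the rb-tree root for a key, allocating it on first use */
    static struct rb_root *root_for_key(u32 key)
    {
        struct rb_root *root = xa_load(&page_roots, key);
        int err;

        if (root)
            return root;

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
            return ERR_PTR(-ENOMEM);
        *root = RB_ROOT;

        err = xa_insert(&page_roots, key, root, GFP_KERNEL);
        if (err) {                      /* e.g. -EBUSY if another thread won the race */
            kfree(root);
            return ERR_PTR(err);
        }
        return root;
    }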

@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
if (!root)
return ERR_PTR(-ENOMEM);

err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
return root;
}

static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct fw_page *tfp;
int i;

root = page_root_per_func_id(dev, func_id);
root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);

@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u

nfp->addr = addr;
nfp->page = page;
nfp->func_id = func_id;
nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
u32 func_id)
u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;

@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}

static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;

list_for_each_entry(iter, &dev->priv.free_list, list) {
if (iter->func_id != func_id)
if (iter->function != function)
continue;
fp = iter;
}
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
{
struct rb_root *root;

root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;

@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}

static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;

fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
list_add(&fwp->list, &dev->priv.free_list);
}

static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ map:
goto map;
}

err = insert_page(dev, addr, page, func_id);
err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,

for (i = 0; i < npages; i++) {
retry:
err = alloc_4k(dev, &addr, func_id);
err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, func_id);
err = alloc_system_page(dev, function);
if (err)
goto out_4k;

@@ -384,7 +390,7 @@ retry:

out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
@@ -392,14 +398,15 @@ out_free:
return err;
}

static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;

@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;

@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
return 0;
}

static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}

for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

if (nclaimed)
*nclaimed = num_claimed;

@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
.is_static = true,
.can_handle = mlxsw_sp1_span_cpu_can_handle,
.parms_set = mlxsw_sp1_span_entry_cpu_parms,
.configure = mlxsw_sp1_span_entry_cpu_configure,
@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
.is_static = true,
.can_handle = mlxsw_sp_port_dev_check,
.parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
.is_static = true,
.can_handle = mlxsw_sp2_span_cpu_can_handle,
.parms_set = mlxsw_sp2_span_entry_cpu_parms,
.configure = mlxsw_sp2_span_entry_cpu_configure,
@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
if (!refcount_read(&curr->ref_count))
continue;

if (curr->ops->is_static)
continue;

err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
if (err)
continue;

@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
};

struct mlxsw_sp_span_entry_ops {
bool is_static;
bool (*can_handle)(const struct net_device *to_dev);
int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,

@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
return ret;
goto err_remove_config_dt;
}
}
}
@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
return ret;
goto err_remove_config_dt;
}
}
}

@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}

@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
plat->addr64 = 32;
return ehl_common_data(pdev, plat);
}

@@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;

list_for_each_entry(port, &team->port_list, list) {
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
@@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team)
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
rcu_read_unlock();

team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team)

static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
__team_compute_features(team);
mutex_unlock(&team->lock);
netdev_change_features(team->dev);
}
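
The team hunks above move the port walk from a mutex-protected list_for_each_entry() to list_for_each_entry_rcu() under rcu_read_lock(), so readers no longer need team->lock. A minimal sketch of that RCU read-side pattern, with illustrative types that are not part of the team driver; writers are assumed to modify the list with list_add_rcu()/list_del_rcu() under their own lock:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct item {
        int weight;
        struct list_head list;
    };

    /* sum a field over an RCU-protected list without blocking writers */
    static int total_weight(struct list_head *head)
    {
        struct item *it;
        int sum = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(it, head, list)
            sum += it->weight;
        rcu_read_unlock();

        return sum;
    }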

@@ -968,6 +968,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),

@@ -1302,6 +1302,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */

@@ -314,6 +314,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
@@ -340,6 +341,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};

const struct iwl_cfg iwl_qu_b0_hr_b0 = {
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};

const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -366,6 +379,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};

const struct iwl_cfg iwl_qu_c0_hr_b0 = {
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};

const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
.name = "Intel(R) Wi-Fi 6 AX201 160MHz",
.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,

@@ -80,19 +80,45 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
}

/*
* Evaluate a DSM with no arguments and a single u8 return value (inside a
* buffer object), verify and return that value.
* Generic function to evaluate a DSM with no arguments
* and an integer return value,
* (as an integer object or inside a buffer object),
* verify and assign the value in the "value" parameter.
* return 0 in success and the appropriate errno otherwise.
*/
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
u64 *value, size_t expected_size)
{
union acpi_object *obj;
int ret;
int ret = 0;

obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
if (IS_ERR(obj))
if (IS_ERR(obj)) {
IWL_DEBUG_DEV_RADIO(dev,
"Failed to get DSM object. func= %d\n",
func);
return -ENOENT;
}

if (obj->type != ACPI_TYPE_BUFFER) {
if (obj->type == ACPI_TYPE_INTEGER) {
*value = obj->integer.value;
} else if (obj->type == ACPI_TYPE_BUFFER) {
__le64 le_value = 0;

if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
return -EINVAL;

/* if the buffer size doesn't match the expected size */
if (obj->buffer.length != expected_size)
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
obj->buffer.length);

/* assuming LE from Intel BIOS spec */
memcpy(&le_value, obj->buffer.pointer,
min_t(size_t, expected_size, (size_t)obj->buffer.length));
*value = le64_to_cpu(le_value);
} else {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method did not return a valid object, type=%d\n",
obj->type);
@@ -100,15 +126,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
goto out;
}

if (obj->buffer.length != sizeof(u8)) {
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method returned invalid buffer, length=%d\n",
obj->buffer.length);
ret = -EINVAL;
goto out;
}

ret = obj->buffer.pointer[0];
IWL_DEBUG_DEV_RADIO(dev,
"ACPI: DSM method evaluated: func=%d, ret=%d\n",
func, ret);
@@ -116,6 +133,24 @@ out:
ACPI_FREE(obj);
return ret;
}

/*
* Evaluate a DSM with no arguments and a u8 return value,
*/
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
{
int ret;
u64 val;

ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));

if (ret < 0)
return ret;

/* cast val (u64) to be u8 */
*value = (u8)val;
return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);

union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -99,7 +99,7 @@ struct iwl_fw_runtime;

void *iwl_acpi_get_object(struct device *dev, acpi_string method);

int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);

union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
@@ -159,7 +159,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}

static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
static inline
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
{
return -ENOENT;
}
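
With the new prototype above, iwl_acpi_get_dsm_u8() reports success or failure through its return value and hands the byte back through the u8 *value out-parameter (the !CONFIG_ACPI stub simply returns -ENOENT). A hedged caller-side sketch; the helper name, the rev argument of 0 and the fallback-to-default behaviour are illustrative rather than taken from the driver:

    #include "fw/acpi.h"    /* iwl_acpi_get_dsm_u8() */

    /* read one u8-valued DSM function, falling back to a default on any error */
    static u8 read_dsm_or_default(struct device *dev, int func, u8 def)
    {
        u8 value;

        if (iwl_acpi_get_dsm_u8(dev, 0, func, &value) < 0)
            return def;
        return value;
    }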

@@ -224,20 +224,19 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait)
{
const struct firmware *pnvm;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
char pnvm_name[64];
int ret;

/* if the SKU_ID is empty, there's nothing to do */
if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
return 0;

/* if we already have it, nothing to do either */
if (trans->pnvm_loaded)
return 0;
/* load from disk only if we haven't done it (or tried) before */
if (!trans->pnvm_loaded) {
const struct firmware *pnvm;
char pnvm_name[64];
int ret;

/*
* The prefix unfortunately includes a hyphen at the end, so
@@ -254,11 +253,18 @@ int iwl_pnvm_load(struct iwl_trans *trans,
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
/*
* Pretend we've loaded it - at least we've tried and
* couldn't load it at all, so there's no point in
* trying again over and over.
*/
trans->pnvm_loaded = true;
} else {
iwl_pnvm_parse(trans, pnvm->data, pnvm->size);

release_firmware(pnvm);
}
}

iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __IWL_CONFIG_H__
@@ -445,7 +445,7 @@ struct iwl_cfg {
#define IWL_CFG_CORES_BT_GNSS 0x5

#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)

struct iwl_dev_info {
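
On the IWL_SUBDEVICE_NO_160 change above: the >> 9 extracts bit 9 of the subsystem device ID, and bit 9 corresponds to the mask 0x0200 (1 << 9), so with the old 0x0100 mask (bit 8) the shifted result was always 0. A two-line check of that arithmetic as a stand-alone user-space program (the example subdevice value is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned short subdevice = 0x0210;  /* example ID with bit 9 set */

        printf("%d\n", (subdevice & 0x0100) >> 9);  /* prints 0: mask and shift disagree */
        printf("%d\n", (subdevice & 0x0200) >> 9);  /* prints 1: mask matches bit 9 */
        return 0;
    }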
@@ -491,6 +491,7 @@ extern const char iwl9260_killer_1550_name[];
extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -574,6 +575,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
extern const struct iwl_cfg iwl_qu_b0_hr_b0;
extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;

@@ -180,13 +180,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;

/* For safe using a string from FW make sure we have a
* null terminator
*/
reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;

IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);

if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;

@@ -150,16 +150,17 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
}
IWL_EXPORT_SYMBOL(iwl_read_prph);

void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
{
unsigned long flags;

if (iwl_trans_grab_nic_access(trans, &flags)) {
mdelay(delay_ms);
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
IWL_EXPORT_SYMBOL(iwl_write_prph_delay);

int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
@@ -219,8 +220,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV);
iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV, 1);
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2019 Intel Corporation
* Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
u32 val, u32 delay_ms);
static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
iwl_write_prph_delay(trans, ofs, val, 0);
}

int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);

@@ -301,6 +301,12 @@
#define RADIO_RSP_ADDR_POS (6)
#define RADIO_RSP_RD_CMD (3)

/* LTR control (Qu only) */
#define HPM_MAC_LTR_CSR 0xa0348c
#define HPM_MAC_LRT_ENABLE_ALL 0xf
/* also uses CSR_LTR_* for values */
#define HPM_UMAC_LTR 0xa03480

/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c1c)

Some files were not shown because too many files have changed in this diff.