Merge remote-tracking branch 'stable/linux-5.15.y' into rpi-5.15.y

Dom Cobley committed 2022-05-26 15:48:47 +01:00
168 changed files with 1952 additions and 1066 deletions

View File

@@ -163,6 +163,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1286807       |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+

View File

@@ -58,7 +58,7 @@ patternProperties:
     $ref: "/schemas/types.yaml#/definitions/string"
     enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
             ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
-            EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
+            EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP,
             GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
             GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
             I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 41
+SUBLEVEL = 43
 EXTRAVERSION =
 NAME = Trick or Treat

View File

@@ -231,6 +231,21 @@
 			gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
 		};
 	};
+
+	iio-hwmon {
+		compatible = "iio-hwmon";
+		io-channels = <&adc1 7>;
+	};
+};
+
+&adc1 {
+	status = "okay";
+	aspeed,int-vref-microvolt = <2500000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+		&pinctrl_adc10_default &pinctrl_adc11_default
+		&pinctrl_adc12_default &pinctrl_adc13_default
+		&pinctrl_adc14_default &pinctrl_adc15_default>;
 };
 
 &gpio0 {

View File

@@ -246,6 +246,21 @@
 			linux,code = <11>;
 		};
 	};
+
+	iio-hwmon {
+		compatible = "iio-hwmon";
+		io-channels = <&adc1 7>;
+	};
+};
+
+&adc1 {
+	status = "okay";
+	aspeed,int-vref-microvolt = <2500000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+		&pinctrl_adc10_default &pinctrl_adc11_default
+		&pinctrl_adc12_default &pinctrl_adc13_default
+		&pinctrl_adc14_default &pinctrl_adc15_default>;
 };
 
 &ehci1 {

View File

@@ -117,11 +117,6 @@
groups = "FWSPID"; groups = "FWSPID";
}; };
pinctrl_fwqspid_default: fwqspid_default {
function = "FWSPID";
groups = "FWQSPID";
};
pinctrl_fwspiwp_default: fwspiwp_default { pinctrl_fwspiwp_default: fwspiwp_default {
function = "FWSPIWP"; function = "FWSPIWP";
groups = "FWSPIWP"; groups = "FWSPIWP";
@@ -653,12 +648,12 @@
}; };
pinctrl_qspi1_default: qspi1_default { pinctrl_qspi1_default: qspi1_default {
function = "QSPI1"; function = "SPI1";
groups = "QSPI1"; groups = "QSPI1";
}; };
pinctrl_qspi2_default: qspi2_default { pinctrl_qspi2_default: qspi2_default {
function = "QSPI2"; function = "SPI2";
groups = "QSPI2"; groups = "QSPI2";
}; };

View File

@@ -364,6 +364,41 @@
status = "disabled"; status = "disabled";
}; };
adc0: adc@1e6e9000 {
compatible = "aspeed,ast2600-adc0";
reg = <0x1e6e9000 0x100>;
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_ADC>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
#io-channel-cells = <1>;
status = "disabled";
};
adc1: adc@1e6e9100 {
compatible = "aspeed,ast2600-adc1";
reg = <0x1e6e9100 0x100>;
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_ADC>;
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
#io-channel-cells = <1>;
status = "disabled";
};
sbc: secure-boot-controller@1e6f2000 {
compatible = "aspeed,ast2600-sbc";
reg = <0x1e6f2000 0x1000>;
};
video: video@1e700000 {
compatible = "aspeed,ast2600-video-engine";
reg = <0x1e700000 0x1000>;
clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
<&syscon ASPEED_CLK_GATE_ECLK>;
clock-names = "vclk", "eclk";
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
gpio0: gpio@1e780000 { gpio0: gpio@1e780000 {
#gpio-cells = <2>; #gpio-cells = <2>;
gpio-controller; gpio-controller;

View File

@@ -1038,7 +1038,7 @@ vector_bhb_loop8_\name:
 	@ bhb workaround
 	mov	r0, #8
-3:	b	. +  4
+3:	W(b)	. +  4
 	subs	r0, r0, #1
 	bne	3b
 	dsb

View File

@@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame)
 		return -EINVAL;
 
 	frame->sp = frame->fp;
-	frame->fp = *(unsigned long *)(fp);
-	frame->pc = *(unsigned long *)(fp + 4);
+	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
 #else
 	/* check current frame pointer is within bounds */
 	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
 	/* restore the registers from the stack frame */
-	frame->fp = *(unsigned long *)(fp - 12);
-	frame->sp = *(unsigned long *)(fp - 8);
-	frame->pc = *(unsigned long *)(fp - 4);
+	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
+	frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
+	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
 #endif
 
 	return 0;

View File

@@ -288,6 +288,7 @@ void cpu_v7_ca15_ibe(void)
 {
 	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
 		cpu_v7_spectre_v2_init();
+	cpu_v7_spectre_bhb_init();
 }
 
 void cpu_v7_bugs_init(void)

View File

@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1286807
 	{
 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
 	},
 #endif
 	{},

View File

@@ -73,6 +73,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
 		mte_sync_page_tags(page, old_pte, check_swap,
 				   pte_is_tagged);
 	}
+
+	/* ensure the tags are visible before the PTE is set */
+	smp_wmb();
 }
 
 int memcmp_pages(struct page *page1, struct page *page2)

View File

@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
 struct pv_time_stolen_time_region {
-	struct pvclock_vcpu_stolen_time *kaddr;
+	struct pvclock_vcpu_stolen_time __rcu *kaddr;
 };
 
 static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
 /* return stolen time in ns by asking the hypervisor */
 static u64 para_steal_clock(int cpu)
 {
+	struct pvclock_vcpu_stolen_time *kaddr = NULL;
 	struct pv_time_stolen_time_region *reg;
+	u64 ret = 0;
 
 	reg = per_cpu_ptr(&stolen_time_region, cpu);
@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
 	 * online notification callback runs. Until the callback
 	 * has run we just return zero.
 	 */
-	if (!reg->kaddr)
+	rcu_read_lock();
+	kaddr = rcu_dereference(reg->kaddr);
+	if (!kaddr) {
+		rcu_read_unlock();
 		return 0;
+	}
 
-	return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+	ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
+	rcu_read_unlock();
+	return ret;
 }
 
 static int stolen_time_cpu_down_prepare(unsigned int cpu)
 {
+	struct pvclock_vcpu_stolen_time *kaddr = NULL;
 	struct pv_time_stolen_time_region *reg;
 
 	reg = this_cpu_ptr(&stolen_time_region);
 	if (!reg->kaddr)
 		return 0;
 
-	memunmap(reg->kaddr);
-	memset(reg, 0, sizeof(*reg));
+	kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
+	synchronize_rcu();
+	memunmap(kaddr);
 
 	return 0;
 }
 
 static int stolen_time_cpu_online(unsigned int cpu)
 {
+	struct pvclock_vcpu_stolen_time *kaddr = NULL;
 	struct pv_time_stolen_time_region *reg;
 	struct arm_smccc_res res;
@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
 	if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
 		return -EINVAL;
 
-	reg->kaddr = memremap(res.a0,
+	kaddr = memremap(res.a0,
 			      sizeof(struct pvclock_vcpu_stolen_time),
 			      MEMREMAP_WB);
+
+	rcu_assign_pointer(reg->kaddr, kaddr);
 
 	if (!reg->kaddr) {
 		pr_warn("Failed to map stolen time data structure\n");
 		return -ENOMEM;
 	}
 
-	if (le32_to_cpu(reg->kaddr->revision) != 0 ||
-	    le32_to_cpu(reg->kaddr->attributes) != 0) {
+	if (le32_to_cpu(kaddr->revision) != 0 ||
+	    le32_to_cpu(kaddr->attributes) != 0) {
 		pr_warn_once("Unexpected revision or attributes in stolen time data\n");
 		return -ENXIO;
 	}
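Aside: the paravirt.c hunks above turn a bare per-CPU pointer into an RCU-managed one. Readers snapshot the pointer once under rcu_read_lock() and never touch reg->kaddr again; teardown swaps in NULL and waits a grace period before memunmap(). A minimal user-space analogue of the publish/consume ordering, using C11 acquire/release atomics in place of rcu_assign_pointer()/rcu_dereference() (struct and names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct stolen_time { uint64_t stolen_ns; };

static _Atomic(struct stolen_time *) region;	/* starts out NULL */

static void publish(void)
{
	struct stolen_time *st = calloc(1, sizeof(*st));

	st->stolen_ns = 42;
	/* release store: a reader that sees the pointer sees the init too */
	atomic_store_explicit(&region, st, memory_order_release);
}

static uint64_t read_stolen(void)
{
	/* acquire load: pairs with the release store above */
	struct stolen_time *st =
		atomic_load_explicit(&region, memory_order_acquire);

	return st ? st->stolen_ns : 0;	/* snapshot once, then use it */
}

int main(void)
{
	publish();
	printf("%llu\n", (unsigned long long)read_stolen());
	return 0;
}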

View File

@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
 {
 	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+	if (!clk)
+		return;
+
 	clk->cl.dev_id = dev;
 	clk->cl.con_id = NULL;
 	clk->cl.clk = clk;

View File

@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
 {
 	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+	if (!clk)
+		return;
+
 	clk->cl.dev_id = dev_name(dev);
 	clk->cl.con_id = con;
 	clk->cl.clk = clk;

View File

@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
 {
 	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+	if (!clk)
+		return;
+
 	clk->cl.dev_id = dev;
 	clk->cl.con_id = con;
 	clk->cl.clk = clk;
@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
 {
 	struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
+	if (!clk)
+		return;
+
 	clk->cl.dev_id = dev;
 	clk->cl.con_id = con;
 	clk->cl.clk = clk;
@@ -356,6 +360,7 @@ static void clkdev_add_pci(void)
 	struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
 	/* main pci clock */
+	if (clk) {
 		clk->cl.dev_id = "17000000.pci";
 		clk->cl.con_id = NULL;
 		clk->cl.clk = clk;
@@ -366,8 +371,10 @@ static void clkdev_add_pci(void)
 		clk->module = 0;
 		clk->bits = PMU_PCI;
 		clkdev_add(&clk->cl);
+	}
 
 	/* use internal/external bus clock */
+	if (clk_ext) {
 		clk_ext->cl.dev_id = "17000000.pci";
 		clk_ext->cl.con_id = "external";
 		clk_ext->cl.clk = clk_ext;
@@ -375,6 +382,7 @@ static void clkdev_add_pci(void)
 		clk_ext->disable = pci_ext_disable;
 		clkdev_add(&clk_ext->cl);
+	}
 }
 
 /* xway socs can generate clocks on gpio pins */
 static unsigned long valid_clkout_rates[4][5] = {
@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
 		char *name;
 
 		name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+		if (!name)
+			continue;
 		sprintf(name, "clkout%d", i);
 
 		clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+		if (!clk) {
+			kfree(name);
+			continue;
+		}
 		clk->cl.dev_id = "1f103000.cgu";
 		clk->cl.con_id = name;
 		clk->cl.clk = clk;
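Aside: the lantiq hunks above all add the same missing kzalloc() check; the clkout loop additionally has to free the first allocation when the second one fails before skipping the iteration. A tiny user-space sketch of that cleanup shape (stand-in names, not the lantiq code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	for (int i = 0; i < 4; i++) {
		char *name = malloc(sizeof("clkout0"));

		if (!name)
			continue;	/* nothing else to undo yet */
		sprintf(name, "clkout%d", i);

		char *clk = calloc(1, 64);

		if (!clk) {
			free(name);	/* drop the partial allocation */
			continue;
		}
		printf("%s\n", name);
		free(clk);
		free(name);
	}
	return 0;
}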

View File

@@ -166,7 +166,7 @@
 			clocks = <&prci PRCI_CLK_TLCLK>;
 			status = "disabled";
 		};
-		dma: dma@3000000 {
+		dma: dma-controller@3000000 {
 			compatible = "sifive,fu540-c000-pdma";
 			reg = <0x0 0x3000000 0x0 0x8000>;
 			interrupt-parent = <&plic0>;

View File

@@ -142,10 +142,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
 	do_trap(regs, SIGFPE, si_code, "floating point exception");
 }
 
-static void translation_exception(struct pt_regs *regs)
+static void translation_specification_exception(struct pt_regs *regs)
 {
 	/* May never happen. */
-	panic("Translation exception");
+	panic("Translation-Specification Exception");
 }
 
 static void illegal_op(struct pt_regs *regs)
@@ -374,7 +374,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
 	[0x0f]		= hfp_divide_exception,
 	[0x10]		= do_dat_exception,
 	[0x11]		= do_dat_exception,
-	[0x12]		= translation_exception,
+	[0x12]		= translation_specification_exception,
 	[0x13]		= special_op_exception,
 	[0x14]		= default_trap_handler,
 	[0x15]		= operand_exception,

View File

@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
 	list_for_each_entry(tmp, &zpci_list, entry) {
 		if (tmp->fid == fid) {
 			zdev = tmp;
+			zpci_zdev_get(zdev);
 			break;
 		}
 	}

View File

@@ -19,6 +19,7 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
 void zpci_release_device(struct kref *kref);
 static inline void zpci_zdev_put(struct zpci_dev *zdev)
 {
+	if (zdev)
 		kref_put(&zdev->kref, zpci_release_device);
 }

View File

@@ -22,6 +22,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+#include "pci_bus.h"
+
 bool zpci_unique_uid;
 
 void update_uid_checking(bool new)
@@ -403,7 +405,10 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
 		return;
 
 	zdev = get_zdev_by_fid(entry->fid);
-	if (!zdev)
+	if (zdev) {
+		zpci_zdev_put(zdev);
 		return;
+	}
 
 	zpci_create_device(entry->fid, entry->fh, entry->config_state);
} }

View File

@@ -62,10 +62,12 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 		       pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 
 	if (!pdev)
-		return;
+		goto no_pdev;
 
 	pdev->error_state = pci_channel_io_perm_failure;
 	pci_dev_put(pdev);
+no_pdev:
+	zpci_zdev_put(zdev);
 }
 
 void zpci_event_error(void *data)
@@ -94,6 +96,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+	bool existing_zdev = !!zdev;
 	enum zpci_state state;
 
 	zpci_err("avail CCDF:\n");
@@ -156,6 +159,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	default:
 		break;
 	}
+	if (existing_zdev)
+		zpci_zdev_put(zdev);
 }
 
 void zpci_event_availability(void *data)
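Aside: the three s390/pci hunks above form one pattern: get_zdev_by_fid() now takes a reference while it still holds the list lock, and every caller drops that reference with zpci_zdev_put() on all exit paths. A user-space sketch of the lookup-takes-a-reference discipline, with plain atomics standing in for struct kref (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct zdev {
	atomic_int refcount;
	unsigned int fid;
};

static struct zdev *table[4];

static struct zdev *get_by_fid(unsigned int fid)
{
	for (int i = 0; i < 4; i++)
		if (table[i] && table[i]->fid == fid) {
			/* the lookup itself takes the reference... */
			atomic_fetch_add(&table[i]->refcount, 1);
			return table[i];
		}
	return NULL;
}

static void zdev_put(struct zdev *zdev)
{
	/* NULL-tolerant, like the patched zpci_zdev_put() */
	if (zdev && atomic_fetch_sub(&zdev->refcount, 1) == 1)
		free(zdev);	/* last reference frees the object */
}

int main(void)
{
	table[0] = calloc(1, sizeof(*table[0]));
	atomic_store(&table[0]->refcount, 1);
	table[0]->fid = 7;

	struct zdev *z = get_by_fid(7);
	zdev_put(z);	/* ...and every caller drops it when done */
	return 0;
}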

View File

@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
 	# xor remaining bytes from partial register into output
 	mov		%rcx,%rax
 	and		$0xf,%rcx
-	jz		.Ldone8
+	jz		.Ldone2
 	mov		%rax,%r9
 	and		$~0xf,%r9
@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
 	# xor remaining bytes from partial register into output
 	mov		%rcx,%rax
 	and		$0xf,%rcx
-	jz		.Ldone8
+	jz		.Ldone4
 	mov		%rax,%r9
 	and		$~0xf,%r9

View File

@@ -5590,6 +5590,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	int nr_zapped, batch = 0;
+	bool unstable;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
@@ -5621,12 +5622,13 @@ restart:
 			goto restart;
 		}
 
-		if (__kvm_mmu_prepare_zap_page(kvm, sp,
-				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
-			batch += nr_zapped;
+		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
+				&kvm->arch.zapped_obsolete_pages, &nr_zapped);
+		batch += nr_zapped;
+
+		if (unstable)
 			goto restart;
-		}
 	}
 
 	/*
 	 * Trigger a remote TLB flush before freeing the page tables to ensure

View File

@@ -10,13 +10,12 @@
 #include <linux/msg.h>
 #include <linux/shm.h>
 
-typedef long syscall_handler_t(void);
+typedef long syscall_handler_t(long, long, long, long, long, long);
 
 extern syscall_handler_t *sys_call_table[];
 
 #define EXECUTE_SYSCALL(syscall, regs) \
-	(((long (*)(long, long, long, long, long, long)) \
-	  (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+	(((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
 		      UPT_SYSCALL_ARG2(&regs->regs), \
 		      UPT_SYSCALL_ARG3(&regs->regs), \
 		      UPT_SYSCALL_ARG4(&regs->regs), \

View File

@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 		unsigned int set_size)
 {
 	struct drbd_request *r;
-	struct drbd_request *req = NULL;
+	struct drbd_request *req = NULL, *tmp = NULL;
 	int expect_epoch = 0;
 	int expect_size = 0;
@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 	 * to catch requests being barrier-acked "unexpectedly".
 	 * It usually should find the same req again, or some READ preceding it. */
 	list_for_each_entry(req, &connection->transfer_log, tl_requests)
-		if (req->epoch == expect_epoch)
+		if (req->epoch == expect_epoch) {
+			tmp = req;
 			break;
+		}
+
+	req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
 	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
 		if (req->epoch != expect_epoch)
 			break;
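Aside: the drbd hunk works around a property of list_for_each_entry(): when the loop finishes without hitting the break, the cursor points at the list head sentinel, not a valid entry, so the hit is recorded in a separate variable and re-seated with list_prepare_entry(). A plain-C sketch of the same idea (illustrative types, not drbd's):

#include <stddef.h>
#include <stdio.h>

struct req { int epoch; struct req *next; };

static struct req *find_epoch(struct req *head, int epoch)
{
	struct req *tmp = NULL;

	for (struct req *r = head; r; r = r->next)
		if (r->epoch == epoch) {
			tmp = r;	/* remember the hit... */
			break;
		}

	return tmp;	/* ...so "no match" is NULL, never a stale cursor */
}

int main(void)
{
	struct req b = { 2, NULL }, a = { 1, &b };

	printf("%s\n", find_epoch(&a, 3) ? "found" : "not found");
	return 0;
}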

View File

@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
 static DECLARE_WAIT_QUEUE_HEAD(command_done);
 
-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;
 
 /* Format request descriptor. */
 static struct format_descr format_req;
@@ -530,7 +530,6 @@ static struct format_descr format_req;
 static char *floppy_track_buffer;
 static int max_buffer_sectors;
 
-static int *errors;
 typedef void (*done_f)(int);
 static const struct cont_t {
 	void (*interrupt)(void);
@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
 		if (drive_params[current_drive].flags & FTD_MSG)
 			DPRINT("Over/Underrun - retrying\n");
 		bad = 0;
-	} else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+	} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
 		print_errors();
 	}
 	if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
 		if (!next_valid_format(current_drive))
 			return;
 	}
-	err_count = ++(*errors);
+	err_count = ++floppy_errors;
 	INFBOUND(write_errors[current_drive].badness, err_count);
 	if (err_count > drive_params[current_drive].max_errors.abort)
 		cont->done(0);
@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
 		return -EINVAL;
 	}
 	format_req = *tmp_format_req;
-	format_errors = 0;
 	cont = &format_cont;
-	errors = &format_errors;
+	floppy_errors = 0;
 	ret = wait_til_done(redo_format, true);
 	if (ret == -EINTR)
 		return -EINTR;
@@ -2761,10 +2759,11 @@ static int set_next_request(void)
 	current_req = list_first_entry_or_null(&floppy_reqs, struct request,
 					       queuelist);
 	if (current_req) {
-		current_req->error_count = 0;
+		floppy_errors = 0;
 		list_del_init(&current_req->queuelist);
+		return 1;
 	}
-	return current_req != NULL;
+	return 0;
 }
 
 /* Starts or continues processing request. Will automatically unlock the
@@ -2823,7 +2822,6 @@ do_request:
 		_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
 	} else
 		probing = 0;
-	errors = &(current_req->error_count);
 	tmp = make_raw_rw_request();
 	if (tmp < 2) {
 		request_done(tmp);

View File

@@ -106,6 +106,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
 		tmp_rate = parent_rate;
 	else
 		tmp_rate = parent_rate / div;
+
+	if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
+		return;
+
 	tmp_diff = abs(req->rate - tmp_rate);
 
 	if (*best_diff < 0 || *best_diff >= tmp_diff) {

View File

@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
 		} else {
 			/* copy only remaining bytes */
 			memcpy(data, &val, max - currsize);
+			break;
 		}
 	} while (currsize < max);

View File

@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
 	struct stm32_crc *crc = platform_get_drvdata(pdev);
 	int ret = pm_runtime_get_sync(crc->dev);
 
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_noidle(crc->dev);
 		return ret;
+	}
 
 	spin_lock(&crc_list.lock);
 	list_del(&crc->list);
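Aside: pm_runtime_get_sync() raises the device usage count even when it returns an error, which is why the error path above has to rebalance with pm_runtime_put_noidle() before bailing out. A toy model of that counter behaviour (plain C, not the runtime-PM API):

#include <stdio.h>

static int usage;

static int pm_get(int fail)
{
	usage++;		/* counted even when the call fails */
	return fail ? -5 : 0;
}

static void pm_put_noidle(void)
{
	usage--;
}

static int remove_dev(void)
{
	int ret = pm_get(1);	/* simulate a failing get */

	if (ret < 0) {
		pm_put_noidle();	/* rebalance before returning */
		return ret;
	}
	return 0;
}

int main(void)
{
	remove_dev();
	printf("usage=%d\n", usage);	/* stays balanced at 0 */
	return 0;
}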

View File

@@ -436,6 +436,7 @@ static inline int is_dma_buf_file(struct file *file)
 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 {
+	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
 	struct file *file;
 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -445,6 +446,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 	inode->i_size = dmabuf->size;
 	inode_set_bytes(inode, dmabuf->size);
 
+	/*
+	 * The ->i_ino acquired from get_next_ino() is not unique thus
+	 * not suitable for using it as dentry name by dmabuf stats.
+	 * Override ->i_ino with the unique and dmabuffs specific
+	 * value.
+	 */
+	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+
 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
 				 flags, &dma_buf_fops);
 	if (IS_ERR(file))
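Aside: the dma-buf hunk swaps a reusable inode number for one drawn from a private 64-bit counter, so dmabuf stats get names that never collide. A sketch of the counter idiom with C11 atomics (names invented here):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t dmabuf_ino;

static uint64_t next_ino(void)
{
	/* fetch-and-add is atomic, so IDs are unique across threads */
	return atomic_fetch_add(&dmabuf_ino, 1) + 1;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)next_ino(),
	       (unsigned long long)next_ino());	/* prints: 1 2 */
	return 0;
}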

View File

@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 	unsigned long flags;
 	unsigned int on, off;
 
+	if (state->polarity != PWM_POLARITY_NORMAL)
+		return -EINVAL;
+
 	val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
 	do_div(val, NSEC_PER_SEC);
 	if (val > UINT_MAX + 1ULL)

View File

@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
 {
 	struct vf610_gpio_port *port = gpiochip_get_data(chip);
 	unsigned long mask = BIT(gpio);
+	u32 val;
 
-	if (port->sdata && port->sdata->have_paddr)
-		vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
+	if (port->sdata && port->sdata->have_paddr) {
+		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+		val |= mask;
+		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+	}
 
 	vf610_gpio_set(chip, gpio, value);
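Aside: the vf610 fix replaces a blind register write with a read-modify-write so the direction bits of the other pins survive. A self-contained model of the same update against a fake register (values made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t pddr = 0x0000000f;	/* pins 0-3 are already outputs */

static void set_output(unsigned int gpio)
{
	uint32_t val = pddr;		/* read...        */

	val |= UINT32_C(1) << gpio;	/* ...modify...   */
	pddr = val;			/* ...write back  */
}

int main(void)
{
	set_output(8);
	printf("PDDR=0x%08x\n", pddr);	/* 0x0000010f, pins 0-3 preserved */
	return 0;
}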

View File

@@ -1411,9 +1411,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
 #endif

View File

@@ -1045,6 +1045,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
 	       (pm_suspend_target_state == PM_SUSPEND_MEM);
 }
 
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+}
+
 /**
  * amdgpu_acpi_is_s0ix_active
  *

View File

@@ -2259,7 +2259,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
-	if (!adev->in_s0ix)
+	if (amdgpu_acpi_should_gpu_reset(adev))
 		return amdgpu_asic_reset(adev);
 
 	return 0;

View File

@@ -4834,6 +4834,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
 	drm_edid_get_monitor_name(mst_edid, name, namelen);
+	kfree(mst_edid);
 }
 
 /**

View File

@@ -375,6 +375,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
 	}
 }
 
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+				       const u32 *mmioaddr, u32 mmio_count,
+				       int header_ver, u8 dmc_id)
+{
+	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+	u32 start_range, end_range;
+	int i;
+
+	if (dmc_id >= DMC_FW_MAX) {
+		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+		return false;
+	}
+
+	if (header_ver == 1) {
+		start_range = DMC_MMIO_START_RANGE;
+		end_range = DMC_MMIO_END_RANGE;
+	} else if (dmc_id == DMC_FW_MAIN) {
+		start_range = TGL_MAIN_MMIO_START;
+		end_range = TGL_MAIN_MMIO_END;
+	} else if (DISPLAY_VER(i915) >= 13) {
+		start_range = ADLP_PIPE_MMIO_START;
+		end_range = ADLP_PIPE_MMIO_END;
+	} else if (DISPLAY_VER(i915) >= 12) {
+		start_range = TGL_PIPE_MMIO_START(dmc_id);
+		end_range = TGL_PIPE_MMIO_END(dmc_id);
+	} else {
+		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+		return false;
+	}
+
+	for (i = 0; i < mmio_count; i++) {
+		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+			return false;
+	}
+
+	return true;
+}
+
 static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 			       const struct intel_dmc_header_base *dmc_header,
 			       size_t rem_size, u8 dmc_id)
@@ -444,6 +482,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 		return 0;
 	}
 
+	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+					dmc_header->header_ver, dmc_id)) {
+		drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
+		return 0;
+	}
+
 	for (i = 0; i < mmio_count; i++) {
 		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
 		dmc_info->mmiodata[i] = mmiodata[i];
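Aside: parse_dmc_fw_header() now refuses firmware whose MMIO offsets stray outside a per-platform window — the standard shape for validating untrusted blob contents before acting on them. A stripped-down sketch of the check (the ranges and sample data here are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool addrs_in_range(const uint32_t *addr, unsigned int n,
			   uint32_t start, uint32_t end)
{
	for (unsigned int i = 0; i < n; i++)
		if (addr[i] < start || addr[i] > end)
			return false;	/* one bad offset rejects the blob */
	return true;
}

int main(void)
{
	const uint32_t fw[] = { 0x92000, 0x93ff0, 0xdead0000 };

	printf("%s\n", addrs_in_range(fw, 3, 0x92000, 0x93fff)
	       ? "ok" : "bad firmware");
	return 0;
}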

View File

@@ -376,21 +376,6 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 		return -EINVAL;
 	}
 
-	/*
-	 * The port numbering and mapping here is bizarre. The now-obsolete
-	 * swsci spec supports ports numbered [0..4]. Port E is handled as a
-	 * special case, but port F and beyond are not. The functionality is
-	 * supposed to be obsolete for new platforms. Just bail out if the port
-	 * number is out of bounds after mapping.
-	 */
-	if (port > 4) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
-			    intel_encoder->base.base.id, intel_encoder->base.name,
-			    port_name(intel_encoder->port), port);
-		return -EINVAL;
-	}
-
 	if (!enable)
 		parm |= 4 << 8;

View File

@@ -7818,6 +7818,22 @@ enum {
 /* MMIO address range for DMC program (0x80000 - 0x82FFF) */
 #define DMC_MMIO_START_RANGE	0x80000
 #define DMC_MMIO_END_RANGE	0x8FFFF
+#define DMC_V1_MMIO_START_RANGE	0x80000
+#define TGL_MAIN_MMIO_START	0x8F000
+#define TGL_MAIN_MMIO_END	0x8FFFF
+#define _TGL_PIPEA_MMIO_START	0x92000
+#define _TGL_PIPEA_MMIO_END	0x93FFF
+#define _TGL_PIPEB_MMIO_START	0x96000
+#define _TGL_PIPEB_MMIO_END	0x97FFF
+#define ADLP_PIPE_MMIO_START	0x5F000
+#define ADLP_PIPE_MMIO_END	0x5FFFF
+#define TGL_PIPE_MMIO_START(dmc_id)	_PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+						   _TGL_PIPEB_MMIO_START)
+#define TGL_PIPE_MMIO_END(dmc_id)	_PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+						   _TGL_PIPEB_MMIO_END)
 #define SKL_DMC_DC3_DC5_COUNT	_MMIO(0x80030)
 #define SKL_DMC_DC5_DC6_COUNT	_MMIO(0x8002C)
 #define BXT_DMC_DC3_DC5_COUNT	_MMIO(0x80038)

View File

@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
 	if (i2c->bus_freq == 0) {
 		dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_disable_clk;
 	}
 
 	adap = &i2c->adap;
@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)
 	ret = i2c_add_adapter(adap);
 	if (ret < 0)
-		return ret;
+		goto err_disable_clk;
 
 	dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);
 
+	return 0;
+
+err_disable_clk:
+	clk_disable_unprepare(i2c->clk);
+
 	return ret;
 }
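Aside: the i2c-mt7621 hunks convert early returns into a single err_disable_clk unwind label, so the clock enabled earlier in probe is always released on failure. A minimal user-space sketch of that single-exit shape (function names are stand-ins, not the driver's):

#include <stdio.h>

static int clk_on(void)      { puts("clk on");  return 0; }
static void clk_off(void)    { puts("clk off"); }
static int add_adapter(void) { return -1; /* pretend this fails */ }

static int probe(void)
{
	int ret = clk_on();

	if (ret)
		return ret;	/* nothing acquired yet */

	ret = add_adapter();
	if (ret < 0)
		goto err_disable_clk;	/* unwind instead of early return */

	return 0;

err_disable_clk:
	clk_off();	/* the one place the clock is released on error */
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}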

View File

@@ -77,6 +77,7 @@
 /* SB800 constants */
 #define SB800_PIIX4_SMB_IDX		0xcd6
+#define SB800_PIIX4_SMB_MAP_SIZE	2
 
 #define KERNCZ_IMC_IDX			0x3e
 #define KERNCZ_IMC_DATA			0x3f
@@ -97,6 +98,9 @@
 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ	0x18
 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ	3
 
+#define SB800_PIIX4_FCH_PM_ADDR		0xFED80300
+#define SB800_PIIX4_FCH_PM_SIZE		8
+
 /* insmod parameters */
 
 /* If force is set to anything different from 0, we forcibly enable the
@@ -155,6 +159,12 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
 };
 static const char *piix4_aux_port_name_sb800 = " port 1";
 
+struct sb800_mmio_cfg {
+	void __iomem *addr;
+	struct resource *res;
+	bool use_mmio;
+};
+
 struct i2c_piix4_adapdata {
 	unsigned short smba;
@@ -162,8 +172,75 @@ struct i2c_piix4_adapdata {
 	bool sb800_main;
 	bool notify_imc;
 	u8 port;		/* Port number, shifted */
+	struct sb800_mmio_cfg mmio_cfg;
 };
 
+static int piix4_sb800_region_request(struct device *dev,
+				      struct sb800_mmio_cfg *mmio_cfg)
+{
+	if (mmio_cfg->use_mmio) {
+		struct resource *res;
+		void __iomem *addr;
+
+		res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+					       SB800_PIIX4_FCH_PM_SIZE,
+					       "sb800_piix4_smb");
+		if (!res) {
+			dev_err(dev,
+				"SMBus base address memory region 0x%x already in use.\n",
+				SB800_PIIX4_FCH_PM_ADDR);
+			return -EBUSY;
+		}
+
+		addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
+			       SB800_PIIX4_FCH_PM_SIZE);
+		if (!addr) {
+			release_resource(res);
+			dev_err(dev, "SMBus base address mapping failed.\n");
+			return -ENOMEM;
+		}
+
+		mmio_cfg->res = res;
+		mmio_cfg->addr = addr;
+
+		return 0;
+	}
+
+	if (!request_muxed_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE,
+				  "sb800_piix4_smb")) {
+		dev_err(dev,
+			"SMBus base address index region 0x%x already in use.\n",
+			SB800_PIIX4_SMB_IDX);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void piix4_sb800_region_release(struct device *dev,
+				       struct sb800_mmio_cfg *mmio_cfg)
+{
+	if (mmio_cfg->use_mmio) {
+		iounmap(mmio_cfg->addr);
+		release_resource(mmio_cfg->res);
+		return;
+	}
+
+	release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE);
+}
+
+static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev)
+{
+	/*
+	 * cd6h/cd7h port I/O accesses can be disabled on AMD processors
+	 * w/ SMBus PCI revision ID 0x51 or greater. MMIO is supported on
+	 * the same processors and is the recommended access method.
+	 */
+	return (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+		PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+		PIIX4_dev->revision >= 0x51);
+}
+
 static int piix4_setup(struct pci_dev *PIIX4_dev,
 		       const struct pci_device_id *id)
 {
@@ -263,12 +340,61 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
 	return piix4_smba;
 }
 
+static int piix4_setup_sb800_smba(struct pci_dev *PIIX4_dev,
+				  u8 smb_en,
+				  u8 aux,
+				  u8 *smb_en_status,
+				  unsigned short *piix4_smba)
+{
+	struct sb800_mmio_cfg mmio_cfg;
+	u8 smba_en_lo;
+	u8 smba_en_hi;
+	int retval;
+
+	mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
+	retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
+	if (retval)
+		return retval;
+
+	if (mmio_cfg.use_mmio) {
+		smba_en_lo = ioread8(mmio_cfg.addr);
+		smba_en_hi = ioread8(mmio_cfg.addr + 1);
+	} else {
+		outb_p(smb_en, SB800_PIIX4_SMB_IDX);
+		smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+		outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
+		smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+	}
+
+	piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
+
+	if (!smb_en) {
+		*smb_en_status = smba_en_lo & 0x10;
+		*piix4_smba = smba_en_hi << 8;
+		if (aux)
+			*piix4_smba |= 0x20;
+	} else {
+		*smb_en_status = smba_en_lo & 0x01;
+		*piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+	}
+
+	if (!*smb_en_status) {
+		dev_err(&PIIX4_dev->dev,
+			"SMBus Host Controller not enabled!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 			     const struct pci_device_id *id, u8 aux)
 {
 	unsigned short piix4_smba;
-	u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status, port_sel;
+	u8 smb_en, smb_en_status, port_sel;
 	u8 i2ccfg, i2ccfg_offset = 0x10;
+	struct sb800_mmio_cfg mmio_cfg;
+	int retval;
 
 	/* SB800 and later SMBus does not support forcing address */
 	if (force || force_addr) {
@@ -290,35 +416,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 	else
 		smb_en = (aux) ? 0x28 : 0x2c;
 
-	if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb")) {
-		dev_err(&PIIX4_dev->dev,
-			"SMB base address index region 0x%x already in use.\n",
-			SB800_PIIX4_SMB_IDX);
-		return -EBUSY;
-	}
-
-	outb_p(smb_en, SB800_PIIX4_SMB_IDX);
-	smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
-	outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
-	smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
-
-	release_region(SB800_PIIX4_SMB_IDX, 2);
-
-	if (!smb_en) {
-		smb_en_status = smba_en_lo & 0x10;
-		piix4_smba = smba_en_hi << 8;
-		if (aux)
-			piix4_smba |= 0x20;
-	} else {
-		smb_en_status = smba_en_lo & 0x01;
-		piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
-	}
-
-	if (!smb_en_status) {
-		dev_err(&PIIX4_dev->dev,
-			"SMBus Host Controller not enabled!\n");
-		return -ENODEV;
-	}
+	retval = piix4_setup_sb800_smba(PIIX4_dev, smb_en, aux, &smb_en_status,
+					&piix4_smba);
+
+	if (retval)
+		return retval;
 
 	if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
 		return -ENODEV;
@@ -371,10 +473,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
 		}
 	} else {
-		if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
-					  "sb800_piix4_smb")) {
+		mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
+		retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
+		if (retval) {
 			release_region(piix4_smba, SMBIOSIZE);
-			return -EBUSY;
+			return retval;
 		}
 
 		outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -384,7 +487,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 			SB800_PIIX4_PORT_IDX;
 		piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
 		piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-		release_region(SB800_PIIX4_SMB_IDX, 2);
+		piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
 	}
 
 	dev_info(&PIIX4_dev->dev,
@@ -662,6 +765,29 @@ static void piix4_imc_wakeup(void)
 	release_region(KERNCZ_IMC_IDX, 2);
 }
 
+static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
+{
+	u8 smba_en_lo, val;
+
+	if (mmio_cfg->use_mmio) {
+		smba_en_lo = ioread8(mmio_cfg->addr + piix4_port_sel_sb800);
+		val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
+		if (smba_en_lo != val)
+			iowrite8(val, mmio_cfg->addr + piix4_port_sel_sb800);
+
+		return (smba_en_lo & piix4_port_mask_sb800);
+	}
+
+	outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
+	smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+
+	val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
+	if (smba_en_lo != val)
+		outb_p(val, SB800_PIIX4_SMB_IDX + 1);
+
+	return (smba_en_lo & piix4_port_mask_sb800);
+}
+
 /*
  * Handles access to multiple SMBus ports on the SB800.
  *	The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -678,12 +804,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 	unsigned short piix4_smba = adapdata->smba;
 	int retries = MAX_TIMEOUT;
 	int smbslvcnt;
-	u8 smba_en_lo;
-	u8 port;
+	u8 prev_port;
 	int retval;
 
-	if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb"))
-		return -EBUSY;
+	retval = piix4_sb800_region_request(&adap->dev, &adapdata->mmio_cfg);
+	if (retval)
+		return retval;
 
 	/* Request the SMBUS semaphore, avoid conflicts with the IMC */
 	smbslvcnt = inb_p(SMBSLVCNT);
@@ -738,18 +864,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 		}
 	}
 
-	outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
-	smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
-
-	port = adapdata->port;
-	if ((smba_en_lo & piix4_port_mask_sb800) != port)
-		outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
-		       SB800_PIIX4_SMB_IDX + 1);
+	prev_port = piix4_sb800_port_sel(adapdata->port, &adapdata->mmio_cfg);
 
 	retval = piix4_access(adap, addr, flags, read_write,
 			      command, size, data);
 
-	outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
+	piix4_sb800_port_sel(prev_port, &adapdata->mmio_cfg);
 
 	/* Release the semaphore */
 	outb_p(smbslvcnt | 0x20, SMBSLVCNT);
@@ -758,7 +878,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 		piix4_imc_wakeup();
 
 release:
-	release_region(SB800_PIIX4_SMB_IDX, 2);
+	piix4_sb800_region_release(&adap->dev, &adapdata->mmio_cfg);
 	return retval;
 }
@@ -836,6 +956,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 		return -ENOMEM;
 	}
 
+	adapdata->mmio_cfg.use_mmio = piix4_sb800_use_mmio(dev);
 	adapdata->smba = smba;
 	adapdata->sb800_main = sb800_main;
 	adapdata->port = port << piix4_port_shift_sb800;

View File

@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);
 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
 
+static const unsigned int input_max_code[EV_CNT] = {
+	[EV_KEY] = KEY_MAX,
+	[EV_REL] = REL_MAX,
+	[EV_ABS] = ABS_MAX,
+	[EV_MSC] = MSC_MAX,
+	[EV_SW] = SW_MAX,
+	[EV_LED] = LED_MAX,
+	[EV_SND] = SND_MAX,
+	[EV_FF] = FF_MAX,
+};
+
 static inline int is_event_supported(unsigned int code,
 				     unsigned long *bm, unsigned int max)
 {
@@ -2074,6 +2085,14 @@ EXPORT_SYMBOL(input_get_timestamp);
  */
 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
 {
+	if (type < EV_CNT && input_max_code[type] &&
+	    code > input_max_code[type]) {
+		pr_err("%s: invalid code %u for type %u\n", __func__, code,
+		       type);
+		dump_stack();
+		return;
+	}
+
 	switch (type) {
 	case EV_KEY:
 		__set_bit(code, dev->keybit);
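Aside: input_set_capability() now validates the code against a per-type maximum held in one designated-initializer table, keeping the check data-driven instead of a switch per type. A compact sketch of the same table lookup (the numeric limits below are stand-ins for KEY_MAX and friends):

#include <stdbool.h>
#include <stdio.h>

enum { EV_KEY, EV_REL, EV_ABS, EV_CNT };

static const unsigned int max_code[EV_CNT] = {
	[EV_KEY] = 767,		/* stand-in for KEY_MAX */
	[EV_REL] = 15,		/* stand-in for REL_MAX */
	[EV_ABS] = 63,		/* stand-in for ABS_MAX */
};

static bool code_ok(unsigned int type, unsigned int code)
{
	/* a zero entry means "no limit known for this type" */
	return type < EV_CNT && max_code[type] && code <= max_code[type];
}

int main(void)
{
	printf("%d %d\n", code_ok(EV_REL, 9), code_ok(EV_REL, 99));
	return 0;
}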

View File

@@ -420,9 +420,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
 		if (error)
 			return error;
 
-		usleep_range(50, 100);
+		usleep_range(12000, 15000);
 		gpiod_set_value_cansleep(reset_gpio, 0);
-		msleep(100);
+		msleep(160);
 	}
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);

View File

@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)
 	err = pm_runtime_get_sync(&sdata->client->dev);
 	if (err < 0)
-		return err;
+		goto out;
 
 	err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
 	if (err)
-		return err;
+		goto out;
 
 	mutex_lock(&sdata->mutex);
 	sdata->running = true;
@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
 				 "failed to enable touchkey\n");
 	}
 
-	return 0;
+out:
+	pm_runtime_put_noidle(&sdata->client->dev);
+	return err;
 }
static void stmfts_input_close(struct input_dev *dev) static void stmfts_input_close(struct input_dev *dev)

View File

@@ -18,14 +18,9 @@
 #define M_CAN_PCI_MMIO_BAR		0
 
+#define M_CAN_CLOCK_FREQ_EHL		200000000
 #define CTL_CSR_INT_CTL_OFFSET		0x508
 
-struct m_can_pci_config {
-	const struct can_bittiming_const *bit_timing;
-	const struct can_bittiming_const *data_timing;
-	unsigned int clock_freq;
-};
-
 struct m_can_pci_priv {
 	struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
 	.read_fifo = iomap_read_fifo,
 };
 
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
-	.name = KBUILD_MODNAME,
-	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
-	.tseg1_max = 64,
-	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
-	.tseg2_max = 128,
-	.sjw_max = 128,
-	.brp_min = 1,
-	.brp_max = 512,
-	.brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
-	.name = KBUILD_MODNAME,
-	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
-	.tseg1_max = 16,
-	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
-	.tseg2_max = 8,
-	.sjw_max = 4,
-	.brp_min = 1,
-	.brp_max = 32,
-	.brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
-	.bit_timing = &m_can_bittiming_const_ehl,
-	.data_timing = &m_can_data_bittiming_const_ehl,
-	.clock_freq = 200000000,
-};
-
 static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
 	struct device *dev = &pci->dev;
-	const struct m_can_pci_config *cfg;
 	struct m_can_classdev *mcan_class;
 	struct m_can_pci_priv *priv;
 	void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 	if (!mcan_class)
 		return -ENOMEM;
 
-	cfg = (const struct m_can_pci_config *)id->driver_data;
-
 	priv = cdev_to_priv(mcan_class);
 
 	priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 	mcan_class->dev = &pci->dev;
 	mcan_class->net->irq = pci_irq_vector(pci, 0);
 	mcan_class->pm_clock_support = 1;
-	mcan_class->bit_timing = cfg->bit_timing;
-	mcan_class->data_timing = cfg->data_timing;
-	mcan_class->can.clock.freq = cfg->clock_freq;
+	mcan_class->can.clock.freq = id->driver_data;
 	mcan_class->ops = &m_can_pci_ops;
 
 	pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
 			 m_can_pci_suspend, m_can_pci_resume);
 
 static const struct pci_device_id m_can_pci_id_table[] = {
-	{ PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
-	{ PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+	{ PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+	{ PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
 	{ }	/* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);

View File

@@ -345,7 +345,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		     int budget)
 {
 	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
-	bool is_rsc_completed = true;
 	int err = 0;
 
 	for (; (self->sw_head != self->hw_head) && budget;
@@ -363,12 +362,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 			continue;
 
 		if (!buff->is_eop) {
+			unsigned int frag_cnt = 0U;
 			buff_ = buff;
 			do {
+				bool is_rsc_completed = true;
+
 				if (buff_->next >= self->size) {
 					err = -EIO;
 					goto err_exit;
 				}
+
+				frag_cnt++;
 				next_ = buff_->next,
 				buff_ = &self->buff_ring[next_];
 				is_rsc_completed =
@@ -376,18 +380,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 							    next_,
 							    self->hw_head);
 
-				if (unlikely(!is_rsc_completed))
-					break;
+				if (unlikely(!is_rsc_completed) ||
+				    frag_cnt > MAX_SKB_FRAGS) {
+					err = 0;
+					goto err_exit;
+				}
 
 				buff->is_error |= buff_->is_error;
 				buff->is_cso_err |= buff_->is_cso_err;
 			} while (!buff_->is_eop);
 
-			if (!is_rsc_completed) {
-				err = 0;
-				goto err_exit;
-			}
 			if (buff->is_error ||
 			    (buff->is_lro && buff->is_cso_err)) {
 				buff_ = buff;
@@ -445,7 +448,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 				       ALIGN(hdr_len, sizeof(long)));
 
 		if (buff->len - hdr_len > 0) {
-			skb_add_rx_frag(skb, 0, buff->rxdata.page,
+			skb_add_rx_frag(skb, i++, buff->rxdata.page,
 					buff->rxdata.pg_off + hdr_len,
 					buff->len - hdr_len,
 					AQ_CFG_RX_FRAME_MAX);
@@ -454,7 +457,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		if (!buff->is_eop) {
 			buff_ = buff;
-			i = 1U;
 			do {
 				next_ = buff_->next;
 				buff_ = &self->buff_ring[next_];

View File

@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
 		err = -ENXIO;
 		goto err_exit;
 	}
+
+	/* Validate that the new hw_head_ is reasonable. */
+	if (hw_head_ >= ring->size) {
+		err = -ENXIO;
+		goto err_exit;
+	}
+
 	ring->hw_head = hw_head_;
 
 	err = aq_hw_err_from_flags(self);

View File

@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	device_set_wakeup_capable(&pdev->dev, 1);
 
 	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
-	if (IS_ERR(priv->wol_clk))
-		return PTR_ERR(priv->wol_clk);
+	if (IS_ERR(priv->wol_clk)) {
+		ret = PTR_ERR(priv->wol_clk);
+		goto err_deregister_fixed_link;
+	}
 
 	/* Set the needed headroom once and for all */
 	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);

View File

@@ -1250,7 +1250,6 @@ static void gem_rx_refill(struct macb_queue *queue)
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		queue->rx_prepared_head++;
 		desc = macb_rx_desc(queue, entry);
 
 		if (!queue->rx_skbuff[entry]) {
@@ -1289,6 +1288,7 @@ static void gem_rx_refill(struct macb_queue *queue)
 			dma_wmb();
 			desc->addr &= ~MACB_BIT(RX_USED);
 		}
+		queue->rx_prepared_head++;
 	}
 
 	/* Make descriptor updates visible to hardware */

View File

@@ -1398,8 +1398,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* alloc_etherdev ensures aligned and zeroed private structures */
 	dev = alloc_etherdev (sizeof (*tp));
-	if (!dev)
+	if (!dev) {
+		pci_disable_device(pdev);
 		return -ENOMEM;
+	}
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1778,6 +1780,7 @@ err_out_free_res:
 
 err_out_free_netdev:
 	free_netdev (dev);
+	pci_disable_device(pdev);
 	return -ENODEV;
 }

View File

@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
 	q_vector->tx.itr_mode = ITR_DYNAMIC;
 	q_vector->rx.itr_mode = ITR_DYNAMIC;
+	q_vector->tx.type = ICE_TX_CONTAINER;
+	q_vector->rx.type = ICE_RX_CONTAINER;
 
 	if (vsi->type == ICE_VSI_VF)
 		goto out;

View File

@@ -3466,15 +3466,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
-enum ice_container_type {
-	ICE_RX_CONTAINER,
-	ICE_TX_CONTAINER,
-};
-
 /**
  * ice_get_rc_coalesce - get ITR values for specific ring container
  * @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
 * @rc: ring container that the ITR values will come from
 *
 * Query the device for ice_ring_container specific ITR values. This is
@@ -3484,13 +3478,12 @@ enum ice_container_type {
 * Returns 0 on success, negative otherwise.
 */
 static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
-		    struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
 {
 	if (!rc->ring)
 		return -EINVAL;
 
-	switch (c_type) {
+	switch (rc->type) {
 	case ICE_RX_CONTAINER:
 		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
 		ec->rx_coalesce_usecs = rc->itr_setting;
@@ -3501,7 +3494,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
 		ec->tx_coalesce_usecs = rc->itr_setting;
 		break;
 	default:
-		dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+		dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", rc->type);
 		return -EINVAL;
 	}
 
@@ -3522,18 +3515,18 @@ static int
 ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
 {
 	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
 					&vsi->rx_rings[q_num]->q_vector->rx))
 			return -EINVAL;
-		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
 					&vsi->tx_rings[q_num]->q_vector->tx))
 			return -EINVAL;
 	} else if (q_num < vsi->num_rxq) {
-		if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
 					&vsi->rx_rings[q_num]->q_vector->rx))
 			return -EINVAL;
 	} else if (q_num < vsi->num_txq) {
-		if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+		if (ice_get_rc_coalesce(ec,
 					&vsi->tx_rings[q_num]->q_vector->tx))
 			return -EINVAL;
 	} else {
@@ -3585,7 +3578,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
 /**
  * ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
 * @ec: ethtool structure from user to update ITR settings
 * @rc: ring container that the ITR values will come from
 * @vsi: VSI associated to the ring container
@@ -3597,10 +3589,10 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
 * Returns 0 on success, negative otherwise.
 */
 static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
 		    struct ice_ring_container *rc, struct ice_vsi *vsi)
 {
-	const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+	const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
 	u32 use_adaptive_coalesce, coalesce_usecs;
 	struct ice_pf *pf = vsi->back;
 	u16 itr_setting;
@@ -3608,7 +3600,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 	if (!rc->ring)
 		return -EINVAL;
 
-	switch (c_type) {
+	switch (rc->type) {
 	case ICE_RX_CONTAINER:
 		if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
 		    (ec->rx_coalesce_usecs_high &&
@@ -3641,7 +3633,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 		break;
 	default:
 		dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
-			c_type);
+			rc->type);
 		return -EINVAL;
 	}
 
@@ -3690,22 +3682,22 @@ static int
 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
 {
 	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-		if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
 					&vsi->rx_rings[q_num]->q_vector->rx,
 					vsi))
 			return -EINVAL;
-		if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
 					&vsi->tx_rings[q_num]->q_vector->tx,
 					vsi))
 			return -EINVAL;
 	} else if (q_num < vsi->num_rxq) {
-		if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
 					&vsi->rx_rings[q_num]->q_vector->rx,
 					vsi))
 			return -EINVAL;
 	} else if (q_num < vsi->num_txq) {
-		if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+		if (ice_set_rc_coalesce(ec,
 					&vsi->tx_rings[q_num]->q_vector->tx,
 					vsi))
 			return -EINVAL;
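The refactor replaces a parameter threaded through every helper with state recorded once on the object itself: the container now knows whether it is Rx or Tx. A rough sketch of the shape, using invented names rather than the ice driver's:

	enum container_type { RX_CONTAINER, TX_CONTAINER };

	struct ring_container {
		enum container_type type;	/* set once at allocation time */
		unsigned short itr;
	};

	/* After the refactor the helper needs no extra type parameter. */
	static int get_coalesce(struct ring_container *rc, unsigned int *out)
	{
		switch (rc->type) {
		case RX_CONTAINER:
		case TX_CONTAINER:
			*out = rc->itr;
			return 0;
		default:
			return -1;
		}
	}

Recording the type at allocation also removes the possibility of a caller passing the wrong type constant for the container it holds.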

@@ -2980,8 +2980,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
 	ice_for_each_q_vector(vsi, i) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
 
-		coalesce[i].itr_tx = q_vector->tx.itr_setting;
-		coalesce[i].itr_rx = q_vector->rx.itr_setting;
+		coalesce[i].itr_tx = q_vector->tx.itr_settings;
+		coalesce[i].itr_rx = q_vector->rx.itr_settings;
 		coalesce[i].intrl = q_vector->intrl;
 
 		if (i < vsi->num_txq)
@@ -3037,21 +3037,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 		 */
 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
 			rc = &vsi->q_vectors[i]->rx;
-			rc->itr_setting = coalesce[i].itr_rx;
+			rc->itr_settings = coalesce[i].itr_rx;
 			ice_write_itr(rc, rc->itr_setting);
 		} else if (i < vsi->alloc_rxq) {
 			rc = &vsi->q_vectors[i]->rx;
-			rc->itr_setting = coalesce[0].itr_rx;
+			rc->itr_settings = coalesce[0].itr_rx;
 			ice_write_itr(rc, rc->itr_setting);
 		}
 
 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
 			rc = &vsi->q_vectors[i]->tx;
-			rc->itr_setting = coalesce[i].itr_tx;
+			rc->itr_settings = coalesce[i].itr_tx;
 			ice_write_itr(rc, rc->itr_setting);
 		} else if (i < vsi->alloc_txq) {
 			rc = &vsi->q_vectors[i]->tx;
-			rc->itr_setting = coalesce[0].itr_tx;
+			rc->itr_settings = coalesce[0].itr_tx;
 			ice_write_itr(rc, rc->itr_setting);
 		}
@@ -3065,12 +3065,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 	for (; i < vsi->num_q_vectors; i++) {
 		/* transmit */
 		rc = &vsi->q_vectors[i]->tx;
-		rc->itr_setting = coalesce[0].itr_tx;
+		rc->itr_settings = coalesce[0].itr_tx;
 		ice_write_itr(rc, rc->itr_setting);
 
 		/* receive */
 		rc = &vsi->q_vectors[i]->rx;
-		rc->itr_setting = coalesce[0].itr_rx;
+		rc->itr_settings = coalesce[0].itr_rx;
 		ice_write_itr(rc, rc->itr_setting);
 
 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;

@@ -5656,9 +5656,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
 		netif_carrier_on(vsi->netdev);
 	}
 
-	/* clear this now, and the first stats read will be used as baseline */
-	vsi->stat_offsets_loaded = false;
+	/* Perform an initial read of the statistics registers now to
+	 * set the baseline so counters are ready when interface is up
+	 */
+	ice_update_eth_stats(vsi);
 
 	ice_service_task_schedule(pf);
 
 	return 0;

@@ -254,12 +254,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
 */
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
 {
 	u64 systime;
 	int i;
 
+	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+		return -EAGAIN;
+
 	/* Read the current PHC time */
 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
@@ -282,6 +289,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
 		}
 	}
+	clear_bit(ICE_CFG_BUSY, pf->state);
+
+	return 0;
 }
 
 /**
@@ -1418,17 +1428,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
 {
 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+	int err;
 
 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
 		return;
 
-	ice_ptp_update_cached_phctime(pf);
+	err = ice_ptp_update_cached_phctime(pf);
 	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
 
-	/* Run twice a second */
+	/* Run twice a second or reschedule if phc update failed */
 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
-				   msecs_to_jiffies(500));
+				   msecs_to_jiffies(err ? 10 : 500));
 }
 
 /**
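The locking here is the classic try-lock-or-retry pattern: take an atomic busy flag, back off with -EAGAIN if it is already held, and let the periodic worker choose a short retry delay. A userspace sketch of the same idea using C11 atomics (all names invented):

	#include <stdatomic.h>
	#include <errno.h>

	static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

	/* Returns 0 on success, -EAGAIN if another updater holds the flag. */
	static int update_cached_time(void)
	{
		if (atomic_flag_test_and_set(&cfg_busy))
			return -EAGAIN;

		/* ... read the clock and update the per-ring caches ... */

		atomic_flag_clear(&cfg_busy);
		return 0;
	}

	/* The caller picks the next delay exactly as the patch does. */
	static unsigned int next_delay_ms(int err)
	{
		return err ? 10 : 500;	/* retry quickly on contention */
	}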

@@ -332,6 +332,11 @@ static inline bool ice_ring_is_xdp(struct ice_ring *ring)
 	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
 }
 
+enum ice_container_type {
+	ICE_RX_CONTAINER,
+	ICE_TX_CONTAINER,
+};
+
 struct ice_ring_container {
 	/* head of linked-list of rings */
 	struct ice_ring *ring;
@@ -340,10 +345,16 @@ struct ice_ring_container {
 	/* this matches the maximum number of ITR bits, but in usec
 	 * values, so it is shifted left one bit (bit zero is ignored)
 	 */
-	u16 itr_setting:13;
-	u16 itr_reserved:2;
-	u16 itr_mode:1;
+	union {
+		struct {
+			u16 itr_setting:13;
+			u16 itr_reserved:2;
+			u16 itr_mode:1;
+		};
+		u16 itr_settings;
+	};
+	enum ice_container_type type;
 };
 
 struct ice_coalesce_stored {
 	u16 itr_tx;
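The union is what makes the ice_lib.c hunks above correct: saving and restoring itr_settings copies all three bitfields at once, where assigning only itr_setting would silently drop itr_mode across a rebuild. A self-contained illustration of the technique:

	#include <assert.h>
	#include <stdint.h>

	struct itr {
		union {
			struct {
				uint16_t setting:13;
				uint16_t reserved:2;
				uint16_t mode:1;
			};
			uint16_t settings;	/* all three fields as one word */
		};
	};

	int main(void)
	{
		struct itr a = { .settings = 0 }, b = { .settings = 0 };

		a.setting = 84;
		a.mode = 1;

		b.settings = a.settings;	/* copies setting, reserved and mode */
		assert(b.setting == 84 && b.mode == 1);
		return 0;
	}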

@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
 			break;
 		}
 
-		if (adapter->link_speed != SPEED_1000)
+		if (adapter->link_speed != SPEED_1000 ||
+		    !hw->phy.ops.read_reg)
 			goto no_wait;
 
 		/* wait for Remote receiver status OK */

@@ -3542,6 +3542,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 		}
 	}
 
+	if (params->xdp_prog) {
+		if (features & NETIF_F_LRO) {
+			netdev_warn(netdev, "LRO is incompatible with XDP\n");
+			features &= ~NETIF_F_LRO;
+		}
+	}
+
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
 		features &= ~NETIF_F_RXHASH;
 		if (netdev->features & NETIF_F_RXHASH)
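This runs in the driver's feature-fixup path, whose job is to reconcile mutually exclusive offloads before they are committed to the device. A generic sketch of the pattern with made-up feature flags:

	#include <stdio.h>

	#define F_LRO (1u << 0)
	#define F_XDP (1u << 1)

	/* Reconcile requested features; LRO and XDP are exclusive here. */
	static unsigned int fix_features(unsigned int requested)
	{
		if ((requested & F_XDP) && (requested & F_LRO)) {
			fprintf(stderr, "LRO is incompatible with XDP, disabling LRO\n");
			requested &= ~F_LRO;
		}
		return requested;
	}

Dropping the flag (with a warning) rather than failing the request mirrors how netdev feature fixups behave: the user gets the closest valid configuration.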

@@ -846,7 +846,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_action_dest *dests,
 				   u32 num_of_dests,
-				   bool ignore_flow_level)
+				   bool ignore_flow_level,
+				   u32 flow_source)
 {
 	struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
 	struct mlx5dr_action **ref_actions;
@@ -914,7 +915,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 					  reformat_req,
 					  &action->dest_tbl->fw_tbl.id,
 					  &action->dest_tbl->fw_tbl.group_id,
-					  ignore_flow_level);
+					  ignore_flow_level,
+					  flow_source);
 	if (ret)
 		goto free_action;

@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    bool reformat_req,
 			    u32 *tbl_id,
 			    u32 *group_id,
-			    bool ignore_flow_level)
+			    bool ignore_flow_level,
+			    u32 flow_source)
 {
 	struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
 	struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 	fte_info.val = val;
 	fte_info.dest_arr = dest;
 	fte_info.ignore_flow_level = ignore_flow_level;
+	fte_info.flow_context.flow_source = flow_source;
 
 	ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
 	if (ret) {

@@ -1394,7 +1394,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    bool reformat_req,
 			    u32 *tbl_id,
 			    u32 *group_id,
-			    bool ignore_flow_level);
+			    bool ignore_flow_level,
+			    u32 flow_source);
 void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
 			      u32 group_id);
 
 #endif /* _DR_TYPES_H_ */

@@ -492,11 +492,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 	} else if (num_term_actions > 1) {
 		bool ignore_flow_level =
 			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+		u32 flow_source = fte->flow_context.flow_source;
 
 		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
 								term_actions,
 								num_term_actions,
-								ignore_flow_level);
+								ignore_flow_level,
+								flow_source);
 		if (!tmp_action) {
 			err = -EOPNOTSUPP;
 			goto free_actions;

@@ -96,7 +96,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_action_dest *dests,
 				   u32 num_of_dests,
-				   bool ignore_flow_level);
+				   bool ignore_flow_level,
+				   u32 flow_source);
 
 struct mlx5dr_action *mlx5dr_action_create_drop(void);

@@ -3612,7 +3612,8 @@ static void ql_reset_work(struct work_struct *work)
 		qdev->mem_map_registers;
 	unsigned long hw_flags;
 
-	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+	    test_bit(QL_RESET_START, &qdev->flags)) {
 		clear_bit(QL_LINK_MASTER, &qdev->flags);
 
 		/*

@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 		return -ENOMEM;
 
 	/* Enable pci device */
-	ret = pci_enable_device(pdev);
+	ret = pcim_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
 			__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
 			pcim_iounmap_regions(pdev, BIT(i));
 		break;
 	}
-
-	pci_disable_device(pdev);
 }
 
 static int __maybe_unused stmmac_pci_suspend(struct device *dev)

@@ -1370,9 +1370,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
 	struct gsi_event *event_done;
 	struct gsi_event *event;
 	struct gsi_trans *trans;
+	u32 trans_count = 0;
 	u32 byte_count = 0;
-	u32 old_index;
 	u32 event_avail;
+	u32 old_index;
 
 	trans_info = &channel->trans_info;
@@ -1393,6 +1394,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
 	do {
 		trans->len = __le16_to_cpu(event->len);
 		byte_count += trans->len;
+		trans_count++;
 
 		/* Move on to the next event and transaction */
 		if (--event_avail)
@@ -1404,7 +1406,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
 	/* We record RX bytes when they are received */
 	channel->byte_count += byte_count;
-	channel->trans_count++;
+	channel->trans_count += trans_count;
 }
 
 /* Initialize a ring, including allocating DMA memory for its entries */
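Before this change the loop summed bytes per event but bumped the transaction counter only once per batch, so the two statistics drifted apart. A sketch of the corrected accounting with invented types:

	struct stats { unsigned long bytes; unsigned long transactions; };

	/* Sum a completed batch of events, counting each transaction once. */
	static void account_batch(struct stats *s, const unsigned short *lens,
				  unsigned int n_events)
	{
		unsigned long byte_count = 0;
		unsigned long trans_count = 0;
		unsigned int i;

		for (i = 0; i < n_events; i++) {
			byte_count += lens[i];
			trans_count++;
		}

		s->bytes += byte_count;
		s->transactions += trans_count;	/* was effectively "+= 1" per batch */
	}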

@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
 	path->encap.proto = htons(ETH_P_PPP_SES);
 	path->encap.id = be16_to_cpu(po->num);
 	memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+	memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
 	path->dev = ctx->dev;
 	ctx->dev = dev;

@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 				if (dma_mapping_error(&adapter->pdev->dev,
 						      rbi->dma_addr)) {
 					dev_kfree_skb_any(rbi->skb);
+					rbi->skb = NULL;
 					rq->stats.rx_buf_alloc_failure++;
 					break;
 				}
@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 				if (dma_mapping_error(&adapter->pdev->dev,
 						      rbi->dma_addr)) {
 					put_page(rbi->page);
+					rbi->page = NULL;
 					rq->stats.rx_buf_alloc_failure++;
 					break;
 				}
@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 	u32 i, ring_idx;
 	struct Vmxnet3_RxDesc *rxd;
 
+	/* ring has already been cleaned up */
+	if (!rq->rx_ring[0].base)
+		return;
+
 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
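Two defensive habits are being added here: clear a pointer immediately after freeing it so a later cleanup pass cannot free it twice, and make the cleanup routine itself idempotent. A standalone illustration:

	#include <stdlib.h>

	struct slot { void *buf; };

	static void fill_error_path(struct slot *s)
	{
		free(s->buf);
		s->buf = NULL;	/* ring still holds the slot; no stale pointer */
	}

	static void cleanup(struct slot *slots, unsigned int n)
	{
		unsigned int i;

		if (!slots)	/* already cleaned up: cleanup is idempotent */
			return;

		for (i = 0; i < n; i++) {
			free(slots[i].buf);	/* free(NULL) is a no-op */
			slots[i].buf = NULL;
		}
	}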

@@ -118,109 +118,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
 	mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
 }
 
-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
-{
-	static const struct {
-		u32 phys;
-		u32 mapped;
-		u32 size;
-	} fixed_map[] = {
-		{ 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
-		{ 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
-		{ 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
-		{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
-		{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
-		{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
-		{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
-		{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
-		{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
-		{ 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
-		{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
-		{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
-		{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
-		{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
-		{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
-		{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
-		{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
-		{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
-		{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
-		{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
-		{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
-		{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
-		{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
-		{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
-		{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
-		{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
-		{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
-		{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
-		{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
-		{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
-		{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
-		{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
-		{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
-		{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
-		{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
-		{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
-		{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
-		{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
-		{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
-		{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
-		{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
-		{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
-	};
-	int i;
-
-	if (addr < 0x100000)
-		return addr;
-
-	for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
-		u32 ofs;
-
-		if (addr < fixed_map[i].phys)
-			continue;
-
-		ofs = addr - fixed_map[i].phys;
-		if (ofs > fixed_map[i].size)
-			continue;
-
-		return fixed_map[i].mapped + ofs;
-	}
-
-	if ((addr >= 0x18000000 && addr < 0x18c00000) ||
-	    (addr >= 0x70000000 && addr < 0x78000000) ||
-	    (addr >= 0x7c000000 && addr < 0x7c400000))
-		return mt7921_reg_map_l1(dev, addr);
-
-	dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
-		addr);
-
-	return 0;
-}
-
-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
-{
-	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-	u32 addr = __mt7921_reg_addr(dev, offset);
-
-	return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
-	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-	u32 addr = __mt7921_reg_addr(dev, offset);
-
-	dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
-	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-	u32 addr = __mt7921_reg_addr(dev, offset);
-
-	return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
 static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
 {
 	if (force) {
@@ -380,20 +277,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
 
 int mt7921_dma_init(struct mt7921_dev *dev)
 {
-	struct mt76_bus_ops *bus_ops;
 	int ret;
 
-	dev->bus_ops = dev->mt76.bus;
-	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
-			       GFP_KERNEL);
-	if (!bus_ops)
-		return -ENOMEM;
-
-	bus_ops->rr = mt7921_rr;
-	bus_ops->wr = mt7921_wr;
-	bus_ops->rmw = mt7921_rmw;
-	dev->mt76.bus = bus_ops;
-
 	mt76_dma_attach(&dev->mt76);
 
 	ret = mt7921_dma_disable(dev, true);

@@ -1306,8 +1306,6 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
 
 int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 {
-	struct mt76_phy *mphy = &dev->mt76.phy;
-	struct mt76_connac_pm *pm = &dev->pm;
 	int i, err = 0;
 
 	for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -1320,16 +1318,8 @@ int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 	if (i == MT7921_DRV_OWN_RETRY_COUNT) {
 		dev_err(dev->mt76.dev, "driver own failed\n");
 		err = -EIO;
-		goto out;
 	}
 
-	mt7921_wpdma_reinit_cond(dev);
-	clear_bit(MT76_STATE_PM, &mphy->state);
-	pm->stats.last_wake_event = jiffies;
-	pm->stats.doze_time += pm->stats.last_wake_event -
-			       pm->stats.last_doze_event;
-out:
 	return err;
 }
 
@@ -1345,6 +1335,16 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 		goto out;
 
 	err = __mt7921_mcu_drv_pmctrl(dev);
+	if (err < 0)
+		goto out;
+
+	mt7921_wpdma_reinit_cond(dev);
+	clear_bit(MT76_STATE_PM, &mphy->state);
+	pm->stats.last_wake_event = jiffies;
+	pm->stats.doze_time += pm->stats.last_wake_event -
+			       pm->stats.last_doze_event;
+
 out:
 	mutex_unlock(&pm->mutex);

@@ -88,6 +88,110 @@ static void mt7921_irq_tasklet(unsigned long data)
 		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
 }
 
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+	static const struct {
+		u32 phys;
+		u32 mapped;
+		u32 size;
+	} fixed_map[] = {
+		{ 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+		{ 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
+		{ 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
+		{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+		{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+		{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+		{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+		{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+		{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+		{ 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
+		{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+		{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+		{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+		{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+		{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+		{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+		{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+		{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+		{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+		{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+		{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+		{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+		{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+		{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+		{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+		{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+		{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+		{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+		{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+		{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+		{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+		{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+		{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+		{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+		{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+		{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+		{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+		{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+		{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+		{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+		{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+		{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+	};
+	int i;
+
+	if (addr < 0x100000)
+		return addr;
+
+	for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+		u32 ofs;
+
+		if (addr < fixed_map[i].phys)
+			continue;
+
+		ofs = addr - fixed_map[i].phys;
+		if (ofs > fixed_map[i].size)
+			continue;
+
+		return fixed_map[i].mapped + ofs;
+	}
+
+	if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+	    (addr >= 0x70000000 && addr < 0x78000000) ||
+	    (addr >= 0x7c000000 && addr < 0x7c400000))
+		return mt7921_reg_map_l1(dev, addr);
+
+	dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+		addr);
+
+	return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+	u32 addr = __mt7921_reg_addr(dev, offset);
+
+	return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+	u32 addr = __mt7921_reg_addr(dev, offset);
+
+	dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+	u32 addr = __mt7921_reg_addr(dev, offset);
+
+	return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
 static int mt7921_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *id)
 {
@@ -110,6 +214,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
 		.sta_remove = mt7921_mac_sta_remove,
 		.update_survey = mt7921_update_channel,
 	};
+	struct mt76_bus_ops *bus_ops;
 	struct mt7921_dev *dev;
 	struct mt76_dev *mdev;
 	int ret;
@@ -145,6 +250,22 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
 	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
 	tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
 
+	dev->bus_ops = dev->mt76.bus;
+	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+			       GFP_KERNEL);
+	if (!bus_ops)
+		return -ENOMEM;
+
+	bus_ops->rr = mt7921_rr;
+	bus_ops->wr = mt7921_wr;
+	bus_ops->rmw = mt7921_rmw;
+	dev->mt76.bus = bus_ops;
+
+	ret = __mt7921_mcu_drv_pmctrl(dev);
+	if (ret)
+		return ret;
+
 	mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
 		    (mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
 	dev_err(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
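The remap helper is a linear search over physical windows followed by an offset rebase; with the table above, 0x820e4010 falls in the 0x820e4000/0x21000 window and resolves to 0x21010. A trimmed, compilable sketch of the lookup with just two of the windows:

	#include <stdint.h>
	#include <stdio.h>

	struct win { uint32_t phys, mapped, size; };

	static const struct win fixed_map[] = {
		{ 0x820e4000, 0x21000, 0x0400 },
		{ 0x820e5000, 0x21400, 0x0800 },
	};

	/* Rebase a physical address into the mapped register window. */
	static uint32_t reg_addr(uint32_t addr)
	{
		for (unsigned int i = 0;
		     i < sizeof(fixed_map) / sizeof(fixed_map[0]); i++) {
			uint32_t ofs = addr - fixed_map[i].phys;

			if (addr >= fixed_map[i].phys && ofs <= fixed_map[i].size)
				return fixed_map[i].mapped + ofs;
		}
		return 0;	/* unsupported address */
	}

	int main(void)
	{
		printf("0x820e4010 -> 0x%x\n", reg_addr(0x820e4010));	/* 0x21010 */
		return 0;
	}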

@@ -4358,6 +4358,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
 		nvme_start_queues(ctrl);
+		nvme_mpath_update(ctrl);
 	}
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);

@@ -574,8 +574,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
 	ns->ana_grpid = le32_to_cpu(desc->grpid);
 	ns->ana_state = desc->state;
 	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
-	if (nvme_state_is_live(ns->ana_state))
+	/*
+	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
+	 * and in turn to this path device. However we cannot accept this I/O
+	 * if the controller is not live. This may deadlock if called from
+	 * nvme_mpath_init_identify() and the ctrl will never complete
+	 * initialization, preventing I/O from completing. For this case we
+	 * will reprocess the ANA log page in nvme_mpath_update() once the
+	 * controller is ready.
+	 */
+	if (nvme_state_is_live(ns->ana_state) &&
+	    ns->ctrl->state == NVME_CTRL_LIVE)
 		nvme_mpath_set_live(ns);
 }
@@ -662,6 +671,18 @@ static void nvme_ana_work(struct work_struct *work)
 	nvme_read_ana_log(ctrl);
 }
 
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+	u32 nr_change_groups = 0;
+
+	if (!ctrl->ana_log_buf)
+		return;
+
+	mutex_lock(&ctrl->ana_lock);
+	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+	mutex_unlock(&ctrl->ana_lock);
+}
+
 static void nvme_anatt_timeout(struct timer_list *t)
 {
 	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

@@ -776,6 +776,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -850,6 +851,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
 		"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
 	return 0;
 }
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 }

@@ -3379,7 +3379,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_128_BYTES_SQES |
 		NVME_QUIRK_SHARED_TAGS |
 		NVME_QUIRK_SKIP_CID_GEN },
+	{ PCI_DEVICE(0x144d, 0xa808),   /* Samsung X5 */
+		.driver_data =  NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_NO_DEEPEST_PS |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };

@@ -978,7 +978,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
 	mutex_unlock(&ctrl->lock);
 
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 void nvmet_execute_keep_alive(struct nvmet_req *req)

@@ -1554,7 +1554,7 @@ static void nvmet_port_release(struct config_item *item)
 	struct nvmet_port *port = to_nvmet_port(item);
 
 	/* Let inflight controllers teardown complete */
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	list_del(&port->global_entry);
 	kfree(port->ana_state);

@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
 /*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 	list_add_tail(&aen->entry, &ctrl->async_events);
 	mutex_unlock(&ctrl->lock);
 
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvmet_wq, &ctrl->async_event_work);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	if (reset_tbkas) {
 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
 			 ctrl->cntlid);
-		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 		return;
 	}
@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		 ctrl->cntlid, ctrl->kato);
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1477,7 +1480,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		schedule_work(&ctrl->fatal_err_work);
+		queue_work(nvmet_wq, &ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);
 }
@@ -1617,9 +1620,15 @@ static int __init nvmet_init(void)
 		goto out_free_zbd_work_queue;
 	}
 
+	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+	if (!nvmet_wq) {
+		error = -ENOMEM;
+		goto out_free_buffered_work_queue;
+	}
+
 	error = nvmet_init_discovery();
 	if (error)
-		goto out_free_work_queue;
+		goto out_free_nvmet_work_queue;
 
 	error = nvmet_init_configfs();
 	if (error)
@@ -1628,7 +1637,9 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
 	nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+	destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
 	destroy_workqueue(buffered_io_wq);
 out_free_zbd_work_queue:
 	destroy_workqueue(zbd_wq);
@@ -1640,6 +1651,7 @@ static void __exit nvmet_exit(void)
 	nvmet_exit_configfs();
 	nvmet_exit_discovery();
 	ida_destroy(&cntlid_ida);
+	destroy_workqueue(nvmet_wq);
 	destroy_workqueue(buffered_io_wq);
 	destroy_workqueue(zbd_wq);
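Every nvmet hunk that follows is the same mechanical conversion: work that used to ride the system workqueue moves to the dedicated nvmet_wq, so flushes no longer stall on unrelated system work and WQ_MEM_RECLAIM guarantees forward progress under memory pressure. The lifecycle, reduced to a minimal kernel-module sketch (the my_* names are invented; the workqueue API calls are the real ones used above):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static int __init my_init(void)
	{
		my_wq = alloc_workqueue("my-wq", WQ_MEM_RECLAIM, 0);
		if (!my_wq)
			return -ENOMEM;
		return 0;
	}

	static void __exit my_exit(void)
	{
		/* queue_work(my_wq, &some_work) replaces schedule_work();
		 * flush_workqueue(my_wq) replaces flush_scheduled_work().
		 */
		flush_workqueue(my_wq);
		destroy_workqueue(my_wq);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");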

@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		if (!schedule_work(&assoc->del_work))
+		if (!queue_work(nvmet_wq, &assoc->del_work))
 			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
 	}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 		nvmet_fc_tgtport_put(tgtport);
 
 		if (found_ctrl) {
-			if (!schedule_work(&assoc->del_work))
+			if (!queue_work(nvmet_wq, &assoc->del_work))
 				/* already deleting - release local reference */
 				nvmet_fc_tgt_a_put(assoc);
 			return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
 	iod->rqstdatalen = lsreqbuf_len;
 	iod->hosthandle = hosthandle;
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 
 	return 0;
 }

@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 
 	return ret;
 }
@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
 		spin_lock(&rport->lock);
 		list_add_tail(&rport->ls_list, &tls_req->ls_list);
 		spin_unlock(&rport->lock);
-		schedule_work(&rport->ls_work);
+		queue_work(nvmet_wq, &rport->ls_work);
 	}
 
 	return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 
 	return ret;
 }
@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
 		spin_lock(&tport->lock);
 		list_add_tail(&tport->ls_list, &tls_req->ls_list);
 		spin_unlock(&tport->lock);
-		schedule_work(&tport->ls_work);
+		queue_work(nvmet_wq, &tport->ls_work);
 	}
 
 	return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
 	tgt_rscn->tport = tgtport->private;
 	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 
-	schedule_work(&tgt_rscn->work);
+	queue_work(nvmet_wq, &tgt_rscn->work);
 }
 
 static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
 	kref_init(&tfcp_req->ref);
 
-	schedule_work(&tfcp_req->fcp_rcv_work);
+	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
 
 	return 0;
 }
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->tio_done_work);
+	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
 }
 
 static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 	if (abortio)
 		/* leave the reference while the work item is scheduled */
-		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
 	else {
 		/*
 		 * as the io has already had the done callback made,

@@ -292,7 +292,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_flush_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -352,7 +352,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
 	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -382,7 +382,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
-	schedule_work(&req->f.work);
+	queue_work(nvmet_wq, &req->f.work);
 }
 
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)

@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		iod->req.transfer_len = blk_rq_payload_bytes(req);
 	}
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 	return BLK_STS_OK;
 }
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 		return;
 	}
 
-	schedule_work(&iod->work);
+	queue_work(nvmet_wq, &iod->work);
 }
 
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,

@@ -365,6 +365,7 @@ struct nvmet_req {
 extern struct workqueue_struct *buffered_io_wq;
 extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {

@@ -281,7 +281,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 	if (req->p.use_workqueue || effects) {
 		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
 		req->p.rq = rq;
-		schedule_work(&req->p.work);
+		queue_work(nvmet_wq, &req->p.work);
 	} else {
 		rq->end_io_data = req;
 		blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,

@@ -1583,7 +1583,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1668,7 +1668,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 }
@@ -1698,7 +1698,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	schedule_work(&queue->release_work);
+	queue_work(nvmet_wq, &queue->release_work);
 }
 
 /**
@@ -1772,7 +1772,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		if (!queue) {
 			struct nvmet_rdma_port *port = cm_id->context;
 
-			schedule_delayed_work(&port->repair_work, 0);
+			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
 			break;
 		}
 		fallthrough;
@@ -1902,7 +1902,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
 	nvmet_rdma_disable_port(port);
 	ret = nvmet_rdma_enable_port(port);
 	if (ret)
-		schedule_delayed_work(&port->repair_work, 5 * HZ);
+		queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
 }
 
 static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2046,7 +2046,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
 	}
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 }
 
 static struct ib_client nvmet_rdma_ib_client = {

@@ -1251,7 +1251,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 	spin_lock(&queue->state_lock);
 	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
 		queue->state = NVMET_TCP_Q_DISCONNECTING;
-		schedule_work(&queue->release_work);
+		queue_work(nvmet_wq, &queue->release_work);
 	}
 	spin_unlock(&queue->state_lock);
 }
@@ -1662,7 +1662,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
 		goto out;
 
 	if (sk->sk_state == TCP_LISTEN)
-		schedule_work(&port->accept_work);
+		queue_work(nvmet_wq, &port->accept_work);
out:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
@@ -1793,7 +1793,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
 
 	if (sq->qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_scheduled_work();
+		flush_workqueue(nvmet_wq);
 	}
 
 	queue->nr_cmds = sq->size * 2;
@@ -1854,12 +1854,12 @@ static void __exit nvmet_tcp_exit(void)
 
 	nvmet_unregister_transport(&nvmet_tcp_ops);
 
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 	mutex_lock(&nvmet_tcp_queue_mutex);
 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 	mutex_unlock(&nvmet_tcp_queue_mutex);
-	flush_scheduled_work();
+	flush_workqueue(nvmet_wq);
 
 	destroy_workqueue(nvmet_tcp_wq);
 }

@@ -272,7 +272,6 @@ struct advk_pcie {
 		u32 actions;
 	} wins[OB_WIN_COUNT];
 	u8 wins_count;
-	int irq;
 	struct irq_domain *rp_irq_domain;
 	struct irq_domain *irq_domain;
 	struct irq_chip irq_chip;
@@ -1572,26 +1571,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
 	}
 }
 
-static void advk_pcie_irq_handler(struct irq_desc *desc)
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
 {
-	struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	u32 val, mask, status;
+	struct advk_pcie *pcie = arg;
+	u32 status;
 
-	chained_irq_enter(chip, desc);
+	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+	if (!(status & PCIE_IRQ_CORE_INT))
+		return IRQ_NONE;
 
-	val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
-	mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
-	status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
+	advk_pcie_handle_int(pcie);
 
-	if (status & PCIE_IRQ_CORE_INT) {
-		advk_pcie_handle_int(pcie);
+	/* Clear interrupt */
+	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
 
-		/* Clear interrupt */
-		advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
-	}
-
-	chained_irq_exit(chip, desc);
+	return IRQ_HANDLED;
 }
 
 static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -1673,7 +1667,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	struct advk_pcie *pcie;
 	struct pci_host_bridge *bridge;
 	struct resource_entry *entry;
-	int ret;
+	int ret, irq;
 
 	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
 	if (!bridge)
@@ -1759,9 +1753,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
-	pcie->irq = platform_get_irq(pdev, 0);
-	if (pcie->irq < 0)
-		return pcie->irq;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+			       pcie);
+	if (ret) {
+		dev_err(dev, "Failed to register interrupt\n");
+		return ret;
+	}
 
 	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
 						       "reset-gpios", 0,
@@ -1818,15 +1820,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
-
 	bridge->sysdata = pcie;
 	bridge->ops = &advk_pcie_ops;
 	bridge->map_irq = advk_pcie_map_irq;
 
 	ret = pci_host_probe(bridge);
 	if (ret < 0) {
-		irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
 		advk_pcie_remove_rp_irq_domain(pcie);
 		advk_pcie_remove_msi_irq_domain(pcie);
 		advk_pcie_remove_irq_domain(pcie);
@@ -1875,9 +1874,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
 	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
 	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
 
-	/* Remove IRQ handler */
-	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
-
 	/* Remove IRQ domains */
 	advk_pcie_remove_rp_irq_domain(pcie);
 	advk_pcie_remove_msi_irq_domain(pcie);
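A chained handler claims the whole interrupt line, which breaks when the line is shared (for example with legacy INTx on the same IRQ). A regular shared handler must read its own status first and return IRQ_NONE when the interrupt is not its own. A hedged sketch of that shape, with an invented device and register layout:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	/* Hypothetical device; the status register layout is a stand-in. */
	struct my_dev { void __iomem *base; };

	#define MY_IRQ_PENDING 0x1

	static irqreturn_t my_irq_handler(int irq, void *arg)
	{
		struct my_dev *dev = arg;
		u32 status = readl(dev->base);	/* device's own status register */

		if (!(status & MY_IRQ_PENDING))
			return IRQ_NONE;	/* not ours: let other sharers run */

		/* ... handle the pending events ... */
		writel(MY_IRQ_PENDING, dev->base);	/* acknowledge */
		return IRQ_HANDLED;
	}

Registration then mirrors the patch: devm_request_irq(dev, irq, my_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "my-dev", priv), and the devm_ lifetime removes the manual teardown the old chained handler needed.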

@@ -2888,6 +2888,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
 		},
+		/*
+		 * Downstream device is not accessible after putting a root port
+		 * into D3cold and back into D0 on Elo i2.
+		 */
+		.ident = "Elo i2",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
+		},
 	},
 #endif
 	{ }
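
Note that, as merged here, the Elo i2 fields land inside the existing Gigabyte entry rather than in a fresh { ... } block, so the later designated initializers silently override the earlier ones; upstream later added the missing braces in a follow-up fix. A stand-alone demonstration of that C behavior:

#include <stdio.h>

struct id { const char *ident; };

/* Duplicate designated initializers: the later .ident wins, and
 * compilers only warn with -Woverride-init (enabled by -Wextra). */
static const struct id tbl[] = {
	{
		.ident = "X299 DESIGNARE EX-CF",
		.ident = "Elo i2",	/* overrides the line above */
	},
};

int main(void)
{
	printf("%s\n", tbl[0].ident);	/* prints "Elo i2" */
	return 0;
}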

--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c

@@ -1236,18 +1236,12 @@ FUNC_GROUP_DECL(SALT8, AA12);
 FUNC_GROUP_DECL(WDTRST4, AA12);
 
 #define AE12 196
-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
-			SIG_DESC_SET(SCU438, 4));
 SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
-	  SIG_EXPR_LIST_PTR(AE12, GPIOY4));
+PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));
 
 #define AF12 197
-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
-			SIG_DESC_SET(SCU438, 5));
 SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
-	  SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));
 
 #define AC12 198
 SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1520,9 +1514,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
 PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
 
 GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
 GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
+FUNC_DECL_1(FWSPID, FWSPID);
 FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
 FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
 /*
@@ -1918,7 +1911,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
 	ASPEED_PINCTRL_GROUP(FSI2),
 	ASPEED_PINCTRL_GROUP(FWSPIABR),
 	ASPEED_PINCTRL_GROUP(FWSPID),
-	ASPEED_PINCTRL_GROUP(FWQSPID),
 	ASPEED_PINCTRL_GROUP(FWSPIWP),
 	ASPEED_PINCTRL_GROUP(GPIT0),
 	ASPEED_PINCTRL_GROUP(GPIT1),

--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c

@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
 	MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
 	MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
 	MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
-	MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
+	MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
 	MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
 	MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
 	MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),
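
The fix replaces the reversed range (114, 112) with (114, 116); assuming the usual inclusive start/end match in the lookup helper, a reversed range is empty, so pins 114-116 could never reach their IES/SMT register bit. A stand-alone sketch of that assumption:

#include <stdio.h>
#include <stdbool.h>

/* Minimal model of a (start, end) table lookup with an inclusive
 * match, as assumed above; not the driver's actual helper. */
struct pin_range { int start, end; };

static bool in_range(struct pin_range r, int pin)
{
	return pin >= r.start && pin <= r.end;
}

int main(void)
{
	struct pin_range old = { 114, 112 }, fixed = { 114, 116 };

	printf("pin 115, old:   %d\n", in_range(old, 115));	/* 0: empty range */
	printf("pin 115, fixed: %d\n", in_range(fixed, 115));	/* 1 */
	return 0;
}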

--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c

@@ -25,6 +25,9 @@
 
 #define CIRC_ADD(idx, size, value)	(((idx) + (value)) & ((size) - 1))
 
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
 /**
  * struct cros_ec_debugfs - EC debugging information.
  *
@@ -33,7 +36,6 @@
  * @log_buffer: circular buffer for console log information
  * @read_msg: preallocated EC command and buffer to read console log
  * @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
  * @log_poll_work: recurring task to poll EC for new console log data
  * @panicinfo_blob: panicinfo debugfs blob
  */
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
 	struct circ_buf log_buffer;
 	struct cros_ec_command *read_msg;
 	struct mutex log_mutex;
-	wait_queue_head_t log_wq;
 	struct delayed_work log_poll_work;
 	/* EC panicinfo */
 	struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
 			buf_space--;
 		}
 
-		wake_up(&debug_info->log_wq);
+		wake_up(&cros_ec_debugfs_log_wq);
 	}
 
 	mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
 
 		mutex_unlock(&debug_info->log_mutex);
 
-		ret = wait_event_interruptible(debug_info->log_wq,
+		ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
 					       CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
 		if (ret < 0)
 			return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
 	struct cros_ec_debugfs *debug_info = file->private_data;
 	__poll_t mask = 0;
 
-	poll_wait(file, &debug_info->log_wq, wait);
+	poll_wait(file, &cros_ec_debugfs_log_wq, wait);
 
 	mutex_lock(&debug_info->log_mutex);
 	if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
 	debug_info->log_buffer.tail = 0;
 
 	mutex_init(&debug_info->log_mutex);
-	init_waitqueue_head(&debug_info->log_wq);
 
 	debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
 			    debug_info, &cros_ec_console_log_fops);
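
Making the waitqueue a file-scope static detaches log readers' sleeps from the per-device structure's lifetime. Separately, the CIRC_ADD() macro in the context above relies on the buffer size being a power of two; a stand-alone check of that masking arithmetic:

#include <stdio.h>

/* Same macro shape as the driver: advance an index around a
 * power-of-two circular buffer with a mask instead of a modulo. */
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))

int main(void)
{
	int head = 30;

	head = CIRC_ADD(head, 32, 5);	/* (30 + 5) & 31 == 3: wrapped */
	printf("%d\n", head);
	return 0;
}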

--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c

@@ -26,6 +26,15 @@ struct class *rtc_class;
 static void rtc_device_release(struct device *dev)
 {
 	struct rtc_device *rtc = to_rtc_device(dev);
+	struct timerqueue_head *head = &rtc->timerqueue;
+	struct timerqueue_node *node;
+
+	mutex_lock(&rtc->ops_lock);
+	while ((node = timerqueue_getnext(head)))
+		timerqueue_del(head, node);
+	mutex_unlock(&rtc->ops_lock);
+
+	cancel_work_sync(&rtc->irqwork);
 
 	ida_simple_remove(&rtc_ida, rtc->id);
 	mutex_destroy(&rtc->ops_lock);
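
The release callback now drains every queued alarm timer under ops_lock and cancels irqwork before the device memory goes away, so nothing can fire against freed memory. The drain loop's shape, reduced to a stand-alone toy list rather than the kernel's timerqueue API:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };
struct queue { struct node *head; };

static struct node *getnext(struct queue *q) { return q->head; }

static void del(struct queue *q, struct node *n)
{
	q->head = n->next;
	free(n);
}

int main(void)
{
	struct queue q = { NULL };
	struct node *n;

	for (int i = 0; i < 3; i++) {	/* queue three "timers" */
		n = malloc(sizeof(*n));
		n->next = q.head;
		q.head = n;
	}

	while ((n = getnext(&q)))	/* same drain shape as the hunk */
		del(&q, n);

	printf("drained: %s\n", q.head ? "no" : "yes");
	return 0;
}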

--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c

@@ -146,6 +146,17 @@ again:
 }
 EXPORT_SYMBOL_GPL(mc146818_get_time);
 
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		return true;
+#endif
+	return false;
+}
+
 /* Set the current date and time in the real time clock. */
 int mc146818_set_time(struct rtc_time *time)
 {
@@ -219,6 +230,9 @@ int mc146818_set_time(struct rtc_time *time)
 	save_control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
 	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+	if (apply_amd_register_a_behavior())
+		CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+	else
 	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
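
On AMD and Hygon parts the divider field of register A doubles as a bank select, so the fix clears that bit instead of setting the divider-reset value. The two writes differ only in bit arithmetic; a stand-alone comparison with illustrative bit values (not the datasheet's):

#include <stdio.h>

#define RTC_DIV_RESET2		0x70	/* illustrative value */
#define RTC_AMD_BANK_SELECT	0x10	/* illustrative value */

int main(void)
{
	unsigned char save_freq_select = 0x36;	/* example register value */

	printf("legacy: 0x%02x\n",
	       (unsigned char)(save_freq_select | RTC_DIV_RESET2));
	printf("AMD:    0x%02x\n",
	       (unsigned char)(save_freq_select & ~RTC_AMD_BANK_SELECT));
	return 0;
}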

--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c

@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
 static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
 	struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
-	unsigned int buf[5], ctrl2;
+	u8 buf[5];
+	unsigned int ctrl2;
 	int ret;
 
 	ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);

--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c

@@ -138,7 +138,7 @@ struct sun6i_rtc_dev {
 	const struct sun6i_rtc_clk_data *data;
 	void __iomem *base;
 	int irq;
-	unsigned long alarm;
+	time64_t alarm;
 
 	struct clk_hw hw;
 	struct clk_hw *int_osc;
@@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
 	struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
 	struct rtc_time *alrm_tm = &wkalrm->time;
 	struct rtc_time tm_now;
-	unsigned long time_now = 0;
-	unsigned long time_set = 0;
-	unsigned long time_gap = 0;
-	int ret = 0;
+	time64_t time_now, time_set;
+	int ret;
 
 	ret = sun6i_rtc_gettime(dev, &tm_now);
 	if (ret < 0) {
@@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
 		return -EINVAL;
 	}
 
-	time_gap = time_set - time_now;
-
-	if (time_gap > U32_MAX) {
+	if ((time_set - time_now) > U32_MAX) {
 		dev_err(dev, "Date too far in the future\n");
 		return -EINVAL;
 	}
@@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
 
 	writel(0, chip->base + SUN6I_ALRM_COUNTER);
 	usleep_range(100, 300);
-	writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+	writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER);
 	chip->alarm = time_set;
 
 	sun6i_rtc_setaie(wkalrm->enabled, chip);
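
Moving the alarm math to time64_t matters most on 32-bit builds, where an unsigned long delta would truncate before the U32_MAX guard could reject it. Stand-alone, with int64_t standing in for time64_t:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t time_now = 1000;
	int64_t time_set = time_now + (1LL << 33);	/* far future */

	/* The 64-bit delta survives intact; a 32-bit unsigned long
	 * would have wrapped and slipped past this check. */
	if ((time_set - time_now) > UINT32_MAX)
		printf("Date too far in the future\n");
	return 0;
}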

--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c

@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
 	case SCSI_ACCESS_STATE_OPTIMAL:
 	case SCSI_ACCESS_STATE_ACTIVE:
 	case SCSI_ACCESS_STATE_LBA:
-		return BLK_STS_OK;
 	case SCSI_ACCESS_STATE_TRANSITIONING:
-		return BLK_STS_AGAIN;
+		return BLK_STS_OK;
 	default:
 		req->rq_flags |= RQF_QUIET;
 		return BLK_STS_IOERR;

--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c

@@ -3837,6 +3837,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
 
 	spin_lock_irqsave(&cmd->cmd_lock, flags);
 	if (cmd->aborted) {
+		if (cmd->sg_mapped)
+			qlt_unmap_sg(vha, cmd);
+
 		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 		/*
 		 * It's normal to see 2 calls in this path:
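
The aborted-command early return now releases the SG mapping that normal completion would otherwise have released, closing a leak. The shape of the fix as a stand-alone toy, with malloc standing in for the DMA mapping:

#include <stdio.h>
#include <stdlib.h>

struct cmd { void *mapping; int aborted; };

static void complete_cmd(struct cmd *c)
{
	if (c->aborted) {
		if (c->mapping) {	/* the fix: release on this path too */
			free(c->mapping);
			c->mapping = NULL;
		}
		return;
	}
	/* ... normal completion also releases c->mapping ... */
}

int main(void)
{
	struct cmd c = { malloc(64), 1 };

	complete_cmd(&c);
	printf("mapping after abort: %p\n", c.mapping);	/* (nil) */
	return 0;
}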

--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c

@@ -1257,6 +1257,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
 	int data_seg_len;
 
+	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+		& MASK_RSP_UPIU_DATA_SEG_LEN;
+
+	/* If data segment length is zero, rsp_field is not valid */
+	if (!data_seg_len)
+		return;
+
 	if (unlikely(lrbp->lun != rsp_field->lun)) {
 		struct scsi_device *sdev;
 		bool found = false;
@@ -1291,18 +1298,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		return;
 	}
 
-	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
-		& MASK_RSP_UPIU_DATA_SEG_LEN;
-
-	/* To flush remained rsp_list, we queue the map_work task */
-	if (!data_seg_len) {
-		if (!ufshpb_is_general_lun(hpb->lun))
-			return;
-
-		ufshpb_kick_map_work(hpb);
-		return;
-	}
-
 	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
 
 	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
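
The reorder is a validate-before-use fix: when the response carries no data segment, rsp_field holds garbage, so the length check must run before the lun comparison. The pattern with toy types:

#include <stdio.h>

struct rsp { int data_seg_len; int lun; };	/* toy stand-ins */

static void handle(const struct rsp *r)
{
	if (!r->data_seg_len)
		return;		/* payload invalid; don't read r->lun */
	printf("lun %d\n", r->lun);
}

int main(void)
{
	struct rsp empty = { 0, 42 }, full = { 8, 3 };

	handle(&empty);		/* prints nothing */
	handle(&full);		/* lun 3 */
	return 0;
}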

--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c

@@ -145,6 +145,7 @@ enum dev_state {
 	STATE_DEV_INVALID = 0,
 	STATE_DEV_OPENED,
 	STATE_DEV_INITIALIZED,
+	STATE_DEV_REGISTERING,
 	STATE_DEV_RUNNING,
 	STATE_DEV_CLOSED,
 	STATE_DEV_FAILED
@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
 		ret = -EINVAL;
 		goto out_unlock;
 	}
+	dev->state = STATE_DEV_REGISTERING;
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	ret = usb_gadget_probe_driver(&dev->driver);
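
The new STATE_DEV_REGISTERING value is set before the lock is dropped for the potentially sleeping usb_gadget_probe_driver() call, so a racing ioctl can no longer observe STATE_DEV_INITIALIZED and start a second registration. A lock-free toy model of that state check:

#include <stdio.h>

enum dev_state {
	STATE_DEV_INITIALIZED,
	STATE_DEV_REGISTERING,
	STATE_DEV_RUNNING,
};

static enum dev_state state = STATE_DEV_INITIALIZED;

static int try_run(void)
{
	if (state != STATE_DEV_INITIALIZED)
		return -1;		/* the driver returns -EINVAL here */
	state = STATE_DEV_REGISTERING;	/* marked before the long call */
	/* ... usb_gadget_probe_driver() would run (and sleep) here ... */
	state = STATE_DEV_RUNNING;
	return 0;
}

int main(void)
{
	printf("first run:  %d\n", try_run());	/* 0 */
	printf("second run: %d\n", try_run());	/* -1 */
	return 0;
}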
