Merge remote-tracking branch 'stable/linux-5.15.y' into rpi-5.15.y
@@ -163,6 +163,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 |
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1286807 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
@@ -58,7 +58,7 @@ patternProperties:
$ref: "/schemas/types.yaml#/definitions/string"
enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
-EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
+EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP,
GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 41
+SUBLEVEL = 43
EXTRAVERSION =
NAME = Trick or Treat
@@ -231,6 +231,21 @@
gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
};
};

+iio-hwmon {
+compatible = "iio-hwmon";
+io-channels = <&adc1 7>;
+};
};

+&adc1 {
+status = "okay";
+aspeed,int-vref-microvolt = <2500000>;
+pinctrl-names = "default";
+pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+&pinctrl_adc10_default &pinctrl_adc11_default
+&pinctrl_adc12_default &pinctrl_adc13_default
+&pinctrl_adc14_default &pinctrl_adc15_default>;
+};

&gpio0 {

@@ -246,6 +246,21 @@
linux,code = <11>;
};
};

+iio-hwmon {
+compatible = "iio-hwmon";
+io-channels = <&adc1 7>;
+};
};

+&adc1 {
+status = "okay";
+aspeed,int-vref-microvolt = <2500000>;
+pinctrl-names = "default";
+pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
+&pinctrl_adc10_default &pinctrl_adc11_default
+&pinctrl_adc12_default &pinctrl_adc13_default
+&pinctrl_adc14_default &pinctrl_adc15_default>;
+};

&ehci1 {
@@ -117,11 +117,6 @@
groups = "FWSPID";
};

-pinctrl_fwqspid_default: fwqspid_default {
-function = "FWSPID";
-groups = "FWQSPID";
-};
-
pinctrl_fwspiwp_default: fwspiwp_default {
function = "FWSPIWP";
groups = "FWSPIWP";

@@ -653,12 +648,12 @@
};

pinctrl_qspi1_default: qspi1_default {
-function = "QSPI1";
+function = "SPI1";
groups = "QSPI1";
};

pinctrl_qspi2_default: qspi2_default {
-function = "QSPI2";
+function = "SPI2";
groups = "QSPI2";
};
@@ -364,6 +364,41 @@
status = "disabled";
};

+adc0: adc@1e6e9000 {
+compatible = "aspeed,ast2600-adc0";
+reg = <0x1e6e9000 0x100>;
+clocks = <&syscon ASPEED_CLK_APB2>;
+resets = <&syscon ASPEED_RESET_ADC>;
+interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+#io-channel-cells = <1>;
+status = "disabled";
+};
+
+adc1: adc@1e6e9100 {
+compatible = "aspeed,ast2600-adc1";
+reg = <0x1e6e9100 0x100>;
+clocks = <&syscon ASPEED_CLK_APB2>;
+resets = <&syscon ASPEED_RESET_ADC>;
+interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+#io-channel-cells = <1>;
+status = "disabled";
+};
+
+sbc: secure-boot-controller@1e6f2000 {
+compatible = "aspeed,ast2600-sbc";
+reg = <0x1e6f2000 0x1000>;
+};
+
+video: video@1e700000 {
+compatible = "aspeed,ast2600-video-engine";
+reg = <0x1e700000 0x1000>;
+clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+<&syscon ASPEED_CLK_GATE_ECLK>;
+clock-names = "vclk", "eclk";
+interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+status = "disabled";
+};
+
gpio0: gpio@1e780000 {
#gpio-cells = <2>;
gpio-controller;
@@ -1038,7 +1038,7 @@ vector_bhb_loop8_\name:

@ bhb workaround
mov r0, #8
-3: b . + 4
+3: W(b) . + 4
subs r0, r0, #1
bne 3b
dsb
@@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame)
return -EINVAL;

frame->sp = frame->fp;
-frame->fp = *(unsigned long *)(fp);
-frame->pc = *(unsigned long *)(fp + 4);
+frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;

/* restore the registers from the stack frame */
-frame->fp = *(unsigned long *)(fp - 12);
-frame->sp = *(unsigned long *)(fp - 8);
-frame->pc = *(unsigned long *)(fp - 4);
+frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
+frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
+frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
#endif

return 0;
@@ -288,6 +288,7 @@ void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_v2_init();
+cpu_v7_spectre_bhb_init();
}

void cpu_v7_bugs_init(void)
@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1286807
{
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
{},
@@ -73,6 +73,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
}
+
+/* ensure the tags are visible before the PTE is set */
+smp_wmb();
}

int memcmp_pages(struct page *page1, struct page *page2)
@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

struct pv_time_stolen_time_region {
-struct pvclock_vcpu_stolen_time *kaddr;
+struct pvclock_vcpu_stolen_time __rcu *kaddr;
};

static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);

@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
/* return stolen time in ns by asking the hypervisor */
static u64 para_steal_clock(int cpu)
{
+struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
+u64 ret = 0;

reg = per_cpu_ptr(&stolen_time_region, cpu);

@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
* online notification callback runs. Until the callback
* has run we just return zero.
*/
-if (!reg->kaddr)
+rcu_read_lock();
+kaddr = rcu_dereference(reg->kaddr);
+if (!kaddr) {
+rcu_read_unlock();
return 0;
+}

-return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
+rcu_read_unlock();
+return ret;
}

static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
+struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;

reg = this_cpu_ptr(&stolen_time_region);
if (!reg->kaddr)
return 0;

-memunmap(reg->kaddr);
-memset(reg, 0, sizeof(*reg));
+kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
+synchronize_rcu();
+memunmap(kaddr);

return 0;
}

static int stolen_time_cpu_online(unsigned int cpu)
{
+struct pvclock_vcpu_stolen_time *kaddr = NULL;
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;

@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
return -EINVAL;

-reg->kaddr = memremap(res.a0,
+kaddr = memremap(res.a0,
sizeof(struct pvclock_vcpu_stolen_time),
MEMREMAP_WB);

+rcu_assign_pointer(reg->kaddr, kaddr);
+
if (!reg->kaddr) {
pr_warn("Failed to map stolen time data structure\n");
return -ENOMEM;
}

-if (le32_to_cpu(reg->kaddr->revision) != 0 ||
-le32_to_cpu(reg->kaddr->attributes) != 0) {
+if (le32_to_cpu(kaddr->revision) != 0 ||
+le32_to_cpu(kaddr->attributes) != 0) {
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
return -ENXIO;
}
@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);

+if (!clk)
+return;
clk->cl.dev_id = dev;
clk->cl.con_id = NULL;
clk->cl.clk = clk;

@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);

+if (!clk)
+return;
clk->cl.dev_id = dev_name(dev);
clk->cl.con_id = con;
clk->cl.clk = clk;

@@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);

+if (!clk)
+return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;

@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con,
{
struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);

+if (!clk)
+return;
clk->cl.dev_id = dev;
clk->cl.con_id = con;
clk->cl.clk = clk;

@@ -356,6 +360,7 @@ static void clkdev_add_pci(void)
struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);

/* main pci clock */
+if (clk) {
clk->cl.dev_id = "17000000.pci";
clk->cl.con_id = NULL;
clk->cl.clk = clk;

@@ -366,8 +371,10 @@ static void clkdev_add_pci(void)
clk->module = 0;
clk->bits = PMU_PCI;
clkdev_add(&clk->cl);
+}

/* use internal/external bus clock */
+if (clk_ext) {
clk_ext->cl.dev_id = "17000000.pci";
clk_ext->cl.con_id = "external";
clk_ext->cl.clk = clk_ext;

@@ -375,6 +382,7 @@ static void clkdev_add_pci(void)
clk_ext->disable = pci_ext_disable;
clkdev_add(&clk_ext->cl);
+}
}

/* xway socs can generate clocks on gpio pins */
static unsigned long valid_clkout_rates[4][5] = {

@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void)
char *name;

name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+if (!name)
+continue;
sprintf(name, "clkout%d", i);

clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+if (!clk) {
+kfree(name);
+continue;
+}
clk->cl.dev_id = "1f103000.cgu";
clk->cl.con_id = name;
clk->cl.clk = clk;
@@ -166,7 +166,7 @@
clocks = <&prci PRCI_CLK_TLCLK>;
status = "disabled";
};
-dma: dma@3000000 {
+dma: dma-controller@3000000 {
compatible = "sifive,fu540-c000-pdma";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic0>;
@@ -142,10 +142,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}

-static void translation_exception(struct pt_regs *regs)
+static void translation_specification_exception(struct pt_regs *regs)
{
/* May never happen. */
-panic("Translation exception");
+panic("Translation-Specification Exception");
}

static void illegal_op(struct pt_regs *regs)

@@ -374,7 +374,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = {
[0x0f] = hfp_divide_exception,
[0x10] = do_dat_exception,
[0x11] = do_dat_exception,
-[0x12] = translation_exception,
+[0x12] = translation_specification_exception,
[0x13] = special_op_exception,
[0x14] = default_trap_handler,
[0x15] = operand_exception,
@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
list_for_each_entry(tmp, &zpci_list, entry) {
if (tmp->fid == fid) {
zdev = tmp;
+zpci_zdev_get(zdev);
break;
}
}

@@ -19,6 +19,7 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
void zpci_release_device(struct kref *kref);
static inline void zpci_zdev_put(struct zpci_dev *zdev)
{
+if (zdev)
kref_put(&zdev->kref, zpci_release_device);
}

@@ -22,6 +22,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>

+#include "pci_bus.h"
+
bool zpci_unique_uid;

void update_uid_checking(bool new)

@@ -403,7 +405,10 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
return;

zdev = get_zdev_by_fid(entry->fid);
-if (!zdev)
+if (zdev) {
+zpci_zdev_put(zdev);
+return;
+}
zpci_create_device(entry->fid, entry->fh, entry->config_state);
}

@@ -62,10 +62,12 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);

if (!pdev)
-return;
+goto no_pdev;

pdev->error_state = pci_channel_io_perm_failure;
pci_dev_put(pdev);
+no_pdev:
+zpci_zdev_put(zdev);
}

void zpci_event_error(void *data)

@@ -94,6 +96,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+bool existing_zdev = !!zdev;
enum zpci_state state;

zpci_err("avail CCDF:\n");

@@ -156,6 +159,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
default:
break;
}
+if (existing_zdev)
+zpci_zdev_put(zdev);
}

void zpci_event_availability(void *data)
@@ -172,7 +172,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
-jz .Ldone8
+jz .Ldone2
mov %rax,%r9
and $~0xf,%r9

@@ -438,7 +438,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
# xor remaining bytes from partial register into output
mov %rcx,%rax
and $0xf,%rcx
-jz .Ldone8
+jz .Ldone4
mov %rax,%r9
and $~0xf,%r9
@@ -5590,6 +5590,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
int nr_zapped, batch = 0;
+bool unstable;

restart:
list_for_each_entry_safe_reverse(sp, node,

@@ -5621,12 +5622,13 @@ restart:
goto restart;
}

-if (__kvm_mmu_prepare_zap_page(kvm, sp,
-&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
+unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
+&kvm->arch.zapped_obsolete_pages, &nr_zapped);
batch += nr_zapped;
+
+if (unstable)
goto restart;
-}
}

/*
* Trigger a remote TLB flush before freeing the page tables to ensure
@@ -10,13 +10,12 @@
#include <linux/msg.h>
#include <linux/shm.h>

-typedef long syscall_handler_t(void);
+typedef long syscall_handler_t(long, long, long, long, long, long);

extern syscall_handler_t *sys_call_table[];

#define EXECUTE_SYSCALL(syscall, regs) \
-(((long (*)(long, long, long, long, long, long)) \
-(*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+(((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
UPT_SYSCALL_ARG2(&regs->regs), \
UPT_SYSCALL_ARG3(&regs->regs), \
UPT_SYSCALL_ARG4(&regs->regs), \
@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
unsigned int set_size)
{
struct drbd_request *r;
-struct drbd_request *req = NULL;
+struct drbd_request *req = NULL, *tmp = NULL;
int expect_epoch = 0;
int expect_size = 0;

@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
* to catch requests being barrier-acked "unexpectedly".
* It usually should find the same req again, or some READ preceding it. */
list_for_each_entry(req, &connection->transfer_log, tl_requests)
-if (req->epoch == expect_epoch)
+if (req->epoch == expect_epoch) {
+tmp = req;
break;
+}
+req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
if (req->epoch != expect_epoch)
break;
@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);

-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;

/* Format request descriptor. */
static struct format_descr format_req;

@@ -530,7 +530,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;

-static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);

@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
-} else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)

@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
if (!next_valid_format(current_drive))
return;
}
-err_count = ++(*errors);
+err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);

@@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
-format_errors = 0;
cont = &format_cont;
-errors = &format_errors;
+floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;

@@ -2761,10 +2759,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
-current_req->error_count = 0;
+floppy_errors = 0;
list_del_init(&current_req->queuelist);
+return 1;
}
-return current_req != NULL;
+return 0;
}

/* Starts or continues processing request. Will automatically unlock the

@@ -2823,7 +2822,6 @@ do_request:
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
-errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);
@@ -106,6 +106,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
tmp_rate = parent_rate;
else
tmp_rate = parent_rate / div;
+
+if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
+return;
+
tmp_diff = abs(req->rate - tmp_rate);

if (*best_diff < 0 || *best_diff >= tmp_diff) {
@@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
+break;
}
} while (currsize < max);
@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev)
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);

-if (ret < 0)
+if (ret < 0) {
+pm_runtime_put_noidle(crc->dev);
return ret;
+}

spin_lock(&crc_list.lock);
list_del(&crc->list);
@@ -436,6 +436,7 @@ static inline int is_dma_buf_file(struct file *file)

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
+static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

@@ -445,6 +446,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);

+/*
+ * The ->i_ino acquired from get_next_ino() is not unique thus
+ * not suitable for using it as dentry name by dmabuf stats.
+ * Override ->i_ino with the unique and dmabuffs specific
+ * value.
+ */
+inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long flags;
unsigned int on, off;

+if (state->polarity != PWM_POLARITY_NORMAL)
+return -EINVAL;
+
val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
do_div(val, NSEC_PER_SEC);
if (val > UINT_MAX + 1ULL)
@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
{
struct vf610_gpio_port *port = gpiochip_get_data(chip);
unsigned long mask = BIT(gpio);
+u32 val;

-if (port->sdata && port->sdata->have_paddr)
-vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
+if (port->sdata && port->sdata->have_paddr) {
+val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+val |= mask;
+vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+}

vf610_gpio_set(chip, gpio, value);
@@ -1411,9 +1411,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,

#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif

@@ -1045,6 +1045,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
(pm_suspend_target_state == PM_SUSPEND_MEM);
}

+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+if (adev->flags & AMD_IS_APU)
+return false;
+return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+}
+
/**
 * amdgpu_acpi_is_s0ix_active
 *

@@ -2259,7 +2259,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);

-if (!adev->in_s0ix)
+if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);

return 0;

@@ -4834,6 +4834,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,

mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
drm_edid_get_monitor_name(mst_edid, name, namelen);
+kfree(mst_edid);
}

/**
@@ -375,6 +375,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
}
}

+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+const u32 *mmioaddr, u32 mmio_count,
+int header_ver, u8 dmc_id)
+{
+struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+u32 start_range, end_range;
+int i;
+
+if (dmc_id >= DMC_FW_MAX) {
+drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+return false;
+}
+
+if (header_ver == 1) {
+start_range = DMC_MMIO_START_RANGE;
+end_range = DMC_MMIO_END_RANGE;
+} else if (dmc_id == DMC_FW_MAIN) {
+start_range = TGL_MAIN_MMIO_START;
+end_range = TGL_MAIN_MMIO_END;
+} else if (DISPLAY_VER(i915) >= 13) {
+start_range = ADLP_PIPE_MMIO_START;
+end_range = ADLP_PIPE_MMIO_END;
+} else if (DISPLAY_VER(i915) >= 12) {
+start_range = TGL_PIPE_MMIO_START(dmc_id);
+end_range = TGL_PIPE_MMIO_END(dmc_id);
+} else {
+drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+return false;
+}
+
+for (i = 0; i < mmio_count; i++) {
+if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+return false;
+}
+
+return true;
+}
+
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)

@@ -444,6 +482,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}

+if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+dmc_header->header_ver, dmc_id)) {
+drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
+return 0;
+}
+
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];

@@ -376,21 +376,6 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
return -EINVAL;
}

-/*
- * The port numbering and mapping here is bizarre. The now-obsolete
- * swsci spec supports ports numbered [0..4]. Port E is handled as a
- * special case, but port F and beyond are not. The functionality is
- * supposed to be obsolete for new platforms. Just bail out if the port
- * number is out of bounds after mapping.
- */
-if (port > 4) {
-drm_dbg_kms(&dev_priv->drm,
-"[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
-intel_encoder->base.base.id, intel_encoder->base.name,
-port_name(intel_encoder->port), port);
-return -EINVAL;
-}
-
if (!enable)
parm |= 4 << 8;
@@ -7818,6 +7818,22 @@ enum {

/* MMIO address range for DMC program (0x80000 - 0x82FFF) */
#define DMC_MMIO_START_RANGE 0x80000
#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+_TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+_TGL_PIPEB_MMIO_END)
+
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
@@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)

if (i2c->bus_freq == 0) {
dev_warn(i2c->dev, "clock-frequency 0 not supported\n");
-return -EINVAL;
+ret = -EINVAL;
+goto err_disable_clk;
}

adap = &i2c->adap;

@@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev)

ret = i2c_add_adapter(adap);
if (ret < 0)
-return ret;
+goto err_disable_clk;

dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000);

return 0;
+
+err_disable_clk:
+clk_disable_unprepare(i2c->clk);
+
+return ret;
}
@@ -77,6 +77,7 @@

/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
+#define SB800_PIIX4_SMB_MAP_SIZE 2

#define KERNCZ_IMC_IDX 0x3e
#define KERNCZ_IMC_DATA 0x3f

@@ -97,6 +98,9 @@
#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3

+#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
+#define SB800_PIIX4_FCH_PM_SIZE 8
+
/* insmod parameters */

/* If force is set to anything different from 0, we forcibly enable the

@@ -155,6 +159,12 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
};
static const char *piix4_aux_port_name_sb800 = " port 1";

+struct sb800_mmio_cfg {
+void __iomem *addr;
+struct resource *res;
+bool use_mmio;
+};
+
struct i2c_piix4_adapdata {
unsigned short smba;
@@ -162,8 +172,75 @@ struct i2c_piix4_adapdata {
bool sb800_main;
bool notify_imc;
u8 port; /* Port number, shifted */
+struct sb800_mmio_cfg mmio_cfg;
};

+static int piix4_sb800_region_request(struct device *dev,
+struct sb800_mmio_cfg *mmio_cfg)
+{
+if (mmio_cfg->use_mmio) {
+struct resource *res;
+void __iomem *addr;
+
+res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+SB800_PIIX4_FCH_PM_SIZE,
+"sb800_piix4_smb");
+if (!res) {
+dev_err(dev,
+"SMBus base address memory region 0x%x already in use.\n",
+SB800_PIIX4_FCH_PM_ADDR);
+return -EBUSY;
+}
+
+addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
+SB800_PIIX4_FCH_PM_SIZE);
+if (!addr) {
+release_resource(res);
+dev_err(dev, "SMBus base address mapping failed.\n");
+return -ENOMEM;
+}
+
+mmio_cfg->res = res;
+mmio_cfg->addr = addr;
+
+return 0;
+}
+
+if (!request_muxed_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE,
+"sb800_piix4_smb")) {
+dev_err(dev,
+"SMBus base address index region 0x%x already in use.\n",
+SB800_PIIX4_SMB_IDX);
+return -EBUSY;
+}
+
+return 0;
+}
+
+static void piix4_sb800_region_release(struct device *dev,
+struct sb800_mmio_cfg *mmio_cfg)
+{
+if (mmio_cfg->use_mmio) {
+iounmap(mmio_cfg->addr);
+release_resource(mmio_cfg->res);
+return;
+}
+
+release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE);
+}
+
+static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev)
+{
+/*
+ * cd6h/cd7h port I/O accesses can be disabled on AMD processors
+ * w/ SMBus PCI revision ID 0x51 or greater. MMIO is supported on
+ * the same processors and is the recommended access method.
+ */
+return (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
+PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+PIIX4_dev->revision >= 0x51);
+}
+
static int piix4_setup(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
@@ -263,12 +340,61 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
return piix4_smba;
}

+static int piix4_setup_sb800_smba(struct pci_dev *PIIX4_dev,
+u8 smb_en,
+u8 aux,
+u8 *smb_en_status,
+unsigned short *piix4_smba)
+{
+struct sb800_mmio_cfg mmio_cfg;
+u8 smba_en_lo;
+u8 smba_en_hi;
+int retval;
+
+mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
+retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
+if (retval)
+return retval;
+
+if (mmio_cfg.use_mmio) {
+smba_en_lo = ioread8(mmio_cfg.addr);
+smba_en_hi = ioread8(mmio_cfg.addr + 1);
+} else {
+outb_p(smb_en, SB800_PIIX4_SMB_IDX);
+smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
+smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+}
+
+piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
+
+if (!smb_en) {
+*smb_en_status = smba_en_lo & 0x10;
+*piix4_smba = smba_en_hi << 8;
+if (aux)
+*piix4_smba |= 0x20;
+} else {
+*smb_en_status = smba_en_lo & 0x01;
+*piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+}
+
+if (!*smb_en_status) {
+dev_err(&PIIX4_dev->dev,
+"SMBus Host Controller not enabled!\n");
+return -ENODEV;
+}
+
+return 0;
+}
+
static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id, u8 aux)
{
unsigned short piix4_smba;
-u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status, port_sel;
+u8 smb_en, smb_en_status, port_sel;
u8 i2ccfg, i2ccfg_offset = 0x10;
+struct sb800_mmio_cfg mmio_cfg;
int retval;

/* SB800 and later SMBus does not support forcing address */
if (force || force_addr) {

@@ -290,35 +416,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
else
smb_en = (aux) ? 0x28 : 0x2c;

-if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb")) {
-dev_err(&PIIX4_dev->dev,
-"SMB base address index region 0x%x already in use.\n",
-SB800_PIIX4_SMB_IDX);
-return -EBUSY;
-}
+retval = piix4_setup_sb800_smba(PIIX4_dev, smb_en, aux, &smb_en_status,
+&piix4_smba);

-outb_p(smb_en, SB800_PIIX4_SMB_IDX);
-smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
-outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
-smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
-
-release_region(SB800_PIIX4_SMB_IDX, 2);
-
-if (!smb_en) {
-smb_en_status = smba_en_lo & 0x10;
-piix4_smba = smba_en_hi << 8;
-if (aux)
-piix4_smba |= 0x20;
-} else {
-smb_en_status = smba_en_lo & 0x01;
-piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
-}
-
-if (!smb_en_status) {
-dev_err(&PIIX4_dev->dev,
-"SMBus Host Controller not enabled!\n");
-return -ENODEV;
-}
+if (retval)
+return retval;

if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
return -ENODEV;
@@ -371,10 +473,11 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
}
} else {
-if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
-"sb800_piix4_smb")) {
+mmio_cfg.use_mmio = piix4_sb800_use_mmio(PIIX4_dev);
+retval = piix4_sb800_region_request(&PIIX4_dev->dev, &mmio_cfg);
+if (retval) {
release_region(piix4_smba, SMBIOSIZE);
-return -EBUSY;
+return retval;
}

outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);

@@ -384,7 +487,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
SB800_PIIX4_PORT_IDX;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-release_region(SB800_PIIX4_SMB_IDX, 2);
+piix4_sb800_region_release(&PIIX4_dev->dev, &mmio_cfg);
}

dev_info(&PIIX4_dev->dev,
@@ -662,6 +765,29 @@ static void piix4_imc_wakeup(void)
release_region(KERNCZ_IMC_IDX, 2);
}

+static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
+{
+u8 smba_en_lo, val;
+
+if (mmio_cfg->use_mmio) {
+smba_en_lo = ioread8(mmio_cfg->addr + piix4_port_sel_sb800);
+val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
+if (smba_en_lo != val)
+iowrite8(val, mmio_cfg->addr + piix4_port_sel_sb800);
+
+return (smba_en_lo & piix4_port_mask_sb800);
+}
+
+outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
+smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+
+val = (smba_en_lo & ~piix4_port_mask_sb800) | port;
+if (smba_en_lo != val)
+outb_p(val, SB800_PIIX4_SMB_IDX + 1);
+
+return (smba_en_lo & piix4_port_mask_sb800);
+}
+
/*
 * Handles access to multiple SMBus ports on the SB800.
 * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -678,12 +804,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
unsigned short piix4_smba = adapdata->smba;
int retries = MAX_TIMEOUT;
int smbslvcnt;
-u8 smba_en_lo;
-u8 port;
+u8 prev_port;
int retval;

-if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, "sb800_piix4_smb"))
-return -EBUSY;
+retval = piix4_sb800_region_request(&adap->dev, &adapdata->mmio_cfg);
+if (retval)
+return retval;

/* Request the SMBUS semaphore, avoid conflicts with the IMC */
smbslvcnt = inb_p(SMBSLVCNT);

@@ -738,18 +864,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
}
}

-outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
-smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
-
-port = adapdata->port;
-if ((smba_en_lo & piix4_port_mask_sb800) != port)
-outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
-SB800_PIIX4_SMB_IDX + 1);
+prev_port = piix4_sb800_port_sel(adapdata->port, &adapdata->mmio_cfg);

retval = piix4_access(adap, addr, flags, read_write,
command, size, data);

-outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
+piix4_sb800_port_sel(prev_port, &adapdata->mmio_cfg);

/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);

@@ -758,7 +878,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
piix4_imc_wakeup();

release:
-release_region(SB800_PIIX4_SMB_IDX, 2);
+piix4_sb800_region_release(&adap->dev, &adapdata->mmio_cfg);
return retval;
}
@@ -836,6 +956,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
return -ENOMEM;
}

+adapdata->mmio_cfg.use_mmio = piix4_sb800_use_mmio(dev);
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
adapdata->port = port << piix4_port_shift_sb800;
@@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex);

static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };

+static const unsigned int input_max_code[EV_CNT] = {
+[EV_KEY] = KEY_MAX,
+[EV_REL] = REL_MAX,
+[EV_ABS] = ABS_MAX,
+[EV_MSC] = MSC_MAX,
+[EV_SW] = SW_MAX,
+[EV_LED] = LED_MAX,
+[EV_SND] = SND_MAX,
+[EV_FF] = FF_MAX,
+};
+
static inline int is_event_supported(unsigned int code,
unsigned long *bm, unsigned int max)
{

@@ -2074,6 +2085,14 @@ EXPORT_SYMBOL(input_get_timestamp);
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
+if (type < EV_CNT && input_max_code[type] &&
+code > input_max_code[type]) {
+pr_err("%s: invalid code %u for type %u\n", __func__, code,
+type);
+dump_stack();
+return;
+}
+
switch (type) {
case EV_KEY:
__set_bit(code, dev->keybit);
@@ -420,9 +420,9 @@ static int ili210x_i2c_probe(struct i2c_client *client,
if (error)
return error;

-usleep_range(50, 100);
+usleep_range(12000, 15000);
gpiod_set_value_cansleep(reset_gpio, 0);
-msleep(100);
+msleep(160);
}

priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -339,11 +339,11 @@ static int stmfts_input_open(struct input_dev *dev)

err = pm_runtime_get_sync(&sdata->client->dev);
if (err < 0)
-return err;
+goto out;

err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
if (err)
-return err;
+goto out;

mutex_lock(&sdata->mutex);
sdata->running = true;

@@ -366,7 +366,9 @@ static int stmfts_input_open(struct input_dev *dev)
"failed to enable touchkey\n");
}

-return 0;
+out:
+pm_runtime_put_noidle(&sdata->client->dev);
+return err;
}

static void stmfts_input_close(struct input_dev *dev)
@@ -18,14 +18,9 @@

#define M_CAN_PCI_MMIO_BAR 0

+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508

-struct m_can_pci_config {
-const struct can_bittiming_const *bit_timing;
-const struct can_bittiming_const *data_timing;
-unsigned int clock_freq;
-};
-
struct m_can_pci_priv {
struct m_can_classdev cdev;

@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};

-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
-.name = KBUILD_MODNAME,
-.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
-.tseg1_max = 64,
-.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
-.tseg2_max = 128,
-.sjw_max = 128,
-.brp_min = 1,
-.brp_max = 512,
-.brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
-.name = KBUILD_MODNAME,
-.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
-.tseg1_max = 16,
-.tseg2_min = 1, /* Time segment 2 = phase_seg2 */
-.tseg2_max = 8,
-.sjw_max = 4,
-.brp_min = 1,
-.brp_max = 32,
-.brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
-.bit_timing = &m_can_bittiming_const_ehl,
-.data_timing = &m_can_data_bittiming_const_ehl,
-.clock_freq = 200000000,
-};
-
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
-const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;

@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;

-cfg = (const struct m_can_pci_config *)id->driver_data;
-
priv = cdev_to_priv(mcan_class);

priv->base = base;

@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
-mcan_class->bit_timing = cfg->bit_timing;
-mcan_class->data_timing = cfg->data_timing;
-mcan_class->can.clock.freq = cfg->clock_freq;
+mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;

pci_set_drvdata(pci, mcan_class);

@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);

static const struct pci_device_id m_can_pci_id_table[] = {
-{ PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
-{ PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+{ PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+{ PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
@@ -345,7 +345,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
-bool is_rsc_completed = true;
int err = 0;

for (; (self->sw_head != self->hw_head) && budget;

@@ -363,12 +362,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;

if (!buff->is_eop) {
+unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+bool is_rsc_completed = true;
+
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}

+frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =

@@ -376,18 +380,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
next_,
self->hw_head);

-if (unlikely(!is_rsc_completed))
-break;
+if (unlikely(!is_rsc_completed) ||
+frag_cnt > MAX_SKB_FRAGS) {
+err = 0;
+goto err_exit;
+}

buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;

} while (!buff_->is_eop);

-if (!is_rsc_completed) {
-err = 0;
-goto err_exit;
-}
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;

@@ -445,7 +448,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));

if (buff->len - hdr_len > 0) {
-skb_add_rx_frag(skb, 0, buff->rxdata.page,
+skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
AQ_CFG_RX_FRAME_MAX);

@@ -454,7 +457,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,

if (!buff->is_eop) {
buff_ = buff;
-i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];

@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}

+/* Validate that the new hw_head_ is reasonable. */
+if (hw_head_ >= ring->size) {
+err = -ENXIO;
+goto err_exit;
+}
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);

priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
-if (IS_ERR(priv->wol_clk))
-return PTR_ERR(priv->wol_clk);
+if (IS_ERR(priv->wol_clk)) {
+ret = PTR_ERR(priv->wol_clk);
+goto err_deregister_fixed_link;
+}

/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
@@ -1250,7 +1250,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();

-queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);

if (!queue->rx_skbuff[entry]) {

@@ -1289,6 +1288,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+queue->rx_prepared_head++;
}

/* Make descriptor updates visible to hardware */
@@ -1398,8 +1398,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
-if (!dev)
+if (!dev) {
+pci_disable_device(pdev);
return -ENOMEM;
+}

SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {

@@ -1778,6 +1780,7 @@ err_out_free_res:

err_out_free_netdev:
free_netdev (dev);
+pci_disable_device(pdev);
return -ENODEV;
}
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
q_vector->tx.itr_mode = ITR_DYNAMIC;
q_vector->rx.itr_mode = ITR_DYNAMIC;
+q_vector->tx.type = ICE_TX_CONTAINER;
+q_vector->rx.type = ICE_RX_CONTAINER;

if (vsi->type == ICE_VSI_VF)
goto out;
@@ -3466,15 +3466,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}

-enum ice_container_type {
-ICE_RX_CONTAINER,
-ICE_TX_CONTAINER,
-};
-
/**
 * ice_get_rc_coalesce - get ITR values for specific ring container
 * @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
 * @rc: ring container that the ITR values will come from
 *
 * Query the device for ice_ring_container specific ITR values. This is

@@ -3484,13 +3478,12 @@ enum ice_container_type {
 * Returns 0 on success, negative otherwise.
 */
static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
-struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
if (!rc->ring)
return -EINVAL;

-switch (c_type) {
+switch (rc->type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
ec->rx_coalesce_usecs = rc->itr_setting;

@@ -3501,7 +3494,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
-dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", rc->type);
return -EINVAL;
}

@@ -3522,18 +3515,18 @@ static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
-if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
-if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
-if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else {

@@ -3585,7 +3578,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,

/**
 * ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
 * @ec: ethtool structure from user to update ITR settings
 * @rc: ring container that the ITR values will come from
 * @vsi: VSI associated to the ring container

@@ -3597,10 +3589,10 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
 * Returns 0 on success, negative otherwise.
 */
static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
-const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;

@@ -3608,7 +3600,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (!rc->ring)
return -EINVAL;

-switch (c_type) {
+switch (rc->type) {
case ICE_RX_CONTAINER:
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&

@@ -3641,7 +3633,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
-c_type);
+rc->type);
return -EINVAL;
}

@@ -3690,22 +3682,22 @@ static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
-if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;

-if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
-if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
-if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
@@ -2980,8 +2980,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];

-coalesce[i].itr_tx = q_vector->tx.itr_setting;
-coalesce[i].itr_rx = q_vector->rx.itr_setting;
+coalesce[i].itr_tx = q_vector->tx.itr_settings;
+coalesce[i].itr_rx = q_vector->rx.itr_settings;
coalesce[i].intrl = q_vector->intrl;

if (i < vsi->num_txq)

@@ -3037,21 +3037,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 */
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
-rc->itr_setting = coalesce[i].itr_rx;
+rc->itr_settings = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
-rc->itr_setting = coalesce[0].itr_rx;
+rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
}

if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
-rc->itr_setting = coalesce[i].itr_tx;
+rc->itr_settings = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
-rc->itr_setting = coalesce[0].itr_tx;
+rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
}

@@ -3065,12 +3065,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
for (; i < vsi->num_q_vectors; i++) {
/* transmit */
rc = &vsi->q_vectors[i]->tx;
-rc->itr_setting = coalesce[0].itr_tx;
+rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);

/* receive */
rc = &vsi->q_vectors[i]->rx;
-rc->itr_setting = coalesce[0].itr_rx;
+rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);

vsi->q_vectors[i]->intrl = coalesce[0].intrl;
@@ -5656,9 +5656,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
|
||||
netif_carrier_on(vsi->netdev);
|
||||
}
|
||||
|
||||
/* clear this now, and the first stats read will be used as baseline */
|
||||
vsi->stat_offsets_loaded = false;
|
||||
|
||||
/* Perform an initial read of the statistics registers now to
|
||||
* set the baseline so counters are ready when interface is up
|
||||
*/
|
||||
ice_update_eth_stats(vsi);
|
||||
ice_service_task_schedule(pf);
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -254,12 +254,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
|
||||
* This function must be called periodically to ensure that the cached value
|
||||
* is never more than 2 seconds old. It must also be called whenever the PHC
|
||||
* time has been changed.
|
||||
*
|
||||
* Return:
|
||||
* * 0 - OK, successfully updated
|
||||
* * -EAGAIN - PF was busy, need to reschedule the update
|
||||
*/
|
||||
static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
|
||||
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
|
||||
{
|
||||
u64 systime;
|
||||
int i;
|
||||
|
||||
if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
|
||||
return -EAGAIN;
|
||||
|
||||
/* Read the current PHC time */
|
||||
systime = ice_ptp_read_src_clk_reg(pf, NULL);
|
||||
|
||||
@@ -282,6 +289,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
|
||||
WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
|
||||
}
|
||||
}
|
||||
clear_bit(ICE_CFG_BUSY, pf->state);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1418,17 +1428,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
|
||||
{
|
||||
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
|
||||
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
|
||||
int err;
|
||||
|
||||
if (!test_bit(ICE_FLAG_PTP, pf->flags))
|
||||
return;
|
||||
|
||||
ice_ptp_update_cached_phctime(pf);
|
||||
err = ice_ptp_update_cached_phctime(pf);
|
||||
|
||||
ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
|
||||
|
||||
/* Run twice a second */
|
||||
/* Run twice a second or reschedule if phc update failed */
|
||||
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
|
||||
msecs_to_jiffies(500));
|
||||
msecs_to_jiffies(err ? 10 : 500));
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
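The hunk above converts ice_ptp_update_cached_phctime() to return -EAGAIN when the PF is busy, and the periodic worker then reschedules after 10 ms instead of the usual 500 ms. A minimal userspace C sketch of this retry-with-shorter-delay pattern; the function name and the fake busy condition below are illustrative stand-ins, not driver code:

    #include <stdio.h>
    #include <unistd.h>

    /* Illustrative stand-in: pretend the device is busy on early attempts. */
    static int update_cached_time(int attempt)
    {
        return (attempt < 2) ? -1 : 0;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++) {
            int err = update_cached_time(i);
            /* Retry quickly on failure, otherwise poll at the slow rate. */
            unsigned int delay_ms = err ? 10 : 500;

            printf("attempt %d: %s, next run in %u ms\n",
                   i, err ? "busy" : "updated", delay_ms);
            usleep(delay_ms * 1000);
        }
        return 0;
    }
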
@@ -332,6 +332,11 @@ static inline bool ice_ring_is_xdp(struct ice_ring *ring)
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
};

struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
@@ -340,10 +345,16 @@ struct ice_ring_container {
/* this matches the maximum number of ITR bits, but in usec
* values, so it is shifted left one bit (bit zero is ignored)
*/
union {
struct {
u16 itr_setting:13;
u16 itr_reserved:2;
u16 itr_mode:1;
};
u16 itr_settings;
};
enum ice_container_type type;
};

struct ice_coalesce_stored {
u16 itr_tx;

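The union added above lets the driver snapshot the 13-bit itr_setting together with itr_mode as one 16-bit itr_settings word, which is what the rebuild paths now save and restore. A standalone C sketch of the same trick, assuming C11 anonymous members; only the field names come from the hunk, the values are made up:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ring_container {
        union {
            struct {
                uint16_t itr_setting:13;
                uint16_t itr_reserved:2;
                uint16_t itr_mode:1;
            };
            uint16_t itr_settings;  /* all of the above in one word */
        };
    };

    int main(void)
    {
        struct ring_container rc = { .itr_setting = 50, .itr_mode = 1 };

        /* Saving only itr_setting would drop itr_mode on restore ... */
        uint16_t partial = rc.itr_setting;
        /* ... while saving itr_settings keeps every bit. */
        uint16_t full = rc.itr_settings;

        struct ring_container restored = { .itr_settings = full };
        assert(restored.itr_mode == 1 && restored.itr_setting == 50);

        struct ring_container lossy = { .itr_setting = partial };
        printf("lossy itr_mode = %d (mode lost)\n", lossy.itr_mode);
        return 0;
    }
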
@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}

if (adapter->link_speed != SPEED_1000)
if (adapter->link_speed != SPEED_1000 ||
!hw->phy.ops.read_reg)
goto no_wait;

/* wait for Remote receiver status OK */

@@ -3542,6 +3542,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}

if (params->xdp_prog) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with XDP\n");
features &= ~NETIF_F_LRO;
}
}

if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)

@@ -846,7 +846,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
bool ignore_flow_level)
bool ignore_flow_level,
u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
@@ -914,7 +915,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
reformat_req,
&action->dest_tbl->fw_tbl.id,
&action->dest_tbl->fw_tbl.group_id,
ignore_flow_level);
ignore_flow_level,
flow_source);
if (ret)
goto free_action;


@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
bool ignore_flow_level)
bool ignore_flow_level,
u32 flow_source)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.val = val;
fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level;
fte_info.flow_context.flow_source = flow_source;

ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {

@@ -1394,7 +1394,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
bool ignore_flow_level);
bool ignore_flow_level,
u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */

@@ -492,11 +492,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
u32 flow_source = fte->flow_context.flow_source;

tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
ignore_flow_level);
ignore_flow_level,
flow_source);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;

@@ -96,7 +96,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
bool ignore_flow_level);
bool ignore_flow_level,
u32 flow_source);

struct mlx5dr_action *mlx5dr_action_create_drop(void);


@@ -3612,7 +3612,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;

if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);

/*

@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;

/* Enable pci device */
ret = pci_enable_device(pdev);
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
pcim_iounmap_regions(pdev, BIT(i));
break;
}

pci_disable_device(pdev);
}

static int __maybe_unused stmmac_pci_suspend(struct device *dev)

@@ -1370,9 +1370,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
struct gsi_event *event_done;
struct gsi_event *event;
struct gsi_trans *trans;
u32 trans_count = 0;
u32 byte_count = 0;
u32 old_index;
u32 event_avail;
u32 old_index;

trans_info = &channel->trans_info;

@@ -1393,6 +1394,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
do {
trans->len = __le16_to_cpu(event->len);
byte_count += trans->len;
trans_count++;

/* Move on to the next event and transaction */
if (--event_avail)
@@ -1404,7 +1406,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)

/* We record RX bytes when they are received */
channel->byte_count += byte_count;
channel->trans_count++;
channel->trans_count += trans_count;
}

/* Initialize a ring, including allocating DMA memory for its entries */

@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;


@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;

/* ring has already been cleaned up */
if (!rq->rx_ring[0].base)
return;

for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD

@@ -118,109 +118,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}

static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
static const struct {
u32 phys;
u32 mapped;
u32 size;
} fixed_map[] = {
{ 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;

if (addr < 0x100000)
return addr;

for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
u32 ofs;

if (addr < fixed_map[i].phys)
continue;

ofs = addr - fixed_map[i].phys;
if (ofs > fixed_map[i].size)
continue;

return fixed_map[i].mapped + ofs;
}

if ((addr >= 0x18000000 && addr < 0x18c00000) ||
(addr >= 0x70000000 && addr < 0x78000000) ||
(addr >= 0x7c000000 && addr < 0x7c400000))
return mt7921_reg_map_l1(dev, addr);

dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
addr);

return 0;
}

static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);

return dev->bus_ops->rr(mdev, addr);
}

static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);

dev->bus_ops->wr(mdev, addr, val);
}

static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);

return dev->bus_ops->rmw(mdev, addr, mask, val);
}

static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
@@ -380,20 +277,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)

int mt7921_dma_init(struct mt7921_dev *dev)
{
struct mt76_bus_ops *bus_ops;
int ret;

dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;

bus_ops->rr = mt7921_rr;
bus_ops->wr = mt7921_wr;
bus_ops->rmw = mt7921_rmw;
dev->mt76.bus = bus_ops;

mt76_dma_attach(&dev->mt76);

ret = mt7921_dma_disable(dev, true);

@@ -1306,8 +1306,6 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,

int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;

for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -1320,16 +1318,8 @@ int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
goto out;
}

mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);

pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
out:
return err;
}

@@ -1345,6 +1335,16 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
goto out;

err = __mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
goto out;

mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);

pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;

out:
mutex_unlock(&pm->mutex);


@@ -88,6 +88,110 @@ static void mt7921_irq_tasklet(unsigned long data)
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}

static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
static const struct {
u32 phys;
u32 mapped;
u32 size;
} fixed_map[] = {
{ 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;

if (addr < 0x100000)
return addr;

for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
u32 ofs;

if (addr < fixed_map[i].phys)
continue;

ofs = addr - fixed_map[i].phys;
if (ofs > fixed_map[i].size)
continue;

return fixed_map[i].mapped + ofs;
}

if ((addr >= 0x18000000 && addr < 0x18c00000) ||
(addr >= 0x70000000 && addr < 0x78000000) ||
(addr >= 0x7c000000 && addr < 0x7c400000))
return mt7921_reg_map_l1(dev, addr);

dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
addr);

return 0;
}

static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);

return dev->bus_ops->rr(mdev, addr);
}

static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);

dev->bus_ops->wr(mdev, addr, val);
}

static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
u32 addr = __mt7921_reg_addr(dev, offset);

return dev->bus_ops->rmw(mdev, addr, mask, val);
}


static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -110,6 +214,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt7921_update_channel,
};
struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@@ -145,6 +250,22 @@ static int mt7921_pci_probe(struct pci_dev *pdev,

mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);

dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;

bus_ops->rr = mt7921_rr;
bus_ops->wr = mt7921_wr;
bus_ops->rmw = mt7921_rmw;
dev->mt76.bus = bus_ops;

ret = __mt7921_mcu_drv_pmctrl(dev);
if (ret)
return ret;

mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_err(mdev->dev, "ASIC revision: %04x\n", mdev->rev);

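The fixed_map lookup that moves from dma.c into pci.c above translates a physical register address into a mapped bus offset by scanning a table of fixed windows. A trimmed, self-contained C sketch of that lookup, keeping just two of the windows from the table; reg_addr() and the sample address are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct {
        uint32_t phys;
        uint32_t mapped;
        uint32_t size;
    } fixed_map[] = {
        { 0x00400000, 0x80000, 0x10000 },  /* WF_MCU_SYSRAM */
        { 0x820c0000, 0x08000, 0x4000 },   /* WF_UMAC_TOP (PLE) */
    };

    static uint32_t reg_addr(uint32_t addr)
    {
        /* Low addresses are already usable bus offsets. */
        if (addr < 0x100000)
            return addr;

        for (unsigned i = 0; i < ARRAY_SIZE(fixed_map); i++) {
            if (addr < fixed_map[i].phys)
                continue;

            uint32_t ofs = addr - fixed_map[i].phys;
            if (ofs > fixed_map[i].size)
                continue;

            return fixed_map[i].mapped + ofs;
        }
        return 0;  /* unsupported address */
    }

    int main(void)
    {
        printf("0x820c1000 -> 0x%05x\n", reg_addr(0x820c1000));  /* 0x09000 */
        return 0;
    }
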
@@ -4358,6 +4358,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
nvme_mpath_update(ctrl);
}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

@@ -574,8 +574,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

if (nvme_state_is_live(ns->ana_state))
/*
* nvme_mpath_set_live() will trigger I/O to the multipath path device
* and in turn to this path device. However we cannot accept this I/O
* if the controller is not live. This may deadlock if called from
* nvme_mpath_init_identify() and the ctrl will never complete
* initialization, preventing I/O from completing. For this case we
* will reprocess the ANA log page in nvme_mpath_update() once the
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}

@@ -662,6 +671,18 @@ static void nvme_ana_work(struct work_struct *work)
nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
u32 nr_change_groups = 0;

if (!ctrl->ana_log_buf)
return;

mutex_lock(&ctrl->ana_lock);
nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

@@ -776,6 +776,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -850,6 +851,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}

@@ -3379,7 +3379,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },

{ PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};

@@ -978,7 +978,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);

schedule_work(&ctrl->async_event_work);
queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)

@@ -1554,7 +1554,7 @@ static void nvmet_port_release(struct config_item *item)
struct nvmet_port *port = to_nvmet_port(item);

/* Let inflight controllers teardown complete */
flush_scheduled_work();
flush_workqueue(nvmet_wq);
list_del(&port->global_entry);

kfree(port->ana_state);

@@ -20,6 +20,9 @@ struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
@@ -205,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);

schedule_work(&ctrl->async_event_work);
queue_work(nvmet_wq, &ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -385,7 +388,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}

@@ -403,7 +406,7 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);

schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -1477,7 +1480,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
schedule_work(&ctrl->fatal_err_work);
queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
@@ -1617,9 +1620,15 @@ static int __init nvmet_init(void)
goto out_free_zbd_work_queue;
}

nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
if (!nvmet_wq) {
error = -ENOMEM;
goto out_free_buffered_work_queue;
}

error = nvmet_init_discovery();
if (error)
goto out_free_work_queue;
goto out_free_nvmet_work_queue;

error = nvmet_init_configfs();
if (error)
@@ -1628,7 +1637,9 @@ static int __init nvmet_init(void)

out_exit_discovery:
nvmet_exit_discovery();
out_free_work_queue:
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
@@ -1640,6 +1651,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);

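nvmet_init() above gains a third workqueue and a matching out_free_nvmet_work_queue unwind label. A userspace C sketch of the same goto-ladder error unwinding, with malloc() standing in for alloc_workqueue(); the label names mirror the hunk but nothing below is kernel code:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int error = -1;

        char *zbd = malloc(16);             /* first resource */
        if (!zbd)
            goto out;

        char *buffered_io = malloc(16);     /* second resource */
        if (!buffered_io)
            goto out_free_zbd;

        char *nvmet = malloc(16);           /* the queue this patch adds */
        if (!nvmet)
            goto out_free_buffered;

        puts("all resources acquired");
        free(nvmet);
        free(buffered_io);
        free(zbd);
        return 0;

    /* Each label releases everything acquired before the failing step. */
    out_free_buffered:
        free(buffered_io);
    out_free_zbd:
        free(zbd);
    out:
        return error;
    }
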
@@ -1491,7 +1491,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!schedule_work(&assoc->del_work))
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1546,7 +1546,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
if (!schedule_work(&assoc->del_work))
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
@@ -1592,7 +1592,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);

if (found_ctrl) {
if (!schedule_work(&assoc->del_work))
if (!queue_work(nvmet_wq, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
@@ -2060,7 +2060,7 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
iod->rqstdatalen = lsreqbuf_len;
iod->hosthandle = hosthandle;

schedule_work(&iod->work);
queue_work(nvmet_wq, &iod->work);

return 0;
}

@@ -360,7 +360,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
schedule_work(&rport->ls_work);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
}

@@ -393,7 +393,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
schedule_work(&rport->ls_work);
queue_work(nvmet_wq, &rport->ls_work);
}

return 0;
@@ -448,7 +448,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
schedule_work(&tport->ls_work);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
}

@@ -480,7 +480,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
spin_lock(&tport->lock);
list_add_tail(&tport->ls_list, &tls_req->ls_list);
spin_unlock(&tport->lock);
schedule_work(&tport->ls_work);
queue_work(nvmet_wq, &tport->ls_work);
}

return 0;
@@ -520,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
tgt_rscn->tport = tgtport->private;
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

schedule_work(&tgt_rscn->work);
queue_work(nvmet_wq, &tgt_rscn->work);
}

static void
@@ -739,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
kref_init(&tfcp_req->ref);

schedule_work(&tfcp_req->fcp_rcv_work);
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

return 0;
}
@@ -921,7 +921,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

schedule_work(&tfcp_req->tio_done_work);
queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

static void
@@ -976,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,

if (abortio)
/* leave the reference while the work item is scheduled */
WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
else {
/*
* as the io has already had the done callback made,

@@ -292,7 +292,7 @@ static void nvmet_file_execute_flush(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
schedule_work(&req->f.work);
queue_work(nvmet_wq, &req->f.work);
}

static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -352,7 +352,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
schedule_work(&req->f.work);
queue_work(nvmet_wq, &req->f.work);
}

static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -382,7 +382,7 @@ static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
schedule_work(&req->f.work);
queue_work(nvmet_wq, &req->f.work);
}

u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)

@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}

schedule_work(&iod->work);
queue_work(nvmet_wq, &iod->work);
return BLK_STS_OK;
}

@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
return;
}

schedule_work(&iod->work);
queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,

@@ -365,6 +365,7 @@ struct nvmet_req {

extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{

@@ -281,7 +281,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
if (req->p.use_workqueue || effects) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
schedule_work(&req->p.work);
queue_work(nvmet_wq, &req->p.work);
} else {
rq->end_io_data = req;
blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,

@@ -1583,7 +1583,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,

if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
flush_scheduled_work();
flush_workqueue(nvmet_wq);
}

ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1668,7 +1668,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)

if (disconnect) {
rdma_disconnect(queue->cm_id);
schedule_work(&queue->release_work);
queue_work(nvmet_wq, &queue->release_work);
}
}

@@ -1698,7 +1698,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);

pr_err("failed to connect queue %d\n", queue->idx);
schedule_work(&queue->release_work);
queue_work(nvmet_wq, &queue->release_work);
}

/**
@@ -1772,7 +1772,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
if (!queue) {
struct nvmet_rdma_port *port = cm_id->context;

schedule_delayed_work(&port->repair_work, 0);
queue_delayed_work(nvmet_wq, &port->repair_work, 0);
break;
}
fallthrough;
@@ -1902,7 +1902,7 @@ static void nvmet_rdma_repair_port_work(struct work_struct *w)
nvmet_rdma_disable_port(port);
ret = nvmet_rdma_enable_port(port);
if (ret)
schedule_delayed_work(&port->repair_work, 5 * HZ);
queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}

static int nvmet_rdma_add_port(struct nvmet_port *nport)
@@ -2046,7 +2046,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
}
mutex_unlock(&nvmet_rdma_queue_mutex);

flush_scheduled_work();
flush_workqueue(nvmet_wq);
}

static struct ib_client nvmet_rdma_ib_client = {

@@ -1251,7 +1251,7 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
spin_lock(&queue->state_lock);
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
schedule_work(&queue->release_work);
queue_work(nvmet_wq, &queue->release_work);
}
spin_unlock(&queue->state_lock);
}
@@ -1662,7 +1662,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
goto out;

if (sk->sk_state == TCP_LISTEN)
schedule_work(&port->accept_work);
queue_work(nvmet_wq, &port->accept_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1793,7 +1793,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)

if (sq->qid == 0) {
/* Let inflight controller teardown complete */
flush_scheduled_work();
flush_workqueue(nvmet_wq);
}

queue->nr_cmds = sq->size * 2;
@@ -1854,12 +1854,12 @@ static void __exit nvmet_tcp_exit(void)

nvmet_unregister_transport(&nvmet_tcp_ops);

flush_scheduled_work();
flush_workqueue(nvmet_wq);
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
flush_scheduled_work();
flush_workqueue(nvmet_wq);

destroy_workqueue(nvmet_tcp_wq);
}

@@ -272,7 +272,6 @@ struct advk_pcie {
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
int irq;
struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
@@ -1572,26 +1571,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
}
}

static void advk_pcie_irq_handler(struct irq_desc *desc)
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 val, mask, status;
struct advk_pcie *pcie = arg;
u32 status;

chained_irq_enter(chip, desc);
status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
if (!(status & PCIE_IRQ_CORE_INT))
return IRQ_NONE;

val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
status = val & ((~mask) & PCIE_IRQ_ALL_MASK);

if (status & PCIE_IRQ_CORE_INT) {
advk_pcie_handle_int(pcie);

/* Clear interrupt */
advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
}

chained_irq_exit(chip, desc);
return IRQ_HANDLED;
}

static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -1673,7 +1667,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
int ret;
int ret, irq;

bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
if (!bridge)
@@ -1759,9 +1753,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);

pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq < 0)
return pcie->irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;

ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
dev_err(dev, "Failed to register interrupt\n");
return ret;
}

pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
"reset-gpios", 0,
@@ -1818,15 +1820,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}

irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);

bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops;
bridge->map_irq = advk_pcie_map_irq;

ret = pci_host_probe(bridge);
if (ret < 0) {
irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
@@ -1875,9 +1874,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

/* Remove IRQ handler */
irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

/* Remove IRQ domains */
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);

@@ -2888,6 +2888,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
/*
* Downstream device is not accessible after putting a root port
* into D3cold and back into D0 on Elo i2.
*/
.ident = "Elo i2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
},
},
#endif
{ }

@@ -1236,18 +1236,12 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12);

#define AE12 196
SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
SIG_EXPR_LIST_PTR(AE12, GPIOY4));
PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));

#define AF12 197
SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
SIG_EXPR_LIST_PTR(AF12, GPIOY5));
PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));

#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1520,9 +1514,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);

GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
@@ -1918,7 +1911,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID),
ASPEED_PINCTRL_GROUP(FWQSPID),
ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1),

@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),

@@ -25,6 +25,9 @@

#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))

/* waitqueue for log readers */
static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);

/**
* struct cros_ec_debugfs - EC debugging information.
*
@@ -33,7 +36,6 @@
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
* @log_wq: waitqueue for log readers
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
*/
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
struct circ_buf log_buffer;
struct cros_ec_command *read_msg;
struct mutex log_mutex;
wait_queue_head_t log_wq;
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
buf_space--;
}

wake_up(&debug_info->log_wq);
wake_up(&cros_ec_debugfs_log_wq);
}

mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,

mutex_unlock(&debug_info->log_mutex);

ret = wait_event_interruptible(debug_info->log_wq,
ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
if (ret < 0)
return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
struct cros_ec_debugfs *debug_info = file->private_data;
__poll_t mask = 0;

poll_wait(file, &debug_info->log_wq, wait);
poll_wait(file, &cros_ec_debugfs_log_wq, wait);

mutex_lock(&debug_info->log_mutex);
if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
debug_info->log_buffer.tail = 0;

mutex_init(&debug_info->log_mutex);
init_waitqueue_head(&debug_info->log_wq);

debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
debug_info, &cros_ec_console_log_fops);

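The cros_ec hunk above keeps using CIRC_ADD(), which wraps an index with a bitmask instead of a modulo and therefore only works when the buffer size is a power of two. A tiny runnable C demonstration of that wraparound:

    #include <stdio.h>

    /* Same trick as the driver's macro: valid only for power-of-two sizes. */
    #define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))

    int main(void)
    {
        const int size = 8;  /* must be a power of two */

        /* Walking past the end wraps around without a modulo. */
        for (int idx = 5, step = 0; step < 6; step++, idx = CIRC_ADD(idx, size, 1))
            printf("%d ", idx);  /* prints: 5 6 7 0 1 2 */
        printf("\n");
        return 0;
    }
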
@@ -26,6 +26,15 @@ struct class *rtc_class;

static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct timerqueue_head *head = &rtc->timerqueue;
struct timerqueue_node *node;

mutex_lock(&rtc->ops_lock);
while ((node = timerqueue_getnext(head)))
timerqueue_del(head, node);
mutex_unlock(&rtc->ops_lock);

cancel_work_sync(&rtc->irqwork);

ida_simple_remove(&rtc_ida, rtc->id);
mutex_destroy(&rtc->ops_lock);

@@ -146,6 +146,17 @@ again:
}
EXPORT_SYMBOL_GPL(mc146818_get_time);

/* AMD systems don't allow access to AltCentury with DV1 */
static bool apply_amd_register_a_behavior(void)
{
#ifdef CONFIG_X86
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return true;
#endif
return false;
}

/* Set the current date and time in the real time clock. */
int mc146818_set_time(struct rtc_time *time)
{
@@ -219,6 +230,9 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
if (apply_amd_register_a_behavior())
CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
else
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

#ifdef CONFIG_MACH_DECSTATION

@@ -374,7 +374,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
unsigned int buf[5], ctrl2;
u8 buf[5];
unsigned int ctrl2;
int ret;

ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);

@@ -138,7 +138,7 @@ struct sun6i_rtc_dev {
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
unsigned long alarm;
time64_t alarm;

struct clk_hw hw;
struct clk_hw *int_osc;
@@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &wkalrm->time;
struct rtc_time tm_now;
unsigned long time_now = 0;
unsigned long time_set = 0;
unsigned long time_gap = 0;
int ret = 0;
time64_t time_now, time_set;
int ret;

ret = sun6i_rtc_gettime(dev, &tm_now);
if (ret < 0) {
@@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
return -EINVAL;
}

time_gap = time_set - time_now;

if (time_gap > U32_MAX) {
if ((time_set - time_now) > U32_MAX) {
dev_err(dev, "Date too far in the future\n");
return -EINVAL;
}
@@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
writel(0, chip->base + SUN6I_ALRM_COUNTER);
usleep_range(100, 300);

writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER);
chip->alarm = time_set;

sun6i_rtc_setaie(wkalrm->enabled, chip);

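The sun6i hunk above replaces unsigned long alarm arithmetic with time64_t so that, on 32-bit ARM where unsigned long is 32 bits, the alarm delta cannot wrap before the U32_MAX range check. A small C sketch of the failure mode, using fabricated epoch values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Fabricated epoch seconds: "now" and an alarm far in the future. */
        int64_t time_now = 1700000000;
        int64_t time_set = 6500000000;

        /* 32-bit math (unsigned long on 32-bit ARM) silently truncates... */
        uint32_t gap32 = (uint32_t)time_set - (uint32_t)time_now;

        /* ...while a 64-bit difference can actually be range-checked. */
        int64_t gap64 = time_set - time_now;

        printf("gap32 = %" PRIu32 " (looks valid)\n", gap32);
        printf("gap64 = %" PRId64 " (%s)\n", gap64,
               gap64 > UINT32_MAX ? "rejected: too far in the future" : "ok");
        return 0;
    }
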
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
return BLK_STS_AGAIN;
return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;

@@ -3837,6 +3837,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)

spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);

spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:

@@ -1257,6 +1257,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
int data_seg_len;

data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
& MASK_RSP_UPIU_DATA_SEG_LEN;

/* If data segment length is zero, rsp_field is not valid */
if (!data_seg_len)
return;

if (unlikely(lrbp->lun != rsp_field->lun)) {
struct scsi_device *sdev;
bool found = false;
@@ -1291,18 +1298,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return;
}

data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
& MASK_RSP_UPIU_DATA_SEG_LEN;

/* To flush remained rsp_list, we queue the map_work task */
if (!data_seg_len) {
if (!ufshpb_is_general_lun(hpb->lun))
return;

ufshpb_kick_map_work(hpb);
return;
}

BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);

if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))

@@ -145,6 +145,7 @@ enum dev_state {
STATE_DEV_INVALID = 0,
STATE_DEV_OPENED,
STATE_DEV_INITIALIZED,
STATE_DEV_REGISTERING,
STATE_DEV_RUNNING,
STATE_DEV_CLOSED,
STATE_DEV_FAILED
@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
ret = -EINVAL;
goto out_unlock;
}
dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);

ret = usb_gadget_probe_driver(&dev->driver);