Merge "Merge 5.15.75 into android14-5.15" into android14-5.15

Treehugger Robot
2022-11-03 00:49:10 +00:00
521 changed files with 4733 additions and 2185 deletions

View File

@@ -188,7 +188,7 @@ Description:
Raw capacitance measurement from channel Y. Units after
application of scale and offset are nanofarads.
What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw
What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw
KernelVersion: 3.2
Contact: linux-iio@vger.kernel.org
Description:

View File

@@ -68,6 +68,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A55 | #1530923 | ARM64_ERRATUM_1530923 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A55 | #2441007 | ARM64_ERRATUM_2441007 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #852523 | N/A |

View File

@@ -274,6 +274,9 @@ or bottom half).
This is specifically for the inode itself being marked dirty,
not its data. If the update needs to be persisted by fdatasync(),
then I_DIRTY_DATASYNC will be set in the flags argument.
I_DIRTY_TIME will be set in the flags in case lazytime is enabled
and struct inode has times updated since the last ->dirty_inode
call.
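As an illustrative aside (not part of this documentation), a filesystem's
->dirty_inode handler might dispatch on these flags roughly as follows; the
examplefs_* name is hypothetical:

	static void examplefs_dirty_inode(struct inode *inode, int flags)
	{
		if (flags & I_DIRTY_TIME) {
			/* lazytime: only the timestamps changed since the
			 * last call; persisting them can be deferred */
			return;
		}
		if (flags & I_DIRTY_DATASYNC) {
			/* fdatasync() must persist this update */
		}
		/* plain I_DIRTY_SYNC: other inode metadata changed */
	}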
``write_inode``
this method is called when the VFS needs to write an inode to

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 74
SUBLEVEL = 75
EXTRAVERSION =
NAME = Trick or Treat
@@ -866,12 +866,12 @@ endif
# Initialize all stack variables with a zero value.
ifdef CONFIG_INIT_STACK_ALL_ZERO
# Future support for zero initialization is still being debated, see
# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being
# renamed or dropped.
KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
# https://github.com/llvm/llvm-project/issues/44842
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
endif
# While VLAs have been removed, GCC produces unreachable stack probes
# for the randomize_kstack_offset feature. Disable it for all compilers.

View File

@@ -1742,7 +1742,6 @@ config CMDLINE
choice
prompt "Kernel command line type" if CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
depends on ATAGS
config CMDLINE_FROM_BOOTLOADER
bool "Use bootloader kernel arguments if available"

View File

@@ -23,6 +23,7 @@ SECTIONS
*(.ARM.extab*)
*(.note.*)
*(.rel.*)
*(.printk_index)
/*
* Discard any r/w data - this produces a link error if we have any,
* which is required for PIC decompression. Local data generates
@@ -57,6 +58,7 @@ SECTIONS
*(.rodata)
*(.rodata.*)
*(.data.rel.ro)
*(.data.rel.ro.*)
}
.piggydata : {
*(.piggydata)

View File

@@ -471,7 +471,7 @@
marvell,function = "spi0";
};
spi0cs1_pins: spi0cs1-pins {
spi0cs2_pins: spi0cs2-pins {
marvell,pins = "mpp26";
marvell,function = "spi0";
};
@@ -506,7 +506,7 @@
};
};
/* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */
/* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */
};
&uart0 {

View File

@@ -585,7 +585,7 @@
clocks = <&camera 1>;
clock-names = "extclk";
samsung,camclk-out = <1>;
gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>;
gpios = <&gpm1 6 GPIO_ACTIVE_LOW>;
port {
is_s5k6a3_ep: endpoint {

View File

@@ -95,7 +95,7 @@
};
&ehci {
samsung,vbus-gpio = <&gpx3 5 1>;
samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>;
status = "okay";
phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>;
phy-names = "hsic0", "hsic1";

View File

@@ -84,6 +84,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};

View File

@@ -163,6 +163,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x40000>;
ranges = <0 0x00900000 0x40000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};

View File

@@ -263,6 +263,10 @@
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
};
&hdmi {
ddc-i2c-bus = <&i2c2>;
};
&i2c_intern {
pmic@8 {
compatible = "fsl,pfuze100";
@@ -387,7 +391,7 @@
/* HDMI_CTRL */
&i2c2 {
clock-frequency = <375000>;
clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
};

View File

@@ -9,12 +9,18 @@
ocram2: sram@940000 {
compatible = "mmio-sram";
reg = <0x00940000 0x20000>;
ranges = <0 0x00940000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
ocram3: sram@960000 {
compatible = "mmio-sram";
reg = <0x00960000 0x20000>;
ranges = <0 0x00960000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};

View File

@@ -117,6 +117,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6SL_CLK_OCRAM>;
};

View File

@@ -117,6 +117,9 @@
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
};
intc: interrupt-controller@a01000 {

View File

@@ -164,12 +164,18 @@
ocram_s: sram@8f8000 {
compatible = "mmio-sram";
reg = <0x008f8000 0x4000>;
ranges = <0 0x008f8000 0x4000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6SX_CLK_OCRAM_S>;
};
ocram: sram@900000 {
compatible = "mmio-sram";
reg = <0x00900000 0x20000>;
ranges = <0 0x00900000 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
clocks = <&clks IMX6SX_CLK_OCRAM>;
};

View File

@@ -206,12 +206,7 @@
interrupt-parent = <&gpio2>;
interrupts = <29 0>;
pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
ti,x-min = /bits/ 16 <0>;
ti,x-max = /bits/ 16 <0>;
ti,y-min = /bits/ 16 <0>;
ti,y-max = /bits/ 16 <0>;
ti,pressure-max = /bits/ 16 <0>;
ti,x-plate-ohms = /bits/ 16 <400>;
touchscreen-max-pressure = <255>;
wakeup-source;
};
};

View File

@@ -10,6 +10,11 @@
ocp@f1000000 {
pinctrl: pin-controller@10000 {
/* Non-default UART pins */
pmx_uart0: pmx-uart0 {
marvell,pins = "mpp4", "mpp5";
};
pmx_power_hdd: pmx-power-hdd {
marvell,pins = "mpp10";
marvell,function = "gpo";
@@ -213,22 +218,11 @@
&mdio {
status = "okay";
ethphy0: ethernet-phy@0 {
reg = <0>;
};
ethphy1: ethernet-phy@8 {
reg = <8>;
};
};
&eth0 {
status = "okay";
ethernet0-port@0 {
phy-handle = <&ethphy0>;
};
};
&eth1 {
status = "okay";
ethernet1-port@0 {

View File

@@ -346,7 +346,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
addr = start + i * PMD_SIZE;
domain = get_domain_name(pmd);
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
note_page(st, addr, 3, pmd_val(*pmd), domain);
note_page(st, addr, 4, pmd_val(*pmd), domain);
else
walk_pte(st, pmd, addr, domain);

View File

@@ -264,12 +264,17 @@ void __init kasan_init(void)
/*
* 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
* so we need to map this area.
* so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
* VMALLOC support KASAN will manage this region dynamically,
* refer to kasan_populate_vmalloc() and ARM's implementation of
* module_alloc().
* 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
* ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
* use kasan_populate_zero_shadow.
*/
create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE));
if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));
/*
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so

View File

@@ -300,7 +300,11 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
#ifdef CONFIG_ARM_LPAE
.prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.prot_sect = PMD_TYPE_SECT,
#endif
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {

View File

@@ -603,6 +603,23 @@ config ARM64_ERRATUM_1530923
config ARM64_WORKAROUND_REPEAT_TLBI
bool
config ARM64_ERRATUM_2441007
bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
default y
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A55 erratum #2441007.
Under very rare circumstances, affected Cortex-A55 CPUs
may not handle a race between a break-before-make sequence on one
CPU, and another CPU accessing the same page. This could allow a
store to a page that has been unmapped.
Work around this by adding the affected CPUs to the list that needs
TLB sequences to be done twice.
If unsure, say Y.
config ARM64_ERRATUM_1286807
bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
default y

View File

@@ -912,7 +912,7 @@
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy0>, <&usb3_phy0>;
phy-names = "usb2-phy", "usb3-phy";
snps,dis-u2-freeclk-exists-quirk;
snps,gfladj-refclk-lpm-sel-quirk;
};
};
@@ -953,7 +953,7 @@
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy1>, <&usb3_phy1>;
phy-names = "usb2-phy", "usb3-phy";
snps,dis-u2-freeclk-exists-quirk;
snps,gfladj-refclk-lpm-sel-quirk;
};
};

View File

@@ -967,6 +967,7 @@
interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gauge>;
power-supplies = <&bq25895>;
maxim,over-heat-temp = <700>;
maxim,over-volt = <4500>;
maxim,rsns-microohm = <5000>;

View File

@@ -131,12 +131,6 @@
>;
};
main_usbss0_pins_default: main-usbss0-pins-default {
pinctrl-single,pins = <
J721E_IOPAD(0x120, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */
>;
};
vdd_sd_dv_pins_default: vdd-sd-dv-pins-default {
pinctrl-single,pins = <
J721E_IOPAD(0xd0, PIN_OUTPUT, 7) /* (T5) SPI0_D1.GPIO0_55 */
@@ -144,6 +138,14 @@
};
};
&main_pmx1 {
main_usbss0_pins_default: main-usbss0-pins-default {
pinctrl-single,pins = <
J721E_IOPAD(0x04, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */
>;
};
};
&wkup_uart0 {
/* Wakeup UART is used by System firmware */
status = "reserved";

View File

@@ -295,7 +295,16 @@
main_pmx0: pinctrl@11c000 {
compatible = "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x11c000 0x00 0x2b4>;
reg = <0x00 0x11c000 0x00 0x10c>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xffffffff>;
};
main_pmx1: pinctrl@11c11c {
compatible = "pinctrl-single";
/* Proxy 0 addressing */
reg = <0x00 0x11c11c 0x00 0xc>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xffffffff>;

View File

@@ -214,6 +214,11 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
{
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
{
/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */

View File

@@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old = 0, new;
new = aarch64_insn_gen_nop();
/*
* When using mcount, callsites in modules may have been initialized to
* call an arbitrary module PLT (which redirects to the _mcount stub)
* rather than the ftrace PLT we'll use at runtime (which redirects to
* the ftrace trampoline). We can ignore the old PLT when initializing
* the callsite.
*
* Note: 'mod' is only set at module load time.
*/
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
}

View File

@@ -22,46 +22,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
u64 mpidr;
if (cpuid_topo->package_id != -1)
goto topology_populated;
mpidr = read_cpuid_mpidr();
/* Uniprocessor systems can rely on default topology values */
if (mpidr & MPIDR_UP_BITMASK)
return;
/*
* This would be the place to create cpu topology based on MPIDR.
*
* However, it cannot be trusted to depict the actual topology; some
* pieces of the architecture enforce an artificial cap on Aff0 values
* (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
* artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
* having absolutely no relationship to the actual underlying system
* topology, and cannot be reasonably used as core / package ID.
*
* If the MT bit is set, Aff0 *could* be used to define a thread ID, but
* we still wouldn't be able to obtain a sane core ID. This means we
* need to entirely ignore MPIDR for any topology deduction.
*/
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = cpuid;
cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
cpuid_topo->thread_id, mpidr);
topology_populated:
update_siblings_masks(cpuid);
}
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{

View File

@@ -75,5 +75,6 @@ int memory_add_physaddr_to_nid(u64 addr)
return 0;
return nid;
}
EXPORT_SYMBOL(memory_add_physaddr_to_nid);
#endif
#endif

View File

@@ -86,7 +86,7 @@ static __init void prom_init_mem(void)
pr_debug("Assume 128MB RAM\n");
break;
}
if (!memcmp(prom_init, prom_init + mem, 32))
if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32))
break;
}
lowmem = mem;
@@ -159,7 +159,7 @@ void __init bcm47xx_prom_highmem_init(void)
off = EXTVBASE + __pa(off);
for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) {
if (!memcmp(prom_init, (void *)(off + extmem), 16))
if (!memcmp((void *)prom_init, (void *)(off + extmem), 16))
break;
}
extmem -= lowmem;

View File

@@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
{
struct xtalk_bridge_platform_data *bd;
struct sgi_w1_platform_data *wd;
struct platform_device *pdev;
struct platform_device *pdev_wd;
struct platform_device *pdev_bd;
struct resource w1_res;
unsigned long offset;
offset = NODE_OFFSET(nasid);
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
if (!wd)
goto no_mem;
if (!wd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
return;
}
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
offset + (widget << SWIN_SIZE_BITS));
@@ -46,22 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
w1_res.end = w1_res.start + 3;
w1_res.flags = IORESOURCE_MEM;
pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
if (!pdev) {
kfree(wd);
goto no_mem;
pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
if (!pdev_wd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_kfree_wd;
}
platform_device_add_resources(pdev, &w1_res, 1);
platform_device_add_data(pdev, wd, sizeof(*wd));
platform_device_add(pdev);
if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
goto err_put_pdev_wd;
}
if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
goto err_put_pdev_wd;
}
if (platform_device_add(pdev_wd)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
goto err_put_pdev_wd;
}
/* platform_device_add_data() duplicates the data */
kfree(wd);
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd)
goto no_mem;
pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
if (!pdev) {
kfree(bd);
goto no_mem;
if (!bd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_unregister_pdev_wd;
}
pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
if (!pdev_bd) {
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
goto err_kfree_bd;
}
@@ -82,13 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
bd->io.flags = IORESOURCE_IO;
bd->io_offset = offset;
platform_device_add_data(pdev, bd, sizeof(*bd));
platform_device_add(pdev);
if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
goto err_put_pdev_bd;
}
if (platform_device_add(pdev_bd)) {
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
goto err_put_pdev_bd;
}
/* platform_device_add_data() duplicates the data */
kfree(bd);
pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
return;
no_mem:
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
err_put_pdev_bd:
platform_device_put(pdev_bd);
err_kfree_bd:
kfree(bd);
err_unregister_pdev_wd:
platform_device_unregister(pdev_wd);
return;
err_put_pdev_wd:
platform_device_put(pdev_wd);
err_kfree_wd:
kfree(wd);
return;
}
static int probe_one_port(nasid_t nasid, int widget, int masterwid)

View File

@@ -154,7 +154,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
else
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5))
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4)
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
endif
else ifdef CONFIG_PPC_BOOK3E_64
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64

View File

@@ -34,6 +34,7 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
$(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
$(LINUXINCLUDE)

View File

@@ -0,0 +1,51 @@
/*
* e500v1 Power ISA Device Tree Source (include)
*
* Copyright 2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/ {
cpus {
power-isa-version = "2.03";
power-isa-b; // Base
power-isa-e; // Embedded
power-isa-atb; // Alternate Time Base
power-isa-cs; // Cache Specification
power-isa-e.le; // Embedded.Little-Endian
power-isa-e.pm; // Embedded.Performance Monitor
power-isa-ecl; // Embedded Cache Locking
power-isa-mmc; // Memory Coherence
power-isa-sp; // Signal Processing Engine
power-isa-sp.fs; // SPE.Embedded Float Scalar Single
power-isa-sp.fv; // SPE.Embedded Float Vector
mmu-type = "power-embedded";
};
};

View File

@@ -7,7 +7,7 @@
/dts-v1/;
/include/ "e500v2_power_isa.dtsi"
/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8540ADS";

View File

@@ -7,7 +7,7 @@
/dts-v1/;
/include/ "e500v2_power_isa.dtsi"
/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8541CDS";

View File

@@ -7,7 +7,7 @@
/dts-v1/;
/include/ "e500v2_power_isa.dtsi"
/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8555CDS";

View File

@@ -7,7 +7,7 @@
/dts-v1/;
/include/ "e500v2_power_isa.dtsi"
/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8560ADS";

View File

@@ -41,6 +41,7 @@ CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_LIBNVDIMM=m
CONFIG_PAPR_SCM=m
CONFIG_PPC_SVM=y
# CONFIG_PPC_PMAC is not set

View File

@@ -8,6 +8,18 @@
#include <linux/types.h>
#include <linux/compat.h>
/*
* long long munging:
* The 32 bit ABI passes long longs in an odd even register pair.
* High and low parts are swapped depending on endian mode,
* so define a macro (similar to mips linux32) to handle that.
*/
#ifdef __LITTLE_ENDIAN__
#define merge_64(low, high) (((u64)high << 32) | low)
#else
#define merge_64(high, low) (((u64)high << 32) | low)
#endif
struct rtas_args;
asmlinkage long sys_mmap(unsigned long addr, size_t len,
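Illustrative only (not from this patch): a hypothetical compat wrapper using
the macro above to rebuild a 64-bit file position from the two 32-bit halves
the 32-bit ABI passes in a register pair; example_* is a made-up name.

	static inline loff_t example_compat_pos(u32 pos1, u32 pos2)
	{
		/* merge_64() picks the right high/low ordering for the
		 * kernel's endianness, so callers simply pass the two
		 * registers in argument order */
		return merge_64(pos1, pos2);
	}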

View File

@@ -140,7 +140,13 @@ int arch_prepare_kprobe(struct kprobe *p)
preempt_disable();
prev = get_kprobe(p->addr - 1);
preempt_enable_no_resched();
if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
/*
* When prev is a ftrace-based kprobe, we don't have an insn, and it
* doesn't probe for prefixed instruction.
*/
if (prev && !kprobe_ftrace(prev) &&
ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}

View File

@@ -330,6 +330,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
INIT_LIST_HEAD(&pdn->list);
parent = of_get_parent(dn);
pdn->parent = parent ? PCI_DN(parent) : NULL;
of_node_put(parent);
if (pdn->parent)
list_add_tail(&pdn->list, &pdn->parent->child_list);

View File

@@ -56,18 +56,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
}
/*
* long long munging:
* The 32 bit ABI passes long longs in an odd even register pair.
* High and low parts are swapped depending on endian mode,
* so define a macro (similar to mips linux32) to handle that.
*/
#ifdef __LITTLE_ENDIAN__
#define merge_64(low, high) ((u64)high << 32) | low
#else
#define merge_64(high, low) ((u64)high << 32) | low
#endif
compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
u32 reg6, u32 pos1, u32 pos2)
{
@@ -94,7 +82,7 @@ asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2,
u32 len1, u32 len2)
{
return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2,
return ksys_fallocate(fd, mode, merge_64(offset1, offset2),
merge_64(len1, len2));
}

View File

@@ -99,8 +99,8 @@ long ppc64_personality(unsigned long personality)
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low)
{
return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low,
(u64)len_high << 32 | len_low, advice);
return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low),
merge_64(len_high, len_low), advice);
}
SYSCALL_DEFINE0(switch_endian)

View File

@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/prctl.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/reg.h>

View File

@@ -892,6 +892,7 @@ static void opal_export_attrs(void)
kobj = kobject_create_and_add("exports", opal_kobj);
if (!kobj) {
pr_warn("kobject_create_and_add() of exports failed\n");
of_node_put(np);
return;
}

View File

@@ -324,7 +324,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
* So no unpacking needs to be done.
*/
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
VPHN_FLAG_VCPU, smp_processor_id());
VPHN_FLAG_VCPU, hard_smp_processor_id());
if (rc != H_SUCCESS) {
pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
goto out;

View File

@@ -211,8 +211,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
dev_err(&pdev->dev,
"node %pOF has an invalid fsl,msi phandle %u\n",
hose->dn, np->phandle);
of_node_put(np);
return -EINVAL;
}
of_node_put(np);
}
for_each_pci_msi_entry(entry, pdev) {

View File

@@ -46,7 +46,7 @@ config RISCV
select CLINT_TIMER if !MMU
select COMMON_CLK
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_EARLY_IOREMAP

View File

@@ -39,6 +39,7 @@ else
endif
ifeq ($(CONFIG_LD_IS_LLD),y)
ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0)
KBUILD_CFLAGS += -mno-relax
KBUILD_AFLAGS += -mno-relax
ifndef CONFIG_AS_IS_LLVM
@@ -46,6 +47,7 @@ ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif
endif
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima

View File

@@ -101,9 +101,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -115,22 +115,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
#ifdef CONFIG_64BIT
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
__io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
#endif
#include <asm-generic/io.h>

View File

@@ -260,10 +260,10 @@ static void __init parse_dtb(void)
pr_info("Machine model: %s\n", name);
dump_stack_set_arch_desc("%s (DT)", name);
}
return;
} else {
pr_err("No DTB passed to the kernel\n");
}
pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
pr_info("Forcing kernel command line to: %s\n", boot_command_line);

View File

@@ -53,6 +53,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
unsigned int curr_cpuid;
curr_cpuid = smp_processor_id();
store_cpu_topology(curr_cpuid);
numa_store_cpu_info(curr_cpuid);
numa_add_cpu(curr_cpuid);
@@ -165,9 +166,9 @@ asmlinkage __visible void smp_callin(void)
mmgrab(mm);
current->active_mm = mm;
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);
numa_add_cpu(curr_cpuid);
update_siblings_masks(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
/*

View File

@@ -18,9 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}

View File

@@ -188,7 +188,8 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
}
break;
case EXC_LOAD_PAGE_FAULT:
if (!(vma->vm_flags & VM_READ)) {
/* Write implies read */
if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
return true;
}
break;

View File

@@ -4,7 +4,7 @@
#include <asm-generic/sections.h>
extern long __machvec_start, __machvec_end;
extern char __machvec_start[], __machvec_end[];
extern char __uncached_start, __uncached_end;
extern char __start_eh_frame[], __stop_eh_frame[];

View File

@@ -20,8 +20,8 @@
#define MV_NAME_SIZE 32
#define for_each_mv(mv) \
for ((mv) = (struct sh_machine_vector *)&__machvec_start; \
(mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \
for ((mv) = (struct sh_machine_vector *)__machvec_start; \
(mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \
(mv)++)
static struct sh_machine_vector * __init get_mv_byname(const char *name)
@@ -87,8 +87,8 @@ void __init sh_mv_setup(void)
if (!machvec_selected) {
unsigned long machvec_size;
machvec_size = ((unsigned long)&__machvec_end -
(unsigned long)&__machvec_start);
machvec_size = ((unsigned long)__machvec_end -
(unsigned long)__machvec_start);
/*
* Sanity check for machvec section alignment. Ensure
@@ -102,7 +102,7 @@ void __init sh_mv_setup(void)
* vector (usually the only one) from .machvec.init.
*/
if (machvec_size >= sizeof(struct sh_machine_vector))
sh_mv = *(struct sh_machine_vector *)&__machvec_start;
sh_mv = *(struct sh_machine_vector *)__machvec_start;
}
pr_notice("Booting machvec: %s\n", get_system_type());

View File

@@ -94,7 +94,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)

View File

@@ -529,7 +529,7 @@ struct hv_enlightened_vmcs {
u64 guest_rip;
u32 hv_clean_fields;
u32 hv_padding_32;
u32 padding32_1;
u32 hv_synthetic_controls;
struct {
u32 nested_flush_hypercall:1;
@@ -537,7 +537,7 @@ struct hv_enlightened_vmcs {
u32 reserved:30;
} __packed hv_enlightenments_control;
u32 hv_vp_id;
u32 padding32_2;
u64 hv_vm_id;
u64 partition_assist_page;
u64 padding64_4[4];

View File

@@ -9,6 +9,7 @@
struct ucode_patch {
struct list_head plist;
void *data; /* Intel uses only this one */
unsigned int size;
u32 patch_id;
u16 equiv_cpu;
};

View File

@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/tboot.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/msr-index.h>
#include <asm/processor.h>
#include <asm/vmx.h>
#include "cpu.h"
#undef pr_fmt
#define pr_fmt(fmt) "x86/cpu: " fmt

View File

@@ -29,15 +29,26 @@
void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
{
struct mce m;
int lsb;
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
return;
/*
* Even if the ->validation_bits are set for address mask,
* to be extra safe, check and reject an error radius '0',
* and fall back to the default page size.
*/
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
lsb = find_first_bit((void *)&mem_err->physical_addr_mask, PAGE_SHIFT);
else
lsb = PAGE_SHIFT;
mce_setup(&m);
m.bank = -1;
/* Fake a memory read error with unknown channel */
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;
m.misc = (MCI_MISC_ADDR_PHYS << 6) | lsb;
if (severity >= GHES_SEV_RECOVERABLE)
m.status |= MCI_STATUS_UC;
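Worked example (illustrative, not part of the patch), assuming PAGE_SHIFT == 12:

	u64 mask = 0xffffffffffffffc0ULL;	/* lowest set bit is bit 6 */
	int lsb = find_first_bit((void *)&mask, PAGE_SHIFT);	/* -> 6, i.e. 64-byte granularity */
	/* mask == 0 has no bit set within the first PAGE_SHIFT bits, so
	 * find_first_bit() returns PAGE_SHIFT and the code above falls
	 * back to the default 4 KiB page granularity */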

View File

@@ -782,6 +782,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
kfree(patch);
return -EINVAL;
}
patch->size = *patch_size;
mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
proc_id = mc_hdr->processor_rev_id;
@@ -863,7 +864,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
return ret;
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
return ret;
}

View File

@@ -420,6 +420,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
struct pseudo_lock_region *plr = rdtgrp->plr;
u32 rmid_p, closid_p;
unsigned long i;
u64 saved_msr;
#ifdef CONFIG_KASAN
/*
* The registers used for local register variables are also used
@@ -463,6 +464,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
* the buffer and evict pseudo-locked memory read earlier from the
* cache.
*/
saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
@@ -514,7 +516,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
__wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);
/* Re-enable the hardware prefetcher(s) */
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
local_irq_enable();
plr->thread_done = 1;
@@ -871,6 +873,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
static int measure_cycles_lat_fn(void *_plr)
{
struct pseudo_lock_region *plr = _plr;
u32 saved_low, saved_high;
unsigned long i;
u64 start, end;
void *mem_r;
@@ -879,6 +882,7 @@ static int measure_cycles_lat_fn(void *_plr)
/*
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
mem_r = READ_ONCE(plr->kmem);
/*
@@ -895,7 +899,7 @@ static int measure_cycles_lat_fn(void *_plr)
end = rdtsc_ordered();
trace_pseudo_lock_mem_latency((u32)(end - start));
}
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
local_irq_enable();
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
@@ -940,6 +944,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
struct perf_event *miss_event, *hit_event;
int hit_pmcnum, miss_pmcnum;
u32 saved_low, saved_high;
unsigned int line_size;
unsigned int size;
unsigned long i;
@@ -973,6 +978,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
/*
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
/* Initialize rest of local variables */
@@ -1031,7 +1037,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
*/
rmb();
/* Re-enable hardware prefetchers */
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
local_irq_enable();
out_hit:
perf_event_release_kernel(hit_event);

View File

@@ -1936,7 +1936,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->modrm_reg == VCPU_SREG_SS)
if (seg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);

View File

@@ -2313,9 +2313,14 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
* are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
* on the related bits (if supported by the CPU) in the hope that
* we can avoid VMWrites during vmx_set_efer().
*
* Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
* loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
* do the same for L2.
*/
exec_control = __vm_entry_controls_get(vmcs01);
exec_control |= vmcs12->vm_entry_controls;
exec_control |= (vmcs12->vm_entry_controls &
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
if (cpu_has_load_ia32_efer()) {
if (guest_efer & EFER_LMA)
@@ -3821,7 +3826,16 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
u32 intr_info = nr | INTR_INFO_VALID_MASK;
if (vcpu->arch.exception.has_error_code) {
vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
/*
* Intel CPUs do not generate error codes with bits 31:16 set,
* and more importantly VMX disallows setting bits 31:16 in the
* injected error code for VM-Entry. Drop the bits to mimic
* hardware and avoid inducing failure on nested VM-Entry if L1
* chooses to inject the exception back to L2. AMD CPUs _do_
* generate "full" 32-bit error codes, so KVM allows userspace
* to inject exception error codes with bits 31:16 set.
*/
vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
@@ -4251,14 +4265,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
nested_vmx_abort(vcpu,
VMX_ABORT_SAVE_GUEST_MSR_FAIL);
}
/*
* Drop what we picked up for L2 via vmx_complete_interrupts. It is
* preserved above and would only end up incorrectly in L1.
*/
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
}
/*
@@ -4598,6 +4604,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
WARN_ON_ONCE(nested_early_check);
}
/*
* Drop events/exceptions that were queued for re-injection to L2
* (picked up via vmx_complete_interrupts()), as well as exceptions
* that were pending for L2. Note, this must NOT be hoisted above
* prepare_vmcs12(), events/exceptions queued for re-injection need to
* be captured in vmcs12 (see vmcs12_save_pending_event()).
*/
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
/* Update any VMCS fields that might have changed while L2 ran */

View File

@@ -1676,7 +1676,17 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
kvm_deliver_exception_payload(vcpu);
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
/*
* Despite the error code being architecturally defined as 32
* bits, and the VMCS field being 32 bits, Intel CPUs and thus
* VMX don't actually support setting bits 31:16. Hardware
* will (should) never provide a bogus error code, but AMD CPUs
* do generate error codes with bits 31:16 set, and so KVM's
* ABI lets userspace shove in arbitrary 32-bit values. Drop
* the upper bits to avoid VM-Fail, losing information that
* doesn't really exist is preferable to killing the VM.
*/
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}

View File

@@ -759,6 +759,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
static const struct trap_info zero = { };
unsigned out;
trace_xen_cpu_load_idt(desc);
@@ -768,7 +769,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
out = xen_convert_trap_info(desc, traps, false);
memset(&traps[out], 0, sizeof(traps[0]));
traps[out] = zero;
xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))

View File

@@ -950,7 +950,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
u64 bps_limit, unsigned long *wait)
{
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
u64 bytes_allowed, extra_bytes;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);
@@ -967,10 +967,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = tg->td->throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
tmp = bps_limit * jiffy_elapsed_rnd;
do_div(tmp, HZ);
bytes_allowed = tmp;
bytes_allowed = mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed_rnd,
(u64)HZ);
if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
if (wait)
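Why the helper matters (illustrative values, not from the patch): with a huge
byte limit the plain 64-bit product wraps before the division, while
mul_u64_u64_div_u64() keeps a 128-bit intermediate.

	u64 bps_limit = U64_MAX;	/* effectively "unlimited" group */
	u64 jiffies_rnd = 8;

	u64 wrapped = bps_limit * jiffies_rnd / HZ;	/* product overflows u64 */
	u64 exact = mul_u64_u64_div_u64(bps_limit, jiffies_rnd, HZ);	/* correct */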

View File

@@ -841,8 +841,11 @@ int wbt_init(struct request_queue *q)
rwb->last_comp = rwb->last_issue = jiffies;
rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT;
rwb->wc = 1;
rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
wbt_queue_depth_changed(&rwb->rqos);
/*
* Assign rwb and add the stats callback.
@@ -853,11 +856,6 @@ int wbt_init(struct request_queue *q)
blk_stat_add_callback(q, rwb->cb);
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
wbt_queue_depth_changed(&rwb->rqos);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
return 0;
err_free:

View File

@@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req)
return -ENOSYS;
}
static int akcipher_default_set_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
if (!alg->set_priv_key)
alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);

View File

@@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = {
static struct kobject *fpdt_kobj;
#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT
#include <linux/processor.h>
static bool fpdt_address_valid(u64 address)
{
/*
* On some systems the table contains invalid addresses
* with unsupported high address bits set, check for this.
*/
return !(address >> boot_cpu_data.x86_phys_bits);
}
#else
static bool fpdt_address_valid(u64 address)
{
return true;
}
#endif
static int fpdt_process_subtable(u64 address, u32 subtable_type)
{
struct fpdt_subtable_header *subtable_header;
@@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
u32 length, offset;
int result;
if (!fpdt_address_valid(address)) {
pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address);
return -EINVAL;
}
subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
if (!subtable_header)
return -ENOMEM;

View File

@@ -496,6 +496,22 @@ static const struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
},
},
{
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Satellite Z830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"),
},
},
{
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Portege Z830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"),
},
},
/*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.

View File

@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
ghes_estatus_cache_add(generic, estatus);
}
if (task_work_pending && current->mm != &init_mm) {
if (task_work_pending && current->mm) {
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,

View File

@@ -198,7 +198,24 @@ static const struct x86_cpu_id storage_d3_cpu_ids[] = {
{}
};
static const struct dmi_system_id force_storage_d3_dmi[] = {
{
/*
* _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
* but .NVME is needed to get StorageD3Enable node
* https://bugzilla.kernel.org/show_bug.cgi?id=216440
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
}
},
{}
};
bool force_storage_d3(void)
{
return x86_match_cpu(storage_d3_cpu_ids);
const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
}

View File

@@ -451,14 +451,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
}
}
hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
/*
* Too many sub-nodes most likely means having something wrong with
* the firmware.
*/
child_nodes = of_get_child_count(dev->of_node);
if (child_nodes > AHCI_MAX_PORTS) {
rc = -EINVAL;
goto err_out;
}
/*
* If no sub-node was found, we still need to set nports to
* one in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
if (!child_nodes)
if (child_nodes)
hpriv->nports = child_nodes;
else
hpriv->nports = 1;
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);

View File

@@ -698,4 +698,23 @@ void __init init_cpu_topology(void)
else if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
if (cpuid_topo->package_id != -1)
goto topology_populated;
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = cpuid;
cpuid_topo->package_id = cpu_to_node(cpuid);
pr_debug("CPU%u: package %d core %d thread %d\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
cpuid_topo->thread_id);
topology_populated:
update_siblings_masks(cpuid);
}
#endif

View File

@@ -1351,10 +1351,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
if (ret)
if (ret) {
sock_shutdown(nbd);
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
}
flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */

View File

@@ -2274,15 +2274,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
set_bit(HCI_QUIRK_VALID_LE_STATES,
&hdev->quirks);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
case 0x0b: /* SfP */
case 0x0c: /* WsP */
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
fallthrough;
case 0x0c: /* WsP */
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
@@ -2290,11 +2295,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
/* Valid LE States quirk for JfP/ThP family */
if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
set_bit(HCI_QUIRK_VALID_LE_STATES,
&hdev->quirks);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2361,8 +2361,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
/* Valid LE States quirk for JfP/ThP family */
if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
/* Set Valid LE States quirk */
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
/* Setup MSFT Extension support */

View File

@@ -2435,15 +2435,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
/* WMT cmd/event doesn't follow up the generic HCI cmd/event handling,
* it needs constantly polling control pipe until the host received the
* WMT event, thus, we should require to specifically acquire PM counter
* on the USB to prevent the interface from entering auto suspended
* while WMT cmd/event in progress.
*/
err = usb_autopm_get_interface(data->intf);
if (err < 0)
goto err_free_wc;
err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
usb_autopm_put_interface(data->intf);
goto err_free_wc;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
usb_autopm_put_interface(data->intf);
if (err < 0)
goto err_free_wc;

View File

@@ -490,6 +490,11 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
if (percpu_init_rwsem(&hu->proto_lock)) {
BT_ERR("Can't allocate semaphore structure");
kfree(hu);
return -ENOMEM;
}
tty->disc_data = hu;
hu->tty = tty;
@@ -502,8 +507,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
percpu_init_rwsem(&hu->proto_lock);
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);

View File

@@ -301,11 +301,12 @@ int hci_uart_register_device(struct hci_uart *hu,
serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops);
if (percpu_init_rwsem(&hu->proto_lock))
return -ENOMEM;
err = serdev_device_open(hu->serdev);
if (err)
return err;
percpu_init_rwsem(&hu->proto_lock);
goto err_rwsem;
err = p->open(hu);
if (err)
@@ -378,6 +379,8 @@ err_alloc:
p->close(hu);
err_open:
serdev_device_close(hu->serdev);
err_rwsem:
percpu_free_rwsem(&hu->proto_lock);
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
@@ -399,5 +402,6 @@ void hci_uart_unregister_device(struct hci_uart *hu)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
percpu_free_rwsem(&hu->proto_lock);
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);

View File

@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
if ((int)res.a0 < 0)
return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
default:
return -EIO;
}
}

View File

@@ -270,13 +270,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
goto err;
}
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
dev_err(rngc->dev, "Can't get interrupt working.\n");
goto err;
}
init_completion(&rngc->rng_op_done);
rngc->rng.name = pdev->name;
@@ -290,6 +283,13 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
dev_err(rngc->dev, "Can't get interrupt working.\n");
return ret;
}
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {

View File

@@ -34,6 +34,7 @@
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
#define CCU_DIV_CTL_GATE_REF_BUF BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
#define CCU_DIV_RST_DELAY_US 1
@@ -170,6 +171,40 @@ static int ccu_div_gate_is_enabled(struct clk_hw *hw)
return !!(val & CCU_DIV_CTL_EN);
}
static int ccu_div_buf_enable(struct clk_hw *hw)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long flags;
spin_lock_irqsave(&div->lock, flags);
regmap_update_bits(div->sys_regs, div->reg_ctl,
CCU_DIV_CTL_GATE_REF_BUF, 0);
spin_unlock_irqrestore(&div->lock, flags);
return 0;
}
static void ccu_div_buf_disable(struct clk_hw *hw)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long flags;
spin_lock_irqsave(&div->lock, flags);
regmap_update_bits(div->sys_regs, div->reg_ctl,
CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
spin_unlock_irqrestore(&div->lock, flags);
}
static int ccu_div_buf_is_enabled(struct clk_hw *hw)
{
struct ccu_div *div = to_ccu_div(hw);
u32 val = 0;
regmap_read(div->sys_regs, div->reg_ctl, &val);
return !(val & CCU_DIV_CTL_GATE_REF_BUF);
}
static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -323,6 +358,7 @@ static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};
@@ -441,6 +477,9 @@ static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
continue;
}
if (!strcmp("div_buf", name))
continue;
bits[didx] = ccu_div_bits[bidx];
bits[didx].div = div;
@@ -477,6 +516,21 @@ static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
&ccu_div_dbgfs_fixed_clkdiv_fops);
}
static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
struct ccu_div *div = to_ccu_div(hw);
struct ccu_div_dbgfs_bit *bit;
bit = kmalloc(sizeof(*bit), GFP_KERNEL);
if (!bit)
return;
*bit = ccu_div_bits[3];
bit->div = div;
debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
&ccu_div_dbgfs_bit_fops);
}
static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
struct ccu_div *div = to_ccu_div(hw);
@@ -489,6 +543,7 @@ static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL
#endif /* !CONFIG_DEBUG_FS */
@@ -520,6 +575,13 @@ static const struct clk_ops ccu_div_gate_ops = {
.debug_init = ccu_div_gate_debug_init
};
static const struct clk_ops ccu_div_buf_ops = {
.enable = ccu_div_buf_enable,
.disable = ccu_div_buf_disable,
.is_enabled = ccu_div_buf_is_enabled,
.debug_init = ccu_div_buf_debug_init
};
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
.round_rate = ccu_div_fixed_round_rate,
@@ -566,6 +628,8 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
} else if (div_init->type == CCU_DIV_GATE) {
hw_init.ops = &ccu_div_gate_ops;
div->divider = div_init->divider;
} else if (div_init->type == CCU_DIV_BUF) {
hw_init.ops = &ccu_div_buf_ops;
} else if (div_init->type == CCU_DIV_FIXED) {
hw_init.ops = &ccu_div_fixed_ops;
div->divider = div_init->divider;
@@ -579,6 +643,7 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
goto err_free_div;
}
parent_data.fw_name = div_init->parent_name;
parent_data.name = div_init->parent_name;
hw_init.parent_data = &parent_data;
hw_init.num_parents = 1;

View File

@@ -13,6 +13,14 @@
#include <linux/bits.h>
#include <linux/of.h>
/*
* CCU Divider private clock IDs
* @CCU_SYS_SATA_CLK: CCU SATA internal clock
* @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock
*/
#define CCU_SYS_SATA_CLK -1
#define CCU_SYS_XGMAC_CLK -2
/*
* CCU Divider private flags
* @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1.
@@ -31,11 +39,13 @@
* enum ccu_div_type - CCU Divider types
* @CCU_DIV_VAR: Clocks gate with variable divider.
* @CCU_DIV_GATE: Clocks gate with fixed divider.
* @CCU_DIV_BUF: Clock gate with no divider.
* @CCU_DIV_FIXED: Ungateable clock with fixed divider.
*/
enum ccu_div_type {
CCU_DIV_VAR,
CCU_DIV_GATE,
CCU_DIV_BUF,
CCU_DIV_FIXED
};

View File

@@ -76,6 +76,16 @@
.divider = _divider \
}
#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \
{ \
.id = _id, \
.name = _name, \
.parent_name = _pname, \
.base = _base, \
.type = CCU_DIV_BUF, \
.flags = _flags \
}
#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \
{ \
.id = _id, \
@@ -188,11 +198,14 @@ static const struct ccu_div_rst_map axi_rst_map[] = {
* for the SoC devices registers IO-operations.
*/
static const struct ccu_div_info sys_info[] = {
CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk",
"sata_clk", CCU_SYS_SATA_REF_BASE, 4,
CLK_SET_RATE_GATE,
CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED |
CCU_DIV_RESET_DOMAIN),
CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
"sys_sata_clk", CCU_SYS_SATA_REF_BASE,
CLK_SET_RATE_PARENT),
CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk",
"pcie_clk", CCU_SYS_APB_BASE, 5,
CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
@@ -204,10 +217,12 @@ static const struct ccu_div_info sys_info[] = {
"eth_clk", CCU_SYS_GMAC1_BASE, 5),
CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk",
"eth_clk", 10),
CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
"eth_clk", CCU_SYS_XGMAC_BASE, 8),
CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk",
"eth_clk", CCU_SYS_XGMAC_BASE, 1),
CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
"sys_xgmac_clk", 8),
CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk",
"eth_clk", 10),
"sys_xgmac_clk", 8),
CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk",
"eth_clk", CCU_SYS_USB_BASE, 10),
CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk",
@@ -396,6 +411,9 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
init.base = info->base;
init.sys_regs = data->sys_regs;
init.divider = info->divider;
} else if (init.type == CCU_DIV_BUF) {
init.base = info->base;
init.sys_regs = data->sys_regs;
} else {
init.divider = info->divider;
}

View File

@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -502,6 +503,8 @@ struct bcm2835_clock_data {
bool low_jitter;
u32 tcnt_mux;
bool round_up;
};
struct bcm2835_gate_data {
@@ -967,7 +970,7 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
return div;
}
static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
unsigned long parent_rate,
u32 div)
{
@@ -994,12 +997,34 @@ static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
return temp;
}
static unsigned long bcm2835_round_rate(unsigned long rate)
{
unsigned long scaler;
unsigned long limit;
limit = rate / 100000;
scaler = 1;
while (scaler < limit)
scaler *= 10;
/*
* If increasing a clock by less than 0.1% changes it
* from ..999.. to ..000.., round up.
*/
if ((rate + scaler - 1) / scaler % 1000 == 0)
rate = roundup(rate, scaler);
return rate;
}
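/*
 * A worked example of the rounding rule above (illustrative figures only,
 * assuming integer division and that roundup() rounds up to the next
 * multiple of its second argument):
 *
 *	rate   = 47999999
 *	limit  = 47999999 / 100000 = 479
 *	scaler = 1 -> 10 -> 100 -> 1000    (first power of ten >= limit)
 *	(47999999 + 999) / 1000 = 48000, and 48000 % 1000 == 0,
 *	so the rate is rounded up: roundup(47999999, 1000) = 48000000.
 *
 *	For rate = 48000001 the same test gives 48001 % 1000 != 0,
 *	so the rate is returned unchanged.
 */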
static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct bcm2835_cprman *cprman = clock->cprman;
const struct bcm2835_clock_data *data = clock->data;
unsigned long rate;
u32 div;
if (data->int_bits == 0 && data->frac_bits == 0)
@@ -1007,7 +1032,12 @@ static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
div = cprman_read(cprman, data->div_reg);
return bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
rate = bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
if (data->round_up)
rate = bcm2835_round_rate(rate);
return rate;
}
static void bcm2835_clock_wait_busy(struct bcm2835_clock *clock)
@@ -1785,7 +1815,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.load_mask = CM_PLLC_LOADPER,
.hold_mask = CM_PLLC_HOLDPER,
.fixed_divider = 1,
.flags = CLK_SET_RATE_PARENT),
.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
/*
* PLLD is the display PLL, used to drive DSI display panels.
@@ -2144,7 +2174,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.div_reg = CM_UARTDIV,
.int_bits = 10,
.frac_bits = 12,
.tcnt_mux = 28),
.tcnt_mux = 28,
.round_up = true),
/* TV encoder clock. Only operating frequency is 108Mhz. */
[BCM2835_CLOCK_VEC] = REGISTER_PER_CLK(

View File

@@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
if (!clk_data)
if (!clk_data) {
of_node_put(parent_np);
return;
}
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
of_node_put(parent_np);
if (!gbase)
return;

View File

@@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
if (!clk_data)
if (!clk_data) {
of_node_put(parent_np);
return;
}
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase) {
of_node_put(parent_np);
pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
of_node_put(parent_np);
if (!cpupll_base) {
pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);

View File

@@ -622,7 +622,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
/* P-Bus (BCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);

View File

@@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = {
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *np = pdev->dev.of_node, *parent_np;
const struct oxnas_stdclk_data *data;
const struct of_device_id *id;
struct regmap *regmap;
@@ -219,7 +219,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
return -ENODEV;
data = id->data;
regmap = syscon_node_to_regmap(of_get_parent(np));
parent_np = of_get_parent(np);
regmap = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
return PTR_ERR(regmap);

View File

@@ -1063,8 +1063,13 @@ static void __init _clockgen_init(struct device_node *np, bool legacy);
*/
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node)
_clockgen_init(of_get_parent(np), true);
if (!clockgen.node) {
struct device_node *parent_np;
parent_np = of_get_parent(np);
_clockgen_init(parent_np, true);
of_node_put(parent_np);
}
}
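/*
 * The same leak fix recurs throughout this series (berlin2, berlin2q,
 * oxnas, clockgen, meson): of_get_parent() returns the parent node with
 * its refcount raised, so the caller must drop it with of_node_put() on
 * every path, success or error.  A minimal sketch of the corrected shape,
 * for illustration only, not code from any one of these drivers:
 *
 *	parent_np = of_get_parent(np);
 *	regmap = syscon_node_to_regmap(parent_np);
 *	of_node_put(parent_np);
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 */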
/* Legacy node */
@@ -1159,6 +1164,7 @@ static struct clk * __init create_sysclk(const char *name)
sysclk = of_get_child_by_name(clockgen.node, "sysclk");
if (sysclk) {
clk = sysclk_from_fixed(sysclk, name);
of_node_put(sysclk);
if (!IS_ERR(clk))
return clk;
}

View File

@@ -1204,7 +1204,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = {
.model = IDT_VC6_5P49V6901,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
.flags = VC5_HAS_PFD_FREQ_DBL,
.flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
};
static const struct vc5_chip_info idt_5p49v6965_info = {

View File

@@ -690,7 +690,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
pr_warn("%s: failed to attached the power domain %d\n",
name, ret);
platform_device_add(pdev);
ret = platform_device_add(pdev);
if (ret) {
platform_device_put(pdev);
return ERR_PTR(ret);
}
/* For API backwards compatiblilty, simply return NULL for success */
return NULL;
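/*
 * Sketch of the lifecycle the fix above relies on (hypothetical device
 * name, not taken from this driver): platform_device_alloc() hands back a
 * refcounted device, platform_device_add() can fail, and on failure the
 * reference must be dropped with platform_device_put():
 *
 *	pdev = platform_device_alloc("example-dev", PLATFORM_DEVID_NONE);
 *	if (!pdev)
 *		return ERR_PTR(-ENOMEM);
 *	ret = platform_device_add(pdev);
 *	if (ret) {
 *		platform_device_put(pdev);
 *		return ERR_PTR(ret);
 *	}
 */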

View File

@@ -19,8 +19,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
};
#define GATE_MFG(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
&mtk_clk_gate_ops_setclr)
GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \
&mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)

View File

@@ -38,6 +38,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
struct meson_aoclk_reset_controller *rstc;
struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
struct device_node *np;
struct regmap *regmap;
int ret, clkid;
@@ -49,7 +50,9 @@ int meson_aoclkc_probe(struct platform_device *pdev)
if (!rstc)
return -ENOMEM;
regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
np = of_get_parent(dev->of_node);
regmap = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap\n");
return PTR_ERR(regmap);

View File

@@ -18,6 +18,7 @@ int meson_eeclkc_probe(struct platform_device *pdev)
{
const struct meson_eeclkc_data *data;
struct device *dev = &pdev->dev;
struct device_node *np;
struct regmap *map;
int ret, i;
@@ -26,7 +27,9 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return -EINVAL;
/* Get the hhi system controller node */
map = syscon_node_to_regmap(of_get_parent(dev->of_node));
np = of_get_parent(dev->of_node);
map = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");

View File

@@ -3717,12 +3717,15 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
struct device_node *parent_np;
const char *notifier_clk_name;
struct clk *notifier_clk;
struct regmap *map;
int i, ret;
map = syscon_node_to_regmap(of_get_parent(np));
parent_np = of_get_parent(np);
map = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(map)) {
pr_err("failed to get HHI regmap - Trying obsolete regs\n");
return;

Some files were not shown because too many files have changed in this diff.