Merge remote-tracking branch 'stable/linux-5.15.y' into rpi-5.15.y
@@ -188,7 +188,7 @@ Description:
Raw capacitance measurement from channel Y. Units after
application of scale and offset are nanofarads.

What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw
What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw
KernelVersion: 3.2
Contact: linux-iio@vger.kernel.org
Description:
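A minimal user-space sketch of how such a raw capacitance attribute is typically consumed; the device index, channel number and paths below are assumptions for illustration, not taken from this patch:

/* Illustration only: reads an assumed channel 0 on iio:device0 and applies
 * the usual IIO sysfs convention value = (raw + offset) * scale, which for
 * capacitance channels yields nanofarads as documented above. */
#include <stdio.h>

static double read_val(const char *path, double fallback)
{
	double v = fallback;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%lf", &v) != 1)
			v = fallback;
		fclose(f);
	}
	return v;
}

int main(void)
{
	double raw    = read_val("/sys/bus/iio/devices/iio:device0/in_capacitance0_raw", 0.0);
	double offset = read_val("/sys/bus/iio/devices/iio:device0/in_capacitance0_offset", 0.0);
	double scale  = read_val("/sys/bus/iio/devices/iio:device0/in_capacitance0_scale", 1.0);

	printf("capacitance: %f nF\n", (raw + offset) * scale);
	return 0;
}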
@@ -68,6 +68,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A55      | #1530923        | ARM64_ERRATUM_1530923       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A55      | #2441007        | ARM64_ERRATUM_2441007       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075        |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A57      | #852523         | N/A                         |
@@ -76,10 +78,14 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A57      | #1319537        | ARM64_ERRATUM_1319367       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A57      | #1742098        | ARM64_ERRATUM_1742098       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A72      | #853709         | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A72      | #1319367        | ARM64_ERRATUM_1319367       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A72      | #1655431        | ARM64_ERRATUM_1742098       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A76      | #1188873,1418040| ARM64_ERRATUM_1418040       |

@@ -274,6 +274,9 @@ or bottom half).
This is specifically for the inode itself being marked dirty,
not its data. If the update needs to be persisted by fdatasync(),
then I_DIRTY_DATASYNC will be set in the flags argument.
I_DIRTY_TIME will be set in the flags in case lazytime is enabled
and struct inode has times updated since the last ->dirty_inode
call.

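A minimal sketch of how a filesystem might interpret those flags in its ->dirty_inode hook; the myfs_* names are hypothetical, only the flag handling mirrors the text above:

#include <linux/fs.h>

/* Hypothetical helper; a real filesystem would journal the update here. */
static void myfs_record_inode(struct inode *inode) { }

/* Sketch of a ->dirty_inode hook interpreting the flags described above. */
static void myfs_dirty_inode(struct inode *inode, int flags)
{
	if (flags & I_DIRTY_TIME) {
		/* lazytime: only the timestamps changed since the last
		 * call; persisting them can safely be deferred. */
		return;
	}

	if (flags & I_DIRTY_DATASYNC) {
		/* fdatasync() must be able to persist this update, so
		 * record it for the journal now. */
		myfs_record_inode(inode);
	}
}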
``write_inode``
this method is called when the VFS needs to write an inode to

Makefile | 12
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 74
SUBLEVEL = 76
EXTRAVERSION =
NAME = Trick or Treat

@@ -844,12 +844,12 @@ endif

# Initialize all stack variables with a zero value.
ifdef CONFIG_INIT_STACK_ALL_ZERO
# Future support for zero initialization is still being debated, see
# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being
# renamed or dropped.
KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
# https://github.com/llvm/llvm-project/issues/44842
KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
endif
endif

# While VLAs have been removed, GCC produces unreachable stack probes
# for the randomize_kstack_offset feature. Disable it for all compilers.
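To make the effect of -ftrivial-auto-var-init=zero concrete, here is a small illustrative C fragment (not from the patch): without the flag the read below returns whatever happens to be on the stack; with CONFIG_INIT_STACK_ALL_ZERO the compiler zero-fills the local.

struct req {
	int timeout;
	int retries;
};

int current_timeout(void)
{
	struct req r;		/* deliberately left uninitialized */

	/* With -ftrivial-auto-var-init=zero this read is guaranteed to
	 * see 0 rather than leftover stack contents, closing a class of
	 * uninitialized-use and information-leak bugs. */
	return r.timeout;
}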
@@ -870,7 +870,9 @@ else
DEBUG_CFLAGS += -g
endif

ifndef CONFIG_AS_IS_LLVM
ifdef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS += -g
else
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif

@@ -1741,7 +1741,6 @@ config CMDLINE
|
||||
choice
|
||||
prompt "Kernel command line type" if CMDLINE != ""
|
||||
default CMDLINE_FROM_BOOTLOADER
|
||||
depends on ATAGS
|
||||
|
||||
config CMDLINE_FROM_BOOTLOADER
|
||||
bool "Use bootloader kernel arguments if available"
|
||||
|
||||
@@ -23,6 +23,7 @@ SECTIONS
|
||||
*(.ARM.extab*)
|
||||
*(.note.*)
|
||||
*(.rel.*)
|
||||
*(.printk_index)
|
||||
/*
|
||||
* Discard any r/w data - this produces a link error if we have any,
|
||||
* which is required for PIC decompression. Local data generates
|
||||
@@ -57,6 +58,7 @@ SECTIONS
|
||||
*(.rodata)
|
||||
*(.rodata.*)
|
||||
*(.data.rel.ro)
|
||||
*(.data.rel.ro.*)
|
||||
}
|
||||
.piggydata : {
|
||||
*(.piggydata)
|
||||
|
||||
@@ -471,7 +471,7 @@
|
||||
marvell,function = "spi0";
|
||||
};
|
||||
|
||||
spi0cs1_pins: spi0cs1-pins {
|
||||
spi0cs2_pins: spi0cs2-pins {
|
||||
marvell,pins = "mpp26";
|
||||
marvell,function = "spi0";
|
||||
};
|
||||
@@ -506,7 +506,7 @@
|
||||
};
|
||||
};
|
||||
|
||||
/* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */
|
||||
/* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */
|
||||
};
|
||||
|
||||
&uart0 {
|
||||
|
||||
@@ -585,7 +585,7 @@
|
||||
clocks = <&camera 1>;
|
||||
clock-names = "extclk";
|
||||
samsung,camclk-out = <1>;
|
||||
gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>;
|
||||
gpios = <&gpm1 6 GPIO_ACTIVE_LOW>;
|
||||
|
||||
port {
|
||||
is_s5k6a3_ep: endpoint {
|
||||
|
||||
@@ -95,7 +95,7 @@
|
||||
};
|
||||
|
||||
&ehci {
|
||||
samsung,vbus-gpio = <&gpx3 5 1>;
|
||||
samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>;
|
||||
status = "okay";
|
||||
phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>;
|
||||
phy-names = "hsic0", "hsic1";
|
||||
|
||||
@@ -84,6 +84,9 @@
|
||||
ocram: sram@900000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00900000 0x20000>;
|
||||
ranges = <0 0x00900000 0x20000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6QDL_CLK_OCRAM>;
|
||||
};
|
||||
|
||||
|
||||
@@ -163,6 +163,9 @@
|
||||
ocram: sram@900000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00900000 0x40000>;
|
||||
ranges = <0 0x00900000 0x40000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6QDL_CLK_OCRAM>;
|
||||
};
|
||||
|
||||
|
||||
@@ -263,6 +263,10 @@
|
||||
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
|
||||
&hdmi {
|
||||
ddc-i2c-bus = <&i2c2>;
|
||||
};
|
||||
|
||||
&i2c_intern {
|
||||
pmic@8 {
|
||||
compatible = "fsl,pfuze100";
|
||||
@@ -387,7 +391,7 @@
|
||||
|
||||
/* HDMI_CTRL */
|
||||
&i2c2 {
|
||||
clock-frequency = <375000>;
|
||||
clock-frequency = <100000>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_i2c2>;
|
||||
};
|
||||
|
||||
@@ -9,12 +9,18 @@
|
||||
ocram2: sram@940000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00940000 0x20000>;
|
||||
ranges = <0 0x00940000 0x20000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6QDL_CLK_OCRAM>;
|
||||
};
|
||||
|
||||
ocram3: sram@960000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00960000 0x20000>;
|
||||
ranges = <0 0x00960000 0x20000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6QDL_CLK_OCRAM>;
|
||||
};
|
||||
|
||||
|
||||
@@ -117,6 +117,9 @@
|
||||
ocram: sram@900000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00900000 0x20000>;
|
||||
ranges = <0 0x00900000 0x20000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6SL_CLK_OCRAM>;
|
||||
};
|
||||
|
||||
|
||||
@@ -117,6 +117,9 @@
|
||||
ocram: sram@900000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00900000 0x20000>;
|
||||
ranges = <0 0x00900000 0x20000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
};
|
||||
|
||||
intc: interrupt-controller@a01000 {
|
||||
|
||||
@@ -164,12 +164,18 @@
|
||||
ocram_s: sram@8f8000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x008f8000 0x4000>;
|
||||
ranges = <0 0x008f8000 0x4000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6SX_CLK_OCRAM_S>;
|
||||
};
|
||||
|
||||
ocram: sram@900000 {
|
||||
compatible = "mmio-sram";
|
||||
reg = <0x00900000 0x20000>;
|
||||
ranges = <0 0x00900000 0x20000>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
clocks = <&clks IMX6SX_CLK_OCRAM>;
|
||||
};
|
||||
|
||||
|
||||
@@ -206,12 +206,7 @@
|
||||
interrupt-parent = <&gpio2>;
|
||||
interrupts = <29 0>;
|
||||
pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
|
||||
ti,x-min = /bits/ 16 <0>;
|
||||
ti,x-max = /bits/ 16 <0>;
|
||||
ti,y-min = /bits/ 16 <0>;
|
||||
ti,y-max = /bits/ 16 <0>;
|
||||
ti,pressure-max = /bits/ 16 <0>;
|
||||
ti,x-plate-ohms = /bits/ 16 <400>;
|
||||
touchscreen-max-pressure = <255>;
|
||||
wakeup-source;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -10,6 +10,11 @@
|
||||
|
||||
ocp@f1000000 {
|
||||
pinctrl: pin-controller@10000 {
|
||||
/* Non-default UART pins */
|
||||
pmx_uart0: pmx-uart0 {
|
||||
marvell,pins = "mpp4", "mpp5";
|
||||
};
|
||||
|
||||
pmx_power_hdd: pmx-power-hdd {
|
||||
marvell,pins = "mpp10";
|
||||
marvell,function = "gpo";
|
||||
@@ -213,22 +218,11 @@
|
||||
&mdio {
|
||||
status = "okay";
|
||||
|
||||
ethphy0: ethernet-phy@0 {
|
||||
reg = <0>;
|
||||
};
|
||||
|
||||
ethphy1: ethernet-phy@8 {
|
||||
reg = <8>;
|
||||
};
|
||||
};
|
||||
|
||||
ð0 {
|
||||
status = "okay";
|
||||
ethernet0-port@0 {
|
||||
phy-handle = <ðphy0>;
|
||||
};
|
||||
};
|
||||
|
||||
ð1 {
|
||||
status = "okay";
|
||||
ethernet1-port@0 {
|
||||
|
||||
@@ -346,7 +346,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
|
||||
addr = start + i * PMD_SIZE;
|
||||
domain = get_domain_name(pmd);
|
||||
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
|
||||
note_page(st, addr, 3, pmd_val(*pmd), domain);
|
||||
note_page(st, addr, 4, pmd_val(*pmd), domain);
|
||||
else
|
||||
walk_pte(st, pmd, addr, domain);
|
||||
|
||||
|
||||
@@ -264,12 +264,17 @@ void __init kasan_init(void)
|
||||
|
||||
/*
|
||||
* 1. The module global variables are in MODULES_VADDR ~ MODULES_END,
|
||||
* so we need to map this area.
|
||||
* so we need to map this area if CONFIG_KASAN_VMALLOC=n. With
|
||||
* VMALLOC support KASAN will manage this region dynamically,
|
||||
* refer to kasan_populate_vmalloc() and ARM's implementation of
|
||||
* module_alloc().
|
||||
* 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
|
||||
* ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
|
||||
* use kasan_populate_zero_shadow.
|
||||
*/
|
||||
create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE));
|
||||
if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES))
|
||||
create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END));
|
||||
create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));
|
||||
|
||||
/*
|
||||
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so
|
||||
|
||||
@@ -300,7 +300,11 @@ static struct mem_type mem_types[] __ro_after_init = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
|
||||
L_PTE_XN | L_PTE_RDONLY,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
.prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2,
|
||||
#else
|
||||
.prot_sect = PMD_TYPE_SECT,
|
||||
#endif
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_ROM] = {
|
||||
|
||||
@@ -487,6 +487,22 @@ config ARM64_ERRATUM_834220

If unsure, say Y.

config ARM64_ERRATUM_1742098
bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
depends on COMPAT
default y
help
This option removes the AES hwcap for aarch32 user-space to
workaround erratum 1742098 on Cortex-A57 and Cortex-A72.

Affected parts may corrupt the AES state if an interrupt is
taken between a pair of AES instructions. These instructions
are only present if the cryptography extensions are present.
All software should have a fallback implementation for CPUs
that don't implement the cryptography extensions.

If unsure, say Y.

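The help text above relies on 32-bit user space probing the hwcaps rather than assuming AES instructions exist. A minimal sketch of such a probe (illustrative only, not part of the patch; HWCAP2_AES here is the 32-bit ARM definition from the uapi headers):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP2_AES for 32-bit ARM */

int main(void)
{
	/* When the erratum workaround masks the hwcap, this takes the
	 * software fallback path even though the CPU implements the
	 * crypto extensions in hardware. */
	if (getauxval(AT_HWCAP2) & HWCAP2_AES)
		printf("hardware AES available\n");
	else
		printf("AES hwcap not advertised, using software fallback\n");
	return 0;
}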
config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data"
depends on COMPAT
@@ -596,6 +612,23 @@ config ARM64_ERRATUM_1530923
config ARM64_WORKAROUND_REPEAT_TLBI
bool

config ARM64_ERRATUM_2441007
bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
default y
select ARM64_WORKAROUND_REPEAT_TLBI
help
This option adds a workaround for ARM Cortex-A55 erratum #2441007.

Under very rare circumstances, affected Cortex-A55 CPUs
may not handle a race between a break-before-make sequence on one
CPU, and another CPU accessing the same page. This could allow a
store to a page that has been unmapped.

Work around this by adding the affected CPUs to the list that needs
TLB sequences to be done twice.

If unsure, say Y.

config ARM64_ERRATUM_1286807
bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
default y

@@ -912,7 +912,7 @@
|
||||
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
|
||||
phys = <&usb3_phy0>, <&usb3_phy0>;
|
||||
phy-names = "usb2-phy", "usb3-phy";
|
||||
snps,dis-u2-freeclk-exists-quirk;
|
||||
snps,gfladj-refclk-lpm-sel-quirk;
|
||||
};
|
||||
|
||||
};
|
||||
@@ -953,7 +953,7 @@
|
||||
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
|
||||
phys = <&usb3_phy1>, <&usb3_phy1>;
|
||||
phy-names = "usb2-phy", "usb3-phy";
|
||||
snps,dis-u2-freeclk-exists-quirk;
|
||||
snps,gfladj-refclk-lpm-sel-quirk;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -967,6 +967,7 @@
|
||||
interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_gauge>;
|
||||
power-supplies = <&bq25895>;
|
||||
maxim,over-heat-temp = <700>;
|
||||
maxim,over-volt = <4500>;
|
||||
maxim,rsns-microohm = <5000>;
|
||||
|
||||
@@ -131,12 +131,6 @@
|
||||
>;
|
||||
};
|
||||
|
||||
main_usbss0_pins_default: main-usbss0-pins-default {
|
||||
pinctrl-single,pins = <
|
||||
J721E_IOPAD(0x120, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */
|
||||
>;
|
||||
};
|
||||
|
||||
vdd_sd_dv_pins_default: vdd-sd-dv-pins-default {
|
||||
pinctrl-single,pins = <
|
||||
J721E_IOPAD(0xd0, PIN_OUTPUT, 7) /* (T5) SPI0_D1.GPIO0_55 */
|
||||
@@ -144,6 +138,14 @@
|
||||
};
|
||||
};
|
||||
|
||||
&main_pmx1 {
|
||||
main_usbss0_pins_default: main-usbss0-pins-default {
|
||||
pinctrl-single,pins = <
|
||||
J721E_IOPAD(0x04, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
&wkup_uart0 {
|
||||
/* Wakeup UART is used by System firmware */
|
||||
status = "reserved";
|
||||
|
||||
@@ -295,7 +295,16 @@
|
||||
main_pmx0: pinctrl@11c000 {
|
||||
compatible = "pinctrl-single";
|
||||
/* Proxy 0 addressing */
|
||||
reg = <0x00 0x11c000 0x00 0x2b4>;
|
||||
reg = <0x00 0x11c000 0x00 0x10c>;
|
||||
#pinctrl-cells = <1>;
|
||||
pinctrl-single,register-width = <32>;
|
||||
pinctrl-single,function-mask = <0xffffffff>;
|
||||
};
|
||||
|
||||
main_pmx1: pinctrl@11c11c {
|
||||
compatible = "pinctrl-single";
|
||||
/* Proxy 0 addressing */
|
||||
reg = <0x00 0x11c11c 0x00 0xc>;
|
||||
#pinctrl-cells = <1>;
|
||||
pinctrl-single,register-width = <32>;
|
||||
pinctrl-single,function-mask = <0xffffffff>;
|
||||
|
||||
@@ -40,7 +40,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte);
|
||||
void mte_copy_page_tags(void *kto, const void *kfrom);
|
||||
void mte_thread_init_user(void);
|
||||
void mte_thread_switch(struct task_struct *next);
|
||||
void mte_cpu_setup(void);
|
||||
void mte_suspend_enter(void);
|
||||
void mte_suspend_exit(void);
|
||||
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
|
||||
long get_mte_ctrl(struct task_struct *task);
|
||||
int mte_ptrace_copy_tags(struct task_struct *child, long request,
|
||||
@@ -69,6 +71,9 @@ static inline void mte_thread_switch(struct task_struct *next)
|
||||
static inline void mte_suspend_enter(void)
|
||||
{
|
||||
}
|
||||
static inline void mte_suspend_exit(void)
|
||||
{
|
||||
}
|
||||
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
|
||||
{
|
||||
return 0;
|
||||
|
||||
@@ -273,6 +273,8 @@
|
||||
#define TCR_NFD1 (UL(1) << 54)
|
||||
#define TCR_E0PD0 (UL(1) << 55)
|
||||
#define TCR_E0PD1 (UL(1) << 56)
|
||||
#define TCR_TCMA0 (UL(1) << 57)
|
||||
#define TCR_TCMA1 (UL(1) << 58)
|
||||
|
||||
/*
|
||||
* TTBR.
|
||||
|
||||
@@ -1094,10 +1094,6 @@
|
||||
#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
|
||||
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
|
||||
|
||||
/* TCR EL1 Bit Definitions */
|
||||
#define SYS_TCR_EL1_TCMA1 (BIT(58))
|
||||
#define SYS_TCR_EL1_TCMA0 (BIT(57))
|
||||
|
||||
/* GCR_EL1 Definitions */
|
||||
#define SYS_GCR_EL1_RRND (BIT(16))
|
||||
#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
|
||||
|
||||
@@ -214,6 +214,11 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
|
||||
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_2441007
|
||||
{
|
||||
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_2441009
|
||||
{
|
||||
/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
|
||||
@@ -350,6 +355,14 @@ static const struct midr_range erratum_1463225[] = {
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1742098
|
||||
static struct midr_range broken_aarch32_aes[] = {
|
||||
MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
|
||||
{
|
||||
@@ -559,6 +572,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
/* Cortex-A510 r0p0-r1p1 */
|
||||
CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1742098
|
||||
{
|
||||
.desc = "ARM erratum 1742098",
|
||||
.capability = ARM64_WORKAROUND_1742098,
|
||||
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
}
|
||||
|
||||
@@ -79,6 +79,7 @@
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/cpu_ops.h>
|
||||
#include <asm/fpsimd.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/kvm_host.h>
|
||||
#include <asm/mmu_context.h>
|
||||
@@ -1902,7 +1903,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
|
||||
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
|
||||
{
|
||||
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
|
||||
isb();
|
||||
|
||||
mte_cpu_setup();
|
||||
|
||||
/*
|
||||
* Clear the tags in the zero page. This needs to be done via the
|
||||
@@ -1915,6 +1917,14 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
|
||||
}
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
|
||||
static void elf_hwcap_fixup(void)
|
||||
{
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1742098
|
||||
if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
|
||||
compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
|
||||
#endif /* ARM64_ERRATUM_1742098 */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
|
||||
{
|
||||
@@ -2942,8 +2952,10 @@ void __init setup_cpu_features(void)
|
||||
setup_system_capabilities();
|
||||
setup_elf_hwcaps(arm64_elf_hwcaps);
|
||||
|
||||
if (system_supports_32bit_el0())
|
||||
if (system_supports_32bit_el0()) {
|
||||
setup_elf_hwcaps(compat_elf_hwcaps);
|
||||
elf_hwcap_fixup();
|
||||
}
|
||||
|
||||
if (system_uses_ttbr0_pan())
|
||||
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
|
||||
@@ -2995,6 +3007,7 @@ static int enable_mismatched_32bit_el0(unsigned int cpu)
|
||||
cpu_active_mask);
|
||||
get_cpu_device(lucky_winner)->offline_disabled = true;
|
||||
setup_elf_hwcaps(compat_elf_hwcaps);
|
||||
elf_hwcap_fixup();
|
||||
pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
|
||||
cpu, lucky_winner);
|
||||
return 0;
|
||||
|
||||
@@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
||||
unsigned long pc = rec->ip;
|
||||
u32 old = 0, new;
|
||||
|
||||
new = aarch64_insn_gen_nop();
|
||||
|
||||
/*
|
||||
* When using mcount, callsites in modules may have been initialized to
|
||||
* call an arbitrary module PLT (which redirects to the _mcount stub)
|
||||
* rather than the ftrace PLT we'll use at runtime (which redirects to
|
||||
* the ftrace trampoline). We can ignore the old PLT when initializing
|
||||
* the callsite.
|
||||
*
|
||||
* Note: 'mod' is only set at module load time.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
|
||||
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
|
||||
return aarch64_insn_patch_text_nosync((void *)pc, new);
|
||||
}
|
||||
|
||||
if (!ftrace_find_callable_addr(rec, mod, &addr))
|
||||
return -EINVAL;
|
||||
|
||||
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
|
||||
new = aarch64_insn_gen_nop();
|
||||
|
||||
return ftrace_modify_code(pc, old, new, true);
|
||||
}
|
||||
|
||||
@@ -213,6 +213,49 @@ void mte_thread_switch(struct task_struct *next)
|
||||
mte_check_tfsr_el1();
|
||||
}
|
||||
|
||||
void mte_cpu_setup(void)
|
||||
{
|
||||
u64 rgsr;
|
||||
|
||||
/*
|
||||
* CnP must be enabled only after the MAIR_EL1 register has been set
|
||||
* up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
|
||||
* lead to the wrong memory type being used for a brief window during
|
||||
* CPU power-up.
|
||||
*
|
||||
* CnP is not a boot feature so MTE gets enabled before CnP, but let's
|
||||
* make sure that is the case.
|
||||
*/
|
||||
BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
|
||||
BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
|
||||
|
||||
/* Normal Tagged memory type at the corresponding MAIR index */
|
||||
sysreg_clear_set(mair_el1,
|
||||
MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
|
||||
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
|
||||
MT_NORMAL_TAGGED));
|
||||
|
||||
write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
|
||||
|
||||
/*
|
||||
* If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
|
||||
* RGSR_EL1.SEED must be non-zero for IRG to produce
|
||||
* pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
|
||||
* must initialize it.
|
||||
*/
|
||||
rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
|
||||
SYS_RGSR_EL1_SEED_SHIFT;
|
||||
if (rgsr == 0)
|
||||
rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
|
||||
write_sysreg_s(rgsr, SYS_RGSR_EL1);
|
||||
|
||||
/* clear any pending tag check faults in TFSR*_EL1 */
|
||||
write_sysreg_s(0, SYS_TFSR_EL1);
|
||||
write_sysreg_s(0, SYS_TFSRE0_EL1);
|
||||
|
||||
local_flush_tlb_all();
|
||||
}
|
||||
|
||||
void mte_suspend_enter(void)
|
||||
{
|
||||
if (!system_supports_mte())
|
||||
@@ -229,6 +272,14 @@ void mte_suspend_enter(void)
|
||||
mte_check_tfsr_el1();
|
||||
}
|
||||
|
||||
void mte_suspend_exit(void)
|
||||
{
|
||||
if (!system_supports_mte())
|
||||
return;
|
||||
|
||||
mte_cpu_setup();
|
||||
}
|
||||
|
||||
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
|
||||
{
|
||||
u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
|
||||
|
||||
@@ -43,6 +43,8 @@ void notrace __cpu_suspend_exit(void)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
mte_suspend_exit();
|
||||
|
||||
/*
|
||||
* We are resuming from reset with the idmap active in TTBR0_EL1.
|
||||
* We must uninstall the idmap and restore the expected MMU
|
||||
|
||||
@@ -22,46 +22,6 @@
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/topology.h>
|
||||
|
||||
void store_cpu_topology(unsigned int cpuid)
|
||||
{
|
||||
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
|
||||
u64 mpidr;
|
||||
|
||||
if (cpuid_topo->package_id != -1)
|
||||
goto topology_populated;
|
||||
|
||||
mpidr = read_cpuid_mpidr();
|
||||
|
||||
/* Uniprocessor systems can rely on default topology values */
|
||||
if (mpidr & MPIDR_UP_BITMASK)
|
||||
return;
|
||||
|
||||
/*
|
||||
* This would be the place to create cpu topology based on MPIDR.
|
||||
*
|
||||
* However, it cannot be trusted to depict the actual topology; some
|
||||
* pieces of the architecture enforce an artificial cap on Aff0 values
|
||||
* (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
|
||||
* artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
|
||||
* having absolutely no relationship to the actual underlying system
|
||||
* topology, and cannot be reasonably used as core / package ID.
|
||||
*
|
||||
* If the MT bit is set, Aff0 *could* be used to define a thread ID, but
|
||||
* we still wouldn't be able to obtain a sane core ID. This means we
|
||||
* need to entirely ignore MPIDR for any topology deduction.
|
||||
*/
|
||||
cpuid_topo->thread_id = -1;
|
||||
cpuid_topo->core_id = cpuid;
|
||||
cpuid_topo->package_id = cpu_to_node(cpuid);
|
||||
|
||||
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
|
||||
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
|
||||
cpuid_topo->thread_id, mpidr);
|
||||
|
||||
topology_populated:
|
||||
update_siblings_masks(cpuid);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
static bool __init acpi_cpu_is_threaded(int cpu)
|
||||
{
|
||||
|
||||
@@ -2096,7 +2096,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
|
||||
|
||||
memset(entry, 0, esz);
|
||||
|
||||
while (len > 0) {
|
||||
while (true) {
|
||||
int next_offset;
|
||||
size_t byte_offset;
|
||||
|
||||
@@ -2109,6 +2109,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
|
||||
return next_offset;
|
||||
|
||||
byte_offset = next_offset * esz;
|
||||
if (byte_offset >= len)
|
||||
break;
|
||||
|
||||
id += next_offset;
|
||||
gpa += byte_offset;
|
||||
len -= byte_offset;
|
||||
|
||||
@@ -46,18 +46,20 @@
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KASAN_HW_TAGS
|
||||
#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
|
||||
#else
|
||||
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
|
||||
#elif defined(CONFIG_ARM64_MTE)
|
||||
/*
|
||||
* The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
|
||||
* TBI being enabled at EL1.
|
||||
*/
|
||||
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
|
||||
#else
|
||||
#define TCR_MTE_FLAGS 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
|
||||
* changed during __cpu_setup to Normal Tagged if the system supports MTE.
|
||||
* changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
|
||||
*/
|
||||
#define MAIR_EL1_SET \
|
||||
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
|
||||
@@ -421,46 +423,8 @@ SYM_FUNC_START(__cpu_setup)
|
||||
mov_q mair, MAIR_EL1_SET
|
||||
mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
|
||||
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
|
||||
TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
|
||||
TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
|
||||
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
/*
|
||||
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
|
||||
* (ID_AA64PFR1_EL1[11:8] > 1).
|
||||
*/
|
||||
mrs x10, ID_AA64PFR1_EL1
|
||||
ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
|
||||
cmp x10, #ID_AA64PFR1_MTE
|
||||
b.lt 1f
|
||||
|
||||
/* Normal Tagged memory type at the corresponding MAIR index */
|
||||
mov x10, #MAIR_ATTR_NORMAL_TAGGED
|
||||
bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
|
||||
|
||||
mov x10, #KERNEL_GCR_EL1
|
||||
msr_s SYS_GCR_EL1, x10
|
||||
|
||||
/*
|
||||
* If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
|
||||
* RGSR_EL1.SEED must be non-zero for IRG to produce
|
||||
* pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
|
||||
* must initialize it.
|
||||
*/
|
||||
mrs x10, CNTVCT_EL0
|
||||
ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
|
||||
csinc x10, x10, xzr, ne
|
||||
lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
|
||||
msr_s SYS_RGSR_EL1, x10
|
||||
|
||||
/* clear any pending tag check faults in TFSR*_EL1 */
|
||||
msr_s SYS_TFSR_EL1, xzr
|
||||
msr_s SYS_TFSRE0_EL1, xzr
|
||||
|
||||
/* set the TCR_EL1 bits */
|
||||
mov_q x10, TCR_MTE_FLAGS
|
||||
orr tcr, tcr, x10
|
||||
1:
|
||||
#endif
|
||||
tcr_clear_errata_bits tcr, x9, x5
|
||||
|
||||
#ifdef CONFIG_ARM64_VA_BITS_52
|
||||
|
||||
@@ -54,6 +54,7 @@ WORKAROUND_1418040
|
||||
WORKAROUND_1463225
|
||||
WORKAROUND_1508412
|
||||
WORKAROUND_1542419
|
||||
WORKAROUND_1742098
|
||||
WORKAROUND_2457168
|
||||
WORKAROUND_CAVIUM_23154
|
||||
WORKAROUND_CAVIUM_27456
|
||||
|
||||
@@ -75,5 +75,6 @@ int memory_add_physaddr_to_nid(u64 addr)
|
||||
return 0;
|
||||
return nid;
|
||||
}
|
||||
EXPORT_SYMBOL(memory_add_physaddr_to_nid);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -86,7 +86,7 @@ static __init void prom_init_mem(void)
|
||||
pr_debug("Assume 128MB RAM\n");
|
||||
break;
|
||||
}
|
||||
if (!memcmp(prom_init, prom_init + mem, 32))
|
||||
if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32))
|
||||
break;
|
||||
}
|
||||
lowmem = mem;
|
||||
@@ -159,7 +159,7 @@ void __init bcm47xx_prom_highmem_init(void)
|
||||
|
||||
off = EXTVBASE + __pa(off);
|
||||
for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) {
|
||||
if (!memcmp(prom_init, (void *)(off + extmem), 16))
|
||||
if (!memcmp((void *)prom_init, (void *)(off + extmem), 16))
|
||||
break;
|
||||
}
|
||||
extmem -= lowmem;
|
||||
|
||||
@@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
|
||||
{
|
||||
struct xtalk_bridge_platform_data *bd;
|
||||
struct sgi_w1_platform_data *wd;
|
||||
struct platform_device *pdev;
|
||||
struct platform_device *pdev_wd;
|
||||
struct platform_device *pdev_bd;
|
||||
struct resource w1_res;
|
||||
unsigned long offset;
|
||||
|
||||
offset = NODE_OFFSET(nasid);
|
||||
|
||||
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
|
||||
if (!wd)
|
||||
goto no_mem;
|
||||
if (!wd) {
|
||||
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
|
||||
return;
|
||||
}
|
||||
|
||||
snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx",
|
||||
offset + (widget << SWIN_SIZE_BITS));
|
||||
@@ -46,22 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
|
||||
w1_res.end = w1_res.start + 3;
|
||||
w1_res.flags = IORESOURCE_MEM;
|
||||
|
||||
pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
|
||||
if (!pdev) {
|
||||
kfree(wd);
|
||||
goto no_mem;
|
||||
pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO);
|
||||
if (!pdev_wd) {
|
||||
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
|
||||
goto err_kfree_wd;
|
||||
}
|
||||
platform_device_add_resources(pdev, &w1_res, 1);
|
||||
platform_device_add_data(pdev, wd, sizeof(*wd));
|
||||
platform_device_add(pdev);
|
||||
if (platform_device_add_resources(pdev_wd, &w1_res, 1)) {
|
||||
pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget);
|
||||
goto err_put_pdev_wd;
|
||||
}
|
||||
if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) {
|
||||
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
|
||||
goto err_put_pdev_wd;
|
||||
}
|
||||
if (platform_device_add(pdev_wd)) {
|
||||
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
|
||||
goto err_put_pdev_wd;
|
||||
}
|
||||
/* platform_device_add_data() duplicates the data */
|
||||
kfree(wd);
|
||||
|
||||
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
|
||||
if (!bd)
|
||||
goto no_mem;
|
||||
pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
|
||||
if (!pdev) {
|
||||
kfree(bd);
|
||||
goto no_mem;
|
||||
if (!bd) {
|
||||
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
|
||||
goto err_unregister_pdev_wd;
|
||||
}
|
||||
pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
|
||||
if (!pdev_bd) {
|
||||
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
|
||||
goto err_kfree_bd;
|
||||
}
|
||||
|
||||
|
||||
@@ -82,13 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
|
||||
bd->io.flags = IORESOURCE_IO;
|
||||
bd->io_offset = offset;
|
||||
|
||||
platform_device_add_data(pdev, bd, sizeof(*bd));
|
||||
platform_device_add(pdev);
|
||||
if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) {
|
||||
pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget);
|
||||
goto err_put_pdev_bd;
|
||||
}
|
||||
if (platform_device_add(pdev_bd)) {
|
||||
pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget);
|
||||
goto err_put_pdev_bd;
|
||||
}
|
||||
/* platform_device_add_data() duplicates the data */
|
||||
kfree(bd);
|
||||
pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
|
||||
return;
|
||||
|
||||
no_mem:
|
||||
pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
|
||||
err_put_pdev_bd:
|
||||
platform_device_put(pdev_bd);
|
||||
err_kfree_bd:
|
||||
kfree(bd);
|
||||
err_unregister_pdev_wd:
|
||||
platform_device_unregister(pdev_wd);
|
||||
return;
|
||||
err_put_pdev_wd:
|
||||
platform_device_put(pdev_wd);
|
||||
err_kfree_wd:
|
||||
kfree(wd);
|
||||
return;
|
||||
}
|
||||
|
||||
static int probe_one_port(nasid_t nasid, int widget, int masterwid)
|
||||
|
||||
@@ -154,7 +154,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
|
||||
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
|
||||
else
|
||||
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5))
|
||||
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4)
|
||||
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
|
||||
endif
|
||||
else ifdef CONFIG_PPC_BOOK3E_64
|
||||
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
|
||||
|
||||
@@ -34,6 +34,7 @@ endif
|
||||
|
||||
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
|
||||
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
|
||||
$(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
|
||||
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
|
||||
$(LINUXINCLUDE)
|
||||
|
||||
|
||||
arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi | 51 (new file)
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
* e500v1 Power ISA Device Tree Source (include)
|
||||
*
|
||||
* Copyright 2012 Freescale Semiconductor Inc.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* * Neither the name of Freescale Semiconductor nor the
|
||||
* names of its contributors may be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
*
|
||||
* ALTERNATIVELY, this software may be distributed under the terms of the
|
||||
* GNU General Public License ("GPL") as published by the Free Software
|
||||
* Foundation, either version 2 of that License or (at your option) any
|
||||
* later version.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
|
||||
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
|
||||
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/ {
|
||||
cpus {
|
||||
power-isa-version = "2.03";
|
||||
power-isa-b; // Base
|
||||
power-isa-e; // Embedded
|
||||
power-isa-atb; // Alternate Time Base
|
||||
power-isa-cs; // Cache Specification
|
||||
power-isa-e.le; // Embedded.Little-Endian
|
||||
power-isa-e.pm; // Embedded.Performance Monitor
|
||||
power-isa-ecl; // Embedded Cache Locking
|
||||
power-isa-mmc; // Memory Coherence
|
||||
power-isa-sp; // Signal Processing Engine
|
||||
power-isa-sp.fs; // SPE.Embedded Float Scalar Single
|
||||
power-isa-sp.fv; // SPE.Embedded Float Vector
|
||||
mmu-type = "power-embedded";
|
||||
};
|
||||
};
|
||||
@@ -7,7 +7,7 @@
|
||||
|
||||
/dts-v1/;
|
||||
|
||||
/include/ "e500v2_power_isa.dtsi"
|
||||
/include/ "e500v1_power_isa.dtsi"
|
||||
|
||||
/ {
|
||||
model = "MPC8540ADS";
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
|
||||
/dts-v1/;
|
||||
|
||||
/include/ "e500v2_power_isa.dtsi"
|
||||
/include/ "e500v1_power_isa.dtsi"
|
||||
|
||||
/ {
|
||||
model = "MPC8541CDS";
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
|
||||
/dts-v1/;
|
||||
|
||||
/include/ "e500v2_power_isa.dtsi"
|
||||
/include/ "e500v1_power_isa.dtsi"
|
||||
|
||||
/ {
|
||||
model = "MPC8555CDS";
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
|
||||
/dts-v1/;
|
||||
|
||||
/include/ "e500v2_power_isa.dtsi"
|
||||
/include/ "e500v1_power_isa.dtsi"
|
||||
|
||||
/ {
|
||||
model = "MPC8560ADS";
|
||||
|
||||
@@ -41,6 +41,7 @@ CONFIG_DTL=y
|
||||
CONFIG_SCANLOG=m
|
||||
CONFIG_PPC_SMLPAR=y
|
||||
CONFIG_IBMEBUS=y
|
||||
CONFIG_LIBNVDIMM=m
|
||||
CONFIG_PAPR_SCM=m
|
||||
CONFIG_PPC_SVM=y
|
||||
# CONFIG_PPC_PMAC is not set
|
||||
|
||||
@@ -8,6 +8,18 @@
#include <linux/types.h>
#include <linux/compat.h>

/*
* long long munging:
* The 32 bit ABI passes long longs in an odd even register pair.
* High and low parts are swapped depending on endian mode,
* so define a macro (similar to mips linux32) to handle that.
*/
#ifdef __LITTLE_ENDIAN__
#define merge_64(low, high) (((u64)high << 32) | low)
#else
#define merge_64(high, low) (((u64)high << 32) | low)
#endif

struct rtas_args;

asmlinkage long sys_mmap(unsigned long addr, size_t len,

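A short illustration of why the two per-endian definitions let callers stay identical (an assumption-labelled usage sketch, not code from the patch):

/* A compat handler always passes the registers in call order; the
 * macro's per-endian parameter naming decides which half is shifted
 * into the high word. */
static inline loff_t compat_merge_offset(u32 first_reg, u32 second_reg)
{
	/* big-endian: first_reg is the high half; little-endian: the low
	 * half.  Either way the reassembled 64-bit offset is correct. */
	return merge_64(first_reg, second_reg);
}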
@@ -140,7 +140,13 @@ int arch_prepare_kprobe(struct kprobe *p)
|
||||
preempt_disable();
|
||||
prev = get_kprobe(p->addr - 1);
|
||||
preempt_enable_no_resched();
|
||||
if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
|
||||
|
||||
/*
|
||||
* When prev is a ftrace-based kprobe, we don't have an insn, and it
|
||||
* doesn't probe for prefixed instruction.
|
||||
*/
|
||||
if (prev && !kprobe_ftrace(prev) &&
|
||||
ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
|
||||
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
@@ -330,6 +330,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
|
||||
INIT_LIST_HEAD(&pdn->list);
|
||||
parent = of_get_parent(dn);
|
||||
pdn->parent = parent ? PCI_DN(parent) : NULL;
|
||||
of_node_put(parent);
|
||||
if (pdn->parent)
|
||||
list_add_tail(&pdn->list, &pdn->parent->child_list);
|
||||
|
||||
|
||||
@@ -56,18 +56,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
|
||||
return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
|
||||
}
|
||||
|
||||
/*
|
||||
* long long munging:
|
||||
* The 32 bit ABI passes long longs in an odd even register pair.
|
||||
* High and low parts are swapped depending on endian mode,
|
||||
* so define a macro (similar to mips linux32) to handle that.
|
||||
*/
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
#define merge_64(low, high) ((u64)high << 32) | low
|
||||
#else
|
||||
#define merge_64(high, low) ((u64)high << 32) | low
|
||||
#endif
|
||||
|
||||
compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
|
||||
u32 reg6, u32 pos1, u32 pos2)
|
||||
{
|
||||
@@ -94,7 +82,7 @@ asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
|
||||
asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2,
|
||||
u32 len1, u32 len2)
|
||||
{
|
||||
return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2,
|
||||
return ksys_fallocate(fd, mode, merge_64(offset1, offset2),
|
||||
merge_64(len1, len2));
|
||||
}
|
||||
|
||||
|
||||
@@ -99,8 +99,8 @@ long ppc64_personality(unsigned long personality)
|
||||
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
|
||||
u32 len_high, u32 len_low)
|
||||
{
|
||||
return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low,
|
||||
(u64)len_high << 32 | len_low, advice);
|
||||
return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low),
|
||||
merge_64(len_high, len_low), advice);
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE0(switch_endian)
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/reg.h>
|
||||
|
||||
@@ -892,6 +892,7 @@ static void opal_export_attrs(void)
|
||||
kobj = kobject_create_and_add("exports", opal_kobj);
|
||||
if (!kobj) {
|
||||
pr_warn("kobject_create_and_add() of exports failed\n");
|
||||
of_node_put(np);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -324,7 +324,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
|
||||
* So no unpacking needs to be done.
|
||||
*/
|
||||
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
|
||||
VPHN_FLAG_VCPU, smp_processor_id());
|
||||
VPHN_FLAG_VCPU, hard_smp_processor_id());
|
||||
if (rc != H_SUCCESS) {
|
||||
pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
|
||||
goto out;
|
||||
|
||||
@@ -211,8 +211,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
|
||||
dev_err(&pdev->dev,
|
||||
"node %pOF has an invalid fsl,msi phandle %u\n",
|
||||
hose->dn, np->phandle);
|
||||
of_node_put(np);
|
||||
return -EINVAL;
|
||||
}
|
||||
of_node_put(np);
|
||||
}
|
||||
|
||||
for_each_pci_msi_entry(entry, pdev) {
|
||||
|
||||
@@ -46,7 +46,7 @@ config RISCV
|
||||
select CLINT_TIMER if !MMU
|
||||
select COMMON_CLK
|
||||
select EDAC_SUPPORT
|
||||
select GENERIC_ARCH_TOPOLOGY if SMP
|
||||
select GENERIC_ARCH_TOPOLOGY
|
||||
select GENERIC_ATOMIC64 if !64BIT
|
||||
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
|
||||
select GENERIC_EARLY_IOREMAP
|
||||
|
||||
@@ -39,6 +39,7 @@ else
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_LD_IS_LLD),y)
|
||||
ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0)
|
||||
KBUILD_CFLAGS += -mno-relax
|
||||
KBUILD_AFLAGS += -mno-relax
|
||||
ifndef CONFIG_AS_IS_LLVM
|
||||
@@ -46,6 +47,7 @@ ifndef CONFIG_AS_IS_LLVM
|
||||
KBUILD_AFLAGS += -Wa,-mno-relax
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# ISA string setting
|
||||
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
|
||||
|
||||
@@ -101,9 +101,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
|
||||
__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
|
||||
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
|
||||
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
|
||||
#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
|
||||
#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
|
||||
#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
|
||||
#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
|
||||
#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
|
||||
#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
|
||||
|
||||
__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
|
||||
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
|
||||
@@ -115,22 +115,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
|
||||
__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
|
||||
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
|
||||
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
|
||||
#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
|
||||
#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
|
||||
#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
|
||||
#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
|
||||
#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
|
||||
#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
|
||||
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
|
||||
|
||||
__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
|
||||
#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
|
||||
#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
|
||||
|
||||
__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
|
||||
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
|
||||
|
||||
__io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
|
||||
#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
|
||||
#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
|
||||
#endif
|
||||
|
||||
#include <asm-generic/io.h>
|
||||
|
||||
@@ -260,10 +260,10 @@ static void __init parse_dtb(void)
|
||||
pr_info("Machine model: %s\n", name);
|
||||
dump_stack_set_arch_desc("%s (DT)", name);
|
||||
}
|
||||
return;
|
||||
} else {
|
||||
pr_err("No DTB passed to the kernel\n");
|
||||
}
|
||||
|
||||
pr_err("No DTB passed to the kernel\n");
|
||||
#ifdef CONFIG_CMDLINE_FORCE
|
||||
strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
|
||||
pr_info("Forcing kernel command line to: %s\n", boot_command_line);
|
||||
|
||||
@@ -53,6 +53,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
unsigned int curr_cpuid;
|
||||
|
||||
curr_cpuid = smp_processor_id();
|
||||
store_cpu_topology(curr_cpuid);
|
||||
numa_store_cpu_info(curr_cpuid);
|
||||
numa_add_cpu(curr_cpuid);
|
||||
|
||||
@@ -165,9 +166,9 @@ asmlinkage __visible void smp_callin(void)
|
||||
mmgrab(mm);
|
||||
current->active_mm = mm;
|
||||
|
||||
store_cpu_topology(curr_cpuid);
|
||||
notify_cpu_starting(curr_cpuid);
|
||||
numa_add_cpu(curr_cpuid);
|
||||
update_siblings_masks(curr_cpuid);
|
||||
set_cpu_online(curr_cpuid, 1);
|
||||
|
||||
/*
|
||||
|
||||
@@ -18,9 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
|
||||
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
|
||||
return -EINVAL;
|
||||
|
||||
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
|
||||
offset >> (PAGE_SHIFT - page_shift_offset));
|
||||
}
|
||||
|
||||
@@ -188,7 +188,8 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
|
||||
}
|
||||
break;
|
||||
case EXC_LOAD_PAGE_FAULT:
|
||||
if (!(vma->vm_flags & VM_READ)) {
|
||||
/* Write implies read */
|
||||
if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
#include <asm-generic/sections.h>
|
||||
|
||||
extern long __machvec_start, __machvec_end;
|
||||
extern char __machvec_start[], __machvec_end[];
|
||||
extern char __uncached_start, __uncached_end;
|
||||
extern char __start_eh_frame[], __stop_eh_frame[];
|
||||
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
#define MV_NAME_SIZE 32
|
||||
|
||||
#define for_each_mv(mv) \
|
||||
for ((mv) = (struct sh_machine_vector *)&__machvec_start; \
|
||||
(mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \
|
||||
for ((mv) = (struct sh_machine_vector *)__machvec_start; \
|
||||
(mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \
|
||||
(mv)++)
|
||||
|
||||
static struct sh_machine_vector * __init get_mv_byname(const char *name)
|
||||
@@ -87,8 +87,8 @@ void __init sh_mv_setup(void)
|
||||
if (!machvec_selected) {
|
||||
unsigned long machvec_size;
|
||||
|
||||
machvec_size = ((unsigned long)&__machvec_end -
|
||||
(unsigned long)&__machvec_start);
|
||||
machvec_size = ((unsigned long)__machvec_end -
|
||||
(unsigned long)__machvec_start);
|
||||
|
||||
/*
|
||||
* Sanity check for machvec section alignment. Ensure
|
||||
@@ -102,7 +102,7 @@ void __init sh_mv_setup(void)
|
||||
* vector (usually the only one) from .machvec.init.
|
||||
*/
|
||||
if (machvec_size >= sizeof(struct sh_machine_vector))
|
||||
sh_mv = *(struct sh_machine_vector *)&__machvec_start;
|
||||
sh_mv = *(struct sh_machine_vector *)__machvec_start;
|
||||
}
|
||||
|
||||
pr_notice("Booting machvec: %s\n", get_system_type());
|
||||
|
||||
@@ -94,7 +94,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
|
||||
static void *c_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
|
||||
return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
|
||||
}
|
||||
|
||||
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
|
||||
@@ -1926,7 +1926,6 @@ config EFI
|
||||
config EFI_STUB
|
||||
bool "EFI stub support"
|
||||
depends on EFI && !X86_USE_3DNOW
|
||||
depends on $(cc-option,-mabi=ms) || X86_32
|
||||
select RELOCATABLE
|
||||
help
|
||||
This kernel feature allows a bzImage to be loaded directly
|
||||
|
||||
@@ -13,6 +13,8 @@
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/bits.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
@@ -1348,11 +1350,37 @@ static void pt_addr_filters_fini(struct perf_event *event)
|
||||
event->hw.addr_filters = NULL;
|
||||
}
|
||||
|
||||
static inline bool valid_kernel_ip(unsigned long ip)
|
||||
#ifdef CONFIG_X86_64
|
||||
static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return virt_addr_valid(ip) && kernel_ip(ip);
|
||||
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
|
||||
}
|
||||
|
||||
static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return canonical_address(vaddr, vaddr_bits) == vaddr;
|
||||
}
|
||||
|
||||
/* Clamp to a canonical address greater-than-or-equal-to the address given */
|
||||
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return is_canonical_address(vaddr, vaddr_bits) ?
|
||||
vaddr :
|
||||
-BIT_ULL(vaddr_bits - 1);
|
||||
}
|
||||
|
||||
/* Clamp to a canonical address less-than-or-equal-to the address given */
|
||||
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
|
||||
{
|
||||
return is_canonical_address(vaddr, vaddr_bits) ?
|
||||
vaddr :
|
||||
BIT_ULL(vaddr_bits - 1) - 1;
|
||||
}
|
||||
#else
|
||||
#define clamp_to_ge_canonical_addr(x, y) (x)
|
||||
#define clamp_to_le_canonical_addr(x, y) (x)
|
||||
#endif
|
||||
|
||||
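/*
 * Worked example (editor's note, not part of the patch): with 48
 * virtual address bits, canonical_address(0x0000800000000000, 48)
 * shifts left by 16 and arithmetic-shifts back, sign-extending bit 47
 * to give 0xffff800000000000.  Hence a non-canonical filter start is
 * clamped up to -BIT_ULL(47) and a non-canonical end down to
 * BIT_ULL(47) - 1.
 */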
static int pt_event_addr_filters_validate(struct list_head *filters)
|
||||
{
|
||||
struct perf_addr_filter *filter;
|
||||
@@ -1367,14 +1395,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
|
||||
filter->action == PERF_ADDR_FILTER_ACTION_START)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!filter->path.dentry) {
|
||||
if (!valid_kernel_ip(filter->offset))
|
||||
return -EINVAL;
|
||||
|
||||
if (!valid_kernel_ip(filter->offset + filter->size))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@@ -1398,9 +1418,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
|
||||
if (filter->path.dentry && !fr[range].start) {
|
||||
msr_a = msr_b = 0;
|
||||
} else {
|
||||
/* apply the offset */
|
||||
msr_a = fr[range].start;
|
||||
msr_b = msr_a + fr[range].size - 1;
|
||||
unsigned long n = fr[range].size - 1;
|
||||
unsigned long a = fr[range].start;
|
||||
unsigned long b;
|
||||
|
||||
if (a > ULONG_MAX - n)
|
||||
b = ULONG_MAX;
|
||||
else
|
||||
b = a + n;
|
||||
/*
|
||||
* Apply the offset. 64-bit addresses written to the
|
||||
* MSRs must be canonical, but the range can encompass
|
||||
* non-canonical addresses. Since software cannot
|
||||
* execute at non-canonical addresses, adjusting to
|
||||
* canonical addresses does not affect the result of the
|
||||
* address filter.
|
||||
*/
|
||||
msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
|
||||
msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
|
||||
if (msr_b < msr_a)
|
||||
msr_a = msr_b = 0;
|
||||
}
|
||||
|
||||
filters->filter[range].msr_a = msr_a;
|
||||
|
||||
@@ -529,7 +529,7 @@ struct hv_enlightened_vmcs {
|
||||
u64 guest_rip;
|
||||
|
||||
u32 hv_clean_fields;
|
||||
u32 hv_padding_32;
|
||||
u32 padding32_1;
|
||||
u32 hv_synthetic_controls;
|
||||
struct {
|
||||
u32 nested_flush_hypercall:1;
|
||||
@@ -537,7 +537,7 @@ struct hv_enlightened_vmcs {
|
||||
u32 reserved:30;
|
||||
} __packed hv_enlightenments_control;
|
||||
u32 hv_vp_id;
|
||||
|
||||
u32 padding32_2;
|
||||
u64 hv_vm_id;
|
||||
u64 partition_assist_page;
|
||||
u64 padding64_4[4];
|
||||
|
||||
@@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
|
||||
{
|
||||
u64 start = rmrr->base_address;
|
||||
u64 end = rmrr->end_address + 1;
|
||||
int entry_type;
|
||||
|
||||
if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
|
||||
entry_type = e820__get_entry_type(start, end);
|
||||
if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
|
||||
return 0;
|
||||
|
||||
pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
struct ucode_patch {
|
||||
struct list_head plist;
|
||||
void *data; /* Intel uses only this one */
|
||||
unsigned int size;
|
||||
u32 patch_id;
|
||||
u16 equiv_cpu;
|
||||
};
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/tboot.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/msr-index.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/vmx.h>
|
||||
#include "cpu.h"
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "x86/cpu: " fmt
|
||||
|
||||
@@ -29,15 +29,26 @@
|
||||
void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
|
||||
{
|
||||
struct mce m;
|
||||
int lsb;
|
||||
|
||||
if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Even if the ->validation_bits are set for address mask,
|
||||
* to be extra safe, check and reject an error radius '0',
|
||||
* and fall back to the default page size.
|
||||
*/
|
||||
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
|
||||
lsb = find_first_bit((void *)&mem_err->physical_addr_mask, PAGE_SHIFT);
|
||||
else
|
||||
lsb = PAGE_SHIFT;
|
||||
|
||||
mce_setup(&m);
|
||||
m.bank = -1;
|
||||
/* Fake a memory read error with unknown channel */
|
||||
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
|
||||
m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;
|
||||
m.misc = (MCI_MISC_ADDR_PHYS << 6) | lsb;
|
||||
|
||||
if (severity >= GHES_SEV_RECOVERABLE)
|
||||
m.status |= MCI_STATUS_UC;
|
||||
|
||||
@@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
return ret;

native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev >= mc->hdr.patch_id)

/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (rev > mc->hdr.patch_id)
return ret;

if (!__apply_microcode_amd(mc)) {
@@ -522,8 +528,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)

native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

/* Check whether we have saved a new patch already: */
if (*new_rev && rev < mc->hdr.patch_id) {
/*
* Check whether a new patch has been saved already. Also, allow application of
* the same revision in order to pick up SMT-thread-specific configuration even
* if the sibling SMT thread already has an up-to-date revision.
*/
if (*new_rev && rev <= mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
return;
@@ -782,6 +792,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
kfree(patch);
return -EINVAL;
}
patch->size = *patch_size;

mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
proc_id = mc_hdr->processor_rev_id;
@@ -863,7 +874,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
return ret;

memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));

return ret;
}

@@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L3,
.name = "L3",
.cache_level = 3,
.cache = {
.min_cbm_bits = 1,
},
.domains = domain_init(RDT_RESOURCE_L3),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
@@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L2,
.name = "L2",
.cache_level = 2,
.cache = {
.min_cbm_bits = 1,
},
.domains = domain_init(RDT_RESOURCE_L2),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
@@ -877,6 +871,7 @@ static __init void rdt_init_res_defs_intel(void)
r->cache.arch_has_sparse_bitmaps = false;
r->cache.arch_has_empty_bitmaps = false;
r->cache.arch_has_per_cpu_cfg = false;
r->cache.min_cbm_bits = 1;
} else if (r->rid == RDT_RESOURCE_MBA) {
hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
hw_res->msr_update = mba_wrmsr_intel;
@@ -897,6 +892,7 @@ static __init void rdt_init_res_defs_amd(void)
r->cache.arch_has_sparse_bitmaps = true;
r->cache.arch_has_empty_bitmaps = true;
r->cache.arch_has_per_cpu_cfg = true;
r->cache.min_cbm_bits = 0;
} else if (r->rid == RDT_RESOURCE_MBA) {
hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
hw_res->msr_update = mba_wrmsr_amd;

@@ -420,6 +420,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
struct pseudo_lock_region *plr = rdtgrp->plr;
u32 rmid_p, closid_p;
unsigned long i;
u64 saved_msr;
#ifdef CONFIG_KASAN
/*
* The registers used for local register variables are also used
@@ -463,6 +464,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
* the buffer and evict pseudo-locked memory read earlier from the
* cache.
*/
saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
__wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
@@ -514,7 +516,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
__wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p);

/* Re-enable the hardware prefetcher(s) */
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
local_irq_enable();

plr->thread_done = 1;
@@ -871,6 +873,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
static int measure_cycles_lat_fn(void *_plr)
{
struct pseudo_lock_region *plr = _plr;
u32 saved_low, saved_high;
unsigned long i;
u64 start, end;
void *mem_r;
@@ -879,6 +882,7 @@ static int measure_cycles_lat_fn(void *_plr)
/*
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
mem_r = READ_ONCE(plr->kmem);
/*
@@ -895,7 +899,7 @@ static int measure_cycles_lat_fn(void *_plr)
end = rdtsc_ordered();
trace_pseudo_lock_mem_latency((u32)(end - start));
}
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
local_irq_enable();
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
@@ -940,6 +944,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
struct perf_event *miss_event, *hit_event;
int hit_pmcnum, miss_pmcnum;
u32 saved_low, saved_high;
unsigned int line_size;
unsigned int size;
unsigned long i;
@@ -973,6 +978,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
/*
* Disable hardware prefetchers.
*/
rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);

/* Initialize rest of local variables */
@@ -1031,7 +1037,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
*/
rmb();
/* Re-enable hardware prefetchers */
wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
local_irq_enable();
out_hit:
perf_event_release_kernel(hit_event);

@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
unsigned int die_select_mask, die_level_siblings;
unsigned int pkg_mask_width;
bool die_level_present = false;
int leaf;

@@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

sub_index = 1;
do {
while (true) {
cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);

/*
@@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}

sub_index++;
} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
else
break;

core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
sub_index++;
}

core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
die_select_mask = (~(-1 << die_plus_mask_width)) >>
core_plus_mask_width;

@@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
}

c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
die_plus_mask_width);
pkg_mask_width);
/*
* Reinit the apicid, now that we have extended initial_apicid.
*/

@@ -1936,7 +1936,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;

if (ctxt->modrm_reg == VCPU_SREG_SS)
if (seg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);

@@ -2313,9 +2313,14 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
* are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
* on the related bits (if supported by the CPU) in the hope that
* we can avoid VMWrites during vmx_set_efer().
*
* Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
* loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
* do the same for L2.
*/
exec_control = __vm_entry_controls_get(vmcs01);
exec_control |= vmcs12->vm_entry_controls;
exec_control |= (vmcs12->vm_entry_controls &
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
if (cpu_has_load_ia32_efer()) {
if (guest_efer & EFER_LMA)
@@ -3821,7 +3826,16 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
u32 intr_info = nr | INTR_INFO_VALID_MASK;

if (vcpu->arch.exception.has_error_code) {
vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
/*
* Intel CPUs do not generate error codes with bits 31:16 set,
* and more importantly VMX disallows setting bits 31:16 in the
* injected error code for VM-Entry. Drop the bits to mimic
* hardware and avoid inducing failure on nested VM-Entry if L1
* chooses to inject the exception back to L2. AMD CPUs _do_
* generate "full" 32-bit error codes, so KVM allows userspace
* to inject exception error codes with bits 31:16 set.
*/
vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}

@@ -4251,14 +4265,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
nested_vmx_abort(vcpu,
VMX_ABORT_SAVE_GUEST_MSR_FAIL);
}

/*
* Drop what we picked up for L2 via vmx_complete_interrupts. It is
* preserved above and would only end up incorrectly in L1.
*/
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
}

/*
@@ -4598,6 +4604,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
WARN_ON_ONCE(nested_early_check);
}

/*
* Drop events/exceptions that were queued for re-injection to L2
* (picked up via vmx_complete_interrupts()), as well as exceptions
* that were pending for L2. Note, this must NOT be hoisted above
* prepare_vmcs12(), events/exceptions queued for re-injection need to
* be captured in vmcs12 (see vmcs12_save_pending_event()).
*/
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);

vmx_switch_vmcs(vcpu, &vmx->vmcs01);

/* Update any VMCS fields that might have changed while L2 ran */

@@ -1676,7 +1676,17 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
kvm_deliver_exception_payload(vcpu);

if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
/*
* Despite the error code being architecturally defined as 32
* bits, and the VMCS field being 32 bits, Intel CPUs and thus
* VMX don't actually supporting setting bits 31:16. Hardware
* will (should) never provide a bogus error code, but AMD CPUs
* do generate error codes with bits 31:16 set, and so KVM's
* ABI lets userspace shove in arbitrary 32-bit values. Drop
* the upper bits to avoid VM-Fail, losing information that
* does't really exist is preferable to killing the VM.
*/
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}


@@ -759,6 +759,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
static const struct trap_info zero = { };
unsigned out;

trace_xen_cpu_load_idt(desc);
@@ -768,7 +769,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

out = xen_convert_trap_info(desc, traps, false);
memset(&traps[out], 0, sizeof(traps[0]));
traps[out] = zero;

xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))

@@ -950,7 +950,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
u64 bps_limit, unsigned long *wait)
{
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
u64 bytes_allowed, extra_bytes;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);

@@ -967,10 +967,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = tg->td->throtl_slice;

jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

tmp = bps_limit * jiffy_elapsed_rnd;
do_div(tmp, HZ);
bytes_allowed = tmp;
bytes_allowed = mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed_rnd,
(u64)HZ);

if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
if (wait)

@@ -841,8 +841,11 @@ int wbt_init(struct request_queue *q)
rwb->last_comp = rwb->last_issue = jiffies;
rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT;
rwb->wc = 1;
rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rwb->min_lat_nsec = wbt_default_latency_nsec(q);

wbt_queue_depth_changed(&rwb->rqos);

/*
* Assign rwb and add the stats callback.
@@ -853,11 +856,6 @@ int wbt_init(struct request_queue *q)

blk_stat_add_callback(q, rwb->cb);

rwb->min_lat_nsec = wbt_default_latency_nsec(q);

wbt_queue_depth_changed(&rwb->rqos);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

return 0;

err_free:

@@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req)
return -ENOSYS;
}

static int akcipher_default_set_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}

int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
if (!alg->set_priv_key)
alg->set_priv_key = akcipher_default_set_key;

akcipher_prepare_alg(alg);
return crypto_register_alg(base);

@@ -12,6 +12,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>

@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
const guid_t *fru_id;
char *fru_text;
guid_t *sec_type;
static u32 err_seq;

@@ -160,18 +161,24 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,

/* log event via trace */
err_seq++;
gdata = (struct acpi_hest_generic_data *)(tmp + 1);
apei_estatus_for_each_section(tmp, gdata) {
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
else
fru_id = &guid_null;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
else
fru_text = "";
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);

if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
}
}

out:
mce->kflags |= MCE_HANDLED_EXTLOG;

@@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = {

static struct kobject *fpdt_kobj;

#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT
#include <linux/processor.h>
static bool fpdt_address_valid(u64 address)
{
/*
* On some systems the table contains invalid addresses
* with unsuppored high address bits set, check for this.
*/
return !(address >> boot_cpu_data.x86_phys_bits);
}
#else
static bool fpdt_address_valid(u64 address)
{
return true;
}
#endif

static int fpdt_process_subtable(u64 address, u32 subtable_type)
{
struct fpdt_subtable_header *subtable_header;
@@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
u32 length, offset;
int result;

if (!fpdt_address_valid(address)) {
pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address);
return -EINVAL;
}

subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
if (!subtable_header)
return -ENOMEM;

@@ -496,6 +496,22 @@ static const struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
},
},
{
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Satellite Z830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"),
},
},
{
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Portege Z830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"),
},
},
/*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.

@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
ghes_estatus_cache_add(generic, estatus);
}

if (task_work_pending && current->mm != &init_mm) {
if (task_work_pending && current->mm) {
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,

@@ -500,6 +500,70 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
},
},
/*
* More Tongfang devices with the same issue as the Clevo NL5xRU and
* NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
*/
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxNGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxZGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxRGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.

@@ -198,7 +198,24 @@ static const struct x86_cpu_id storage_d3_cpu_ids[] = {
{}
};

static const struct dmi_system_id force_storage_d3_dmi[] = {
{
/*
* _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
* but .NVME is needed to get StorageD3Enable node
* https://bugzilla.kernel.org/show_bug.cgi?id=216440
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
}
},
{}
};

bool force_storage_d3(void)
{
return x86_match_cpu(storage_d3_cpu_ids);
const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);

return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
}

@@ -254,7 +254,7 @@ enum {
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */

/* em constants */
EM_MAX_SLOTS = 8,
EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,

/* em_ctl bits */

@@ -1230,4 +1230,4 @@ module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ahci:imx");
MODULE_ALIAS("platform:" DRV_NAME);

@@ -451,14 +451,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
}
}

hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
/*
* Too many sub-nodes most likely means having something wrong with
* the firmware.
*/
child_nodes = of_get_child_count(dev->of_node);
if (child_nodes > AHCI_MAX_PORTS) {
rc = -EINVAL;
goto err_out;
}

/*
* If no sub-node was found, we still need to set nports to
* one in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
if (!child_nodes)
if (child_nodes)
hpriv->nports = child_nodes;
else
hpriv->nports = 1;

hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);

@@ -690,4 +690,23 @@ void __init init_cpu_topology(void)
else if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}

void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

if (cpuid_topo->package_id != -1)
goto topology_populated;

cpuid_topo->thread_id = -1;
cpuid_topo->core_id = cpuid;
cpuid_topo->package_id = cpu_to_node(cpuid);

pr_debug("CPU%u: package %d core %d thread %d\n",
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
cpuid_topo->thread_id);

topology_populated:
update_siblings_masks(cpuid);
}
#endif

@@ -1351,10 +1351,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
if (ret)
if (ret) {
sock_shutdown(nbd);
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
}

flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */

@@ -2274,15 +2274,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
set_bit(HCI_QUIRK_VALID_LE_STATES,
&hdev->quirks);

err = btintel_legacy_rom_setup(hdev, &ver);
break;
case 0x0b: /* SfP */
case 0x0c: /* WsP */
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
fallthrough;
case 0x0c: /* WsP */
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
@@ -2290,11 +2295,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);

/* Valid LE States quirk for JfP/ThP familiy */
if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
set_bit(HCI_QUIRK_VALID_LE_STATES,
&hdev->quirks);

/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);

@@ -2361,8 +2361,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

/* Valid LE States quirk for JfP/ThP familiy */
if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
/* Set Valid LE States quirk */
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);

/* Setup MSFT Extension support */

@@ -2435,15 +2435,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,

set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);

/* WMT cmd/event doesn't follow up the generic HCI cmd/event handling,
* it needs constantly polling control pipe until the host received the
* WMT event, thus, we should require to specifically acquire PM counter
* on the USB to prevent the interface from entering auto suspended
* while WMT cmd/event in progress.
*/
err = usb_autopm_get_interface(data->intf);
if (err < 0)
goto err_free_wc;

err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);

if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
usb_autopm_put_interface(data->intf);
goto err_free_wc;
}

/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);

usb_autopm_put_interface(data->intf);

if (err < 0)
goto err_free_wc;

@@ -490,6 +490,11 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
if (percpu_init_rwsem(&hu->proto_lock)) {
BT_ERR("Can't allocate semaphore structure");
kfree(hu);
return -ENOMEM;
}

tty->disc_data = hu;
hu->tty = tty;
@@ -502,8 +507,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);

percpu_init_rwsem(&hu->proto_lock);

/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);

Some files were not shown because too many files have changed in this diff.