Merge bdb575f872 ("Merge tag 'drm-fixes-2021-09-17' of git://anongit.freedesktop.org/drm/drm") into android-mainline

Steps on the way to 5.15-rc2

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Id7ae8fe00ef75a10c28b62553ee8cf42a6db5729
Greg Kroah-Hartman
2021-09-17 16:20:00 +02:00
183 changed files with 1923 additions and 1080 deletions

View File

@@ -259,7 +259,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
- - Make sure you have at least gcc 4.9 available.
+ - Make sure you have at least gcc 5.1 available.
   For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
   Please note that you can still run a.out user programs with this kernel.

View File

@@ -19,7 +19,9 @@ properties:
           - const: allwinner,sun8i-v3s-emac
           - const: allwinner,sun50i-a64-emac
       - items:
-          - const: allwinner,sun50i-h6-emac
+          - enum:
+              - allwinner,sun20i-d1-emac
+              - allwinner,sun50i-h6-emac
           - const: allwinner,sun50i-a64-emac

   reg:

View File

@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.9              gcc --version
+GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  10.0.1           clang --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v

View File

@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
 编译内核
 ---------
- - 确保您至少有gcc 4.9可用。
+ - 确保您至少有gcc 5.1可用。
   有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>`
   请注意您仍然可以使用此内核运行a.out用户程序。

View File

@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
 編譯內核
 ---------
- - 確保您至少有gcc 4.9可用。
+ - 確保您至少有gcc 5.1可用。
   有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>`
   請注意您仍然可以使用此內核運行a.out用戶程序。

View File

@@ -862,12 +862,6 @@ endif

 DEBUG_CFLAGS	:=

-# Workaround for GCC versions < 5.0
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
-ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS	+= $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
-endif
-
 ifdef CONFIG_DEBUG_INFO

 ifdef CONFIG_DEBUG_INFO_SPLIT

View File

@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
  * Change virtual addresses to physical addresses and vv.
  */
 #ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	return (unsigned long)address - IDENT_ADDR;
 }
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
 	return (void *) (address + IDENT_ADDR);
 }
 #else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	unsigned long phys = (unsigned long)address;
@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
 extern unsigned long __direct_map_base;
 extern unsigned long __direct_map_size;

-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
 {
 	unsigned long phys = virt_to_phys(address);
 	unsigned long bus = phys + __direct_map_base;
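A note on the change above: the added volatile qualifier lets callers pass pointers to volatile MMIO/DMA objects without a qualifier-dropping cast (converting volatile T * to plain void * discards the qualifier and GCC warns). A standalone model of the idea, illustrative only; the IDENT_ADDR value here is just a stand-in:

#define IDENT_ADDR 0xfffffc0000000000UL

static inline unsigned long virt_to_phys(volatile void *address)
{
        /* accepts both plain and volatile-qualified pointers implicitly */
        return (unsigned long)address - IDENT_ADDR;
}

int main(void)
{
        static volatile unsigned long mmio_word; /* stand-in for an MMIO/DMA object */

        /* with the old "void *" prototype this needed (void *)&mmio_word */
        return virt_to_phys(&mmio_word) != 0 ? 0 : 1;
}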

View File

@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB	0x20000000
+#define BOOT_ADDR	0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE	(16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this. */
+#else
+#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD	KERNEL_START
+#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader. Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM			ZERO_PGE
+#define COMMAND_LINE		((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))
+
+#endif

View File

@@ -1,43 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ALPHA_SETUP_H
-#define __ALPHA_SETUP_H
+#ifndef _UAPI__ALPHA_SETUP_H
+#define _UAPI__ALPHA_SETUP_H

 #define COMMAND_LINE_SIZE	256

-/*
- * We leave one page for the initial stack page, and one page for
- * the initial process structure. Also, the console eats 3 MB for
- * the initial bootloader (one of which we can reclaim later).
- */
-#define BOOT_PCB	0x20000000
-#define BOOT_ADDR	0x20000000
-/* Remove when official MILO sources have ELF support: */
-#define BOOT_SIZE	(16*1024)
-
-#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
-#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this. */
-#else
-#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
-#endif
-
-#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
-#define SWAPPER_PGD	KERNEL_START
-#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
-#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
-#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
-#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
-
-#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
-
-/*
- * This is setup by the secondary bootstrap loader. Because
- * the zero page is zeroed out as soon as the vm system is
- * initialized, we need to copy things out into a more permanent
- * place.
- */
-#define PARAM			ZERO_PGE
-#define COMMAND_LINE		((char*)(PARAM + 0x0000))
-#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
-#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))
-
-#endif
+#endif /* _UAPI__ALPHA_SETUP_H */
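The kernel-side replacement for the block removed above (the new asm/setup.h shown earlier) wraps the fixed PARAM address in absolute_pointer() so the compiler cannot trace the constant address and emit array-bounds warnings. To the best of my recollection the helper added in this cycle is essentially the following (hedged sketch of include/linux/compiler.h, GNU C statement expressions):

/* absolute_pointer() launders a constant address through an empty asm,
 * hiding its provenance from the optimizer's bounds analysis.
 */
#define RELOC_HIDE(ptr, off)						\
({									\
	unsigned long __ptr;						\
	__asm__ ("" : "=r"(__ptr) : "0"(ptr));				\
	(typeof(ptr)) (__ptr + (off));					\
})

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

/* usage, as in the new header (PARAM value illustrative): */
#define PARAM		0xfffffc0000310000UL
#define COMMAND_LINE	((char *)(absolute_pointer(PARAM + 0x0000)))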

View File

@@ -86,7 +86,7 @@ config ARM64
 	select ARCH_SUPPORTS_LTO_CLANG_THIN
 	select ARCH_SUPPORTS_CFI_CLANG
 	select ARCH_SUPPORTS_ATOMIC_RMW
-	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT

View File

@@ -17,21 +17,21 @@
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
 #define in_be16(addr) \
-    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
 #define in_be32(addr) \
-    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
 #define in_le16(addr) \
-    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
 #define in_le32(addr) \
-    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })

-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))

 #define raw_inb in_8
 #define raw_inw in_be16
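The intermediate (unsigned long) cast above lets these macros accept either a pointer or an integer bus address without int-to-pointer-cast warnings. A userspace model, illustrative only (assumes GNU C statement expressions; uint8_t stands in for the kernel's u8):

#include <stdint.h>

#define in_8(addr) \
    ({ uint8_t __v = (*(volatile uint8_t *) (unsigned long)(addr)); __v; })

int main(void)
{
        static volatile uint8_t reg;
        unsigned long bus_addr = (unsigned long)&reg; /* integer form */

        uint8_t a = in_8(&reg);     /* pointer argument */
        uint8_t b = in_8(bus_addr); /* integer argument, no warning */
        return a == b ? 0 : 1;
}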

View File

@@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)

 int mvme147_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
 	if (!op) {
 		m147_rtc->ctrl = RTC_READ;
 		t->tm_year = bcd2int (m147_rtc->bcd_year);
@@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
 		m147_rtc->ctrl = 0;
 		if (t->tm_year < 70)
 			t->tm_year += 100;
+	} else {
+		/* FIXME Setting the time is not yet supported */
+		return -EOPNOTSUPP;
 	}
 	return 0;
 }
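For context: m68k's mach_hwclk callback convention is op == 0 to read the RTC into *t and non-zero to set it, so returning -EOPNOTSUPP on the set path (instead of falling through to return 0) stops userspace from believing a write succeeded. A hedged sketch of the contract, with a simplified rtc_time:

#include <errno.h>

struct rtc_time { int tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec; };

static int example_hwclk(int op, struct rtc_time *t)
{
        if (!op) {
                /* ... latch and copy RTC registers into *t ... */
                return 0;
        }
        /* FIXME Setting the time is not yet supported */
        return -EOPNOTSUPP;
}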

View File

@@ -436,7 +436,6 @@ int bcd2int (unsigned char b)

 int mvme16x_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
 	if (!op) {
 		rtc->ctrl = RTC_READ;
 		t->tm_year = bcd2int (rtc->bcd_year);
@@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
 		rtc->ctrl = 0;
 		if (t->tm_year < 70)
 			t->tm_year += 100;
+	} else {
+		/* FIXME Setting the time is not yet supported */
+		return -EOPNOTSUPP;
 	}
 	return 0;
 }

View File

@@ -184,7 +184,7 @@ extern int npmem_ranges;
 #include <asm-generic/getorder.h>
 #include <asm/pdc.h>

-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+#define PAGE0   ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))

 /* DEFINITION OF THE ZERO-PAGE (PAG0) */
 /* based on work by Jason Eckhardt (jason@equator.com) */

View File

@@ -35,7 +35,6 @@ endif
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
 		 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-		 -include $(srctree)/include/linux/compiler_attributes.h \
 		 $(LINUXINCLUDE)

 ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
 BOOTCFLAGS	+= -fno-stack-protector
 endif

+BOOTCFLAGS	+= -include $(srctree)/include/linux/compiler_attributes.h
 BOOTCFLAGS	+= -I$(objtree)/$(obj) -I$(srctree)/$(obj)

 DTC_FLAGS	?= -p 1024

View File

@@ -12,16 +12,6 @@
 #  define ASM_CONST(x)	__ASM_CONST(x)
 #endif

-/*
- * Inline assembly memory constraint
- *
- * GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
- *
- */
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define UPD_CONSTR ""
-#else
 #define UPD_CONSTR "<>"
-#endif

 #endif /* _ASM_POWERPC_ASM_CONST_H */
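For readers unfamiliar with UPD_CONSTR: it is string-spliced into inline-asm memory constraints, where "m<>" permits GCC to pick update-form (pre-increment/decrement) addressing; the removed fallback defined it away for GCC < 5. A hedged, illustrative sketch (powerpc GNU C; the asm template below is my own example in the style of the powerpc I/O accessors, not taken from the patch):

#define UPD_CONSTR "<>"

static inline void store_u32(unsigned int val, unsigned int *addr)
{
        /* %U0/%X0 emit the update/indexed suffixes when GCC chooses such a mode */
        __asm__ __volatile__("stw%U0%X0 %1,%0"
                             : "=m"UPD_CONSTR (*addr)   /* concatenates to "=m<>" */
                             : "r" (val));
}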

View File

@@ -236,7 +236,7 @@ config ARCH_RV32I
 config ARCH_RV64I
 	bool "RV64I"
 	select 64BIT
-	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL

View File

@@ -39,6 +39,7 @@ struct mdesc_hdr {
 	u32	node_sz; /* node block size */
 	u32	name_sz; /* name block size */
 	u32	data_sz; /* data block size */
+	char	data[];
 } __attribute__((aligned(16)));

 struct mdesc_elem {
@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);

 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
-	return (struct mdesc_elem *) (mdesc + 1);
+	return (struct mdesc_elem *) mdesc->data;
 }

 static void *name_block(struct mdesc_hdr *mdesc)
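The sparc mdesc change above is the standard flexible-array-member conversion: giving the trailing data a real type keeps accesses in bounds for compiler and fortify checking, instead of relying on (hdr + 1) pointer arithmetic. A standalone illustration:

#include <stdlib.h>
#include <string.h>

struct blob_hdr {
        unsigned int data_sz;
        char data[];            /* flexible array member, must be last */
};

int main(void)
{
        struct blob_hdr *h = malloc(sizeof(*h) + 16);

        if (!h)
                return 1;
        h->data_sz = 16;
        memset(h->data, 0, h->data_sz);   /* was: memset(h + 1, 0, ...) */
        free(h);
        return 0;
}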

View File

@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 /*
  * IPI implementation on Hyper-V.
  */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+		bool exclude_self)
 {
 	struct hv_send_ipi_ex **arg;
 	struct hv_send_ipi_ex *ipi_arg;
@@ -123,6 +124,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)

 	if (!cpumask_equal(mask, cpu_present_mask)) {
 		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-		nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+		if (exclude_self)
+			nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+		else
+			nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
 	}
 	if (nr_bank < 0)
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
 	return hv_result_success(status);
 }

-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+		bool exclude_self)
 {
-	int cur_cpu, vcpu;
+	int cur_cpu, vcpu, this_cpu = smp_processor_id();
 	struct hv_send_ipi ipi_arg;
 	u64 status;
+	unsigned int weight;

 	trace_hyperv_send_ipi_mask(mask, vector);

-	if (cpumask_empty(mask))
+	weight = cpumask_weight(mask);
+
+	/*
+	 * Do nothing if
+	 *  1. the mask is empty
+	 *  2. the mask only contains self when exclude_self is true
+	 */
+	if (weight == 0 ||
+	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
 		return true;

 	if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	ipi_arg.cpu_mask = 0;

 	for_each_cpu(cur_cpu, mask) {
+		if (exclude_self && cur_cpu == this_cpu)
+			continue;
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
 		if (vcpu == VP_INVAL)
 			return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	return hv_result_success(status);

 do_ex_hypercall:
-	return __send_ipi_mask_ex(mask, vector);
+	return __send_ipi_mask_ex(mask, vector, exclude_self);
 }

 static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
 		return false;

 	if (vp >= 64)
-		return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);

 	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
 	return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)

 static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
 {
-	if (!__send_ipi_mask(mask, vector))
+	if (!__send_ipi_mask(mask, vector, false))
 		orig_apic.send_IPI_mask(mask, vector);
 }

 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
-	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
-	const struct cpumask *local_mask;
-
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
-	if (!__send_ipi_mask(local_mask, vector))
+	if (!__send_ipi_mask(mask, vector, true))
 		orig_apic.send_IPI_mask_allbutself(mask, vector);
 }

@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
 static void hv_send_ipi_all(int vector)
 {
-	if (!__send_ipi_mask(cpu_online_mask, vector))
+	if (!__send_ipi_mask(cpu_online_mask, vector, false))
 		orig_apic.send_IPI_all(vector);
 }
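The shape of this refactor, reduced to a toy: threading an exclude_self flag down the call chain avoids copying the whole mask to a stack cpumask just to clear the local CPU, which is costly and stack-hungry for large NR_CPUS. Illustrative C with a one-word mask; the real code uses struct cpumask and hypercalls:

#include <stdbool.h>

#define NR_CPUS 64
typedef unsigned long long cpumask_t;    /* toy one-word mask */

static bool send_ipi_mask(cpumask_t mask, int vector, bool exclude_self,
                          int this_cpu)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!(mask & (1ULL << cpu)))
                        continue;
                if (exclude_self && cpu == this_cpu)
                        continue;       /* skip self inline, no mask copy */
                /* ... program one IPI for 'cpu' with 'vector' ... */
                (void)vector;
        }
        return true;
}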

View File

@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)

 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_free_ptr(ptr, size);
 }

 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
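This and the following conversions rely on a helper introduced in the same release; as best I recall it is essentially this thin wrapper (a hedged paraphrase of mm/memblock.c, not a verbatim quote), which centralizes the __pa() conversion so callers free by virtual pointer and NULL is tolerated:

/* free memblock-allocated memory given its virtual address */
void __init_memblock memblock_free_ptr(void *ptr, size_t size)
{
	if (ptr)
		memblock_free(__pa(ptr), size);
}

That NULL tolerance is why call sites below can drop their "if (p)" / "if (phys_dist)" guards.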

View File

@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 			p = early_alloc(PMD_SIZE, nid, false);
 			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
 				return;
-			else if (p)
-				memblock_free(__pa(p), PMD_SIZE);
+			memblock_free_ptr(p, PMD_SIZE);
 		}

 		p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
 			p = early_alloc(PUD_SIZE, nid, false);
 			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
 				return;
-			else if (p)
-				memblock_free(__pa(p), PUD_SIZE);
+			memblock_free_ptr(p, PUD_SIZE);
 		}

 		p = early_alloc(PAGE_SIZE, nid, true);

View File

@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)

 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_free(__pa(numa_distance), size);
+		memblock_free_ptr(numa_distance, size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }

View File

@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	}

 	/* free the copied physical distance table */
-	if (phys_dist)
-		memblock_free(__pa(phys_dist), phys_size);
+	memblock_free_ptr(phys_dist, phys_size);
 	return;

 no_emu:

View File

@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
 	size = numa_distance_cnt * numa_distance_cnt *
 		sizeof(numa_distance[0]);

-	memblock_free(__pa(numa_distance), size);
+	memblock_free_ptr(numa_distance, size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;
 }

View File

@@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
 static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct device *cpu_dev;
-	int cur_cluster = cpu_to_cluster(policy->cpu);

 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {

View File

@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
 	MAX_HWIP
 };

-#define HWIP_MAX_INSTANCE	8
+#define HWIP_MAX_INSTANCE	10

 struct amd_powerplay {
 	void *pp_handle;

View File

@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 		kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }

+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd.dev)
+		r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+	return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
 	int r = 0;

View File

@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);

 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }

+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	return 0;

View File

@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	struct dentry *ent;
 	int r, i;

 	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
 				  &fops_ib_preempt);
-	if (!ent) {
+	if (IS_ERR(ent)) {
 		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
-		return -EIO;
+		return PTR_ERR(ent);
 	}

 	ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
 				  &fops_sclk_set);
-	if (!ent) {
+	if (IS_ERR(ent)) {
 		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
-		return -EIO;
+		return PTR_ERR(ent);
 	}

 	/* Register debugfs entries for amdgpu_ttm */
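Background for this and the amdgpu_ring change below: the debugfs creation functions report failure with ERR_PTR() codes, never NULL, so the old NULL checks were dead code that could pass an error pointer along. Illustrative fragment (names hypothetical):

static int example_debugfs_init(struct dentry *root, void *priv,
                                const struct file_operations *fops)
{
        struct dentry *ent;

        ent = debugfs_create_file("example", 0600, root, priv, fops);
        if (IS_ERR(ent))                /* was: if (!ent) -- never fires */
                return PTR_ERR(ent);
        return 0;
}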

View File

@@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (r)
 		goto init_failed;

+	r = amdgpu_amdkfd_resume_iommu(adev);
+	if (r)
+		goto init_failed;
+
 	r = amdgpu_device_ip_hw_init_phase1(adev);
 	if (r)
 		goto init_failed;
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 {
 	int r;

+	r = amdgpu_amdkfd_resume_iommu(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_device_ip_resume_phase1(adev);
 	if (r)
 		return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 				dev_warn(tmp_adev->dev, "asic atom init failed!");
 			} else {
 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+				if (r)
+					goto out;
+
 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
 				if (r)
 					goto out;

View File

@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 		break;
 	default:
 		adev->gmc.tmz_enabled = false;
-		dev_warn(adev->dev,
+		dev_info(adev->dev,
 			 "Trusted Memory Zone (TMZ) feature not supported\n");
 		break;
 	}

View File

@@ -757,7 +757,7 @@ Out:
 	return res;
 }

-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
 {
 	return RAS_MAX_RECORD_COUNT;
 }

View File

@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
 int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
 			     struct eeprom_table_record *records, const u32 num);

-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);

 void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);

View File

@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
 	ent = debugfs_create_file(name,
 				  S_IFREG | S_IRUGO, root,
 				  ring, &amdgpu_debugfs_ring_fops);
-	if (!ent)
-		return -ENOMEM;
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);

 	i_size_write(ent->d_inode, ring->ring_size + 12);
 	ring->ent = ent;

View File

@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 		goto out;
 	}

+	if (bo->type == ttm_bo_type_device &&
+	    new_mem->mem_type == TTM_PL_VRAM &&
+	    old_mem->mem_type != TTM_PL_VRAM) {
+		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+		 * accesses the BO after it's moved.
+		 */
+		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+	}
+
 	if (adev->mman.buffer_funcs_enabled) {
 		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
 		      new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 			return r;
 	}

-	if (bo->type == ttm_bo_type_device &&
-	    new_mem->mem_type == TTM_PL_VRAM &&
-	    old_mem->mem_type != TTM_PL_VRAM) {
-		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
-		 * accesses the BO after it's moved.
-		 */
-		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-	}
-
 out:
 	/* update statistics */
 	atomic64_add(bo->base.size, &adev->num_bytes_moved);

View File

@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 145,
 	.num_sdma_engines = 2,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 145,
 	.num_sdma_engines = 2,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 145,
 	.num_sdma_engines = 2,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 92,
 	.num_sdma_engines = 4,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 92,
 	.num_sdma_engines = 2,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
 	.mqd_size_aligned = MQD_SIZE_ALIGNED,
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
-	.needs_pci_atomics = false,
+	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 92,
 	.num_sdma_engines = 1,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 92,
 	.num_sdma_engines = 2,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
 	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 92,
 	.num_sdma_engines = 1,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
 	.mqd_size_aligned = MQD_SIZE_ALIGNED,
 	.needs_iommu_device = false,
 	.supports_cwsr = true,
-	.needs_pci_atomics = false,
+	.needs_pci_atomics = true,
+	.no_atomic_fw_version = 92,
 	.num_sdma_engines = 1,
 	.num_xgmi_sdma_engines = 0,
 	.num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 	if (!kfd)
 		return NULL;

-	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
-	 * 32 and 64-bit requests are possible and must be
-	 * supported.
-	 */
-	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
-	if (device_info->needs_pci_atomics &&
-	    !kfd->pci_atomic_requested) {
-		dev_info(kfd_device,
-			 "skipped device %x:%x, PCI rejects atomics\n",
-			 pdev->vendor, pdev->device);
-		kfree(kfd);
-		return NULL;
-	}
-
 	kfd->kgd = kgd;
 	kfd->device_info = device_info;
 	kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
 			- kfd->vm_info.first_vmid_kfd + 1;

+	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+	 * 32 and 64-bit requests are possible and must be
+	 * supported.
+	 */
+	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+	if (!kfd->pci_atomic_requested &&
+	    kfd->device_info->needs_pci_atomics &&
+	    (!kfd->device_info->no_atomic_fw_version ||
+	     kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+		dev_info(kfd_device,
+			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+			 kfd->pdev->vendor, kfd->pdev->device,
+			 kfd->mec_fw_version,
+			 kfd->device_info->no_atomic_fw_version);
+		return false;
+	}
+
 	/* Verify module parameters regarding mapped process number*/
 	if ((hws_max_conc_proc < 0)
 			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
@@ -1057,18 +1069,22 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 	return ret;
 }

-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 {
 	int err = 0;

 	err = kfd_iommu_resume(kfd);
-	if (err) {
+	if (err)
 		dev_err(kfd_device,
 			"Failed to resume IOMMU for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
-		return err;
-	}
+	return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+	int err = 0;

 	err = kfd->dqm->ops.start(kfd->dqm);
 	if (err) {
 		dev_err(kfd_device,
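The new gating logic in kgd2kfd_device_init() can be read as a single predicate: a device that needs PCIe atomics may still initialize if its MEC firmware is new enough to cope without them. A hedged paraphrase (field names as in the diff; the helper name is mine):

#include <stdbool.h>

static bool skip_device(bool pci_atomic_requested, bool needs_pci_atomics,
                        unsigned int mec_fw_version,
                        unsigned int no_atomic_fw_version)
{
        if (pci_atomic_requested || !needs_pci_atomics)
                return false;
        /* no known-good firmware cutoff, or firmware older than it: skip */
        return !no_atomic_fw_version || mec_fw_version < no_atomic_fw_version;
}

Moving the check from probe time to device init also lets it consult mec_fw_version, which is only known once the firmware has been loaded.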

View File

@@ -207,6 +207,7 @@ struct kfd_device_info {
 	bool supports_cwsr;
 	bool needs_iommu_device;
 	bool needs_pci_atomics;
+	uint32_t no_atomic_fw_version;
 	unsigned int num_sdma_engines;
 	unsigned int num_xgmi_sdma_engines;
 	unsigned int num_sdma_queues_per_engine;

View File

@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 	uint32_t agp_base, agp_bot, agp_top;
 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

+	memset(pa_config, 0, sizeof(*pa_config));
+
 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
@@ -6024,6 +6026,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 		return 0;

 #if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dm->vblank_control_workqueue) {
 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
 		if (!work)
 			return -ENOMEM;
@@ -6039,6 +6042,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 		}

 		queue_work(dm->vblank_control_workqueue, &work->work);
+	}
 #endif

 	return 0;
@@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
-					    struct dc_state *dc_state)
+					    struct dc_state *dc_state,
+					    struct dsc_mst_fairness_vars *vars)
 {
 	struct dc_stream_state *stream = NULL;
 	struct drm_connector *connector;
 	struct drm_connector_state *new_con_state;
 	struct amdgpu_dm_connector *aconnector;
 	struct dm_connector_state *dm_conn_state;
-	int i, j, clock, bpp;
+	int i, j, clock;
 	int vcpi, pbn_div, pbn = 0;

 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 		}

 		pbn_div = dm_mst_get_pbn_divider(stream->link);
-		bpp = stream->timing.dsc_cfg.bits_per_pixel;
 		clock = stream->timing.pix_clk_100hz / 10;
-		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
+		for (j = 0; j < dc_state->stream_count; j++) {
+			if (vars[j].aconnector == aconnector) {
+				pbn = vars[j].pbn;
+				break;
+			}
+		}
+
 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
 						    aconnector->port,
 						    pbn, pbn_div,
@@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
 	}
 }

+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+	const struct drm_display_mode *native_mode;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+		return;
+
+	encoder = amdgpu_dm_connector_to_encoder(connector);
+	if (!encoder)
+		return;
+
+	amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	native_mode = &amdgpu_encoder->native_mode;
+	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+		return;
+
+	drm_connector_set_panel_orientation_with_quirk(connector,
+						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+						       native_mode->hdisplay,
+						       native_mode->vdisplay);
+}
+
 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
 					      struct edid *edid)
 {
@@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
 		 * restored here.
 		 */
 		amdgpu_dm_update_freesync_caps(connector, edid);
+
+		amdgpu_set_panel_orientation(connector);
 	} else {
 		amdgpu_dm_connector->num_modes = 0;
 	}
@@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

-	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
-	 * hot-plug, headless s3, dpms
+	/* Stream removed and re-enabled
+	 *
+	 * Can sometimes overlap with the HPD case,
+	 * thus set update_hdcp to false to avoid
+	 * setting HDCP multiple times.
+	 *
+	 * Handles:	DESIRED -> DESIRED (Special case)
+	 */
+	if (!(old_state->crtc && old_state->crtc->enabled) &&
+	    state->crtc && state->crtc->enabled &&
+	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+		dm_con_state->update_hdcp = false;
+		return true;
+	}
+
+	/* Hot-plug, headless s3, dpms
+	 *
+	 * Only start HDCP if the display is connected/enabled.
+	 * update_hdcp flag will be set to false until the next
+	 * HPD comes in.
 	 *
 	 * Handles:	DESIRED -> DESIRED (Special case)
 	 */
@@ -8648,6 +8705,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	 * If PSR or idle optimizations are enabled then flush out
 	 * any pending work before hardware programming.
 	 */
+	if (dm->vblank_control_workqueue)
 		flush_workqueue(dm->vblank_control_workqueue);
 #endif

@@ -8983,6 +9041,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		/* if there mode set or reset, disable eDP PSR */
 		if (mode_set_reset_required) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (dm->vblank_control_workqueue)
 				flush_workqueue(dm->vblank_control_workqueue);
 #endif
 			amdgpu_dm_psr_disable_all(dm);
@@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	int ret, i;
 	bool lock_and_validation_needed = false;
 	struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif

 	trace_amdgpu_dm_atomic_check_begin(state);
@@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;

 #if defined(CONFIG_DRM_AMD_DC_DCN)
-		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
 			goto fail;

-		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
 		if (ret)
 			goto fail;
 #endif
@@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			goto fail;

 		status = dc_validate_global_state(dc, dm_state->context, false);
 		if (status != DC_OK) {
-			DC_LOG_WARNING("DC global validation failure: %s (%d)",
+			drm_dbg_atomic(dev,
+				       "DC global validation failure: %s (%d)",
 				       dc_status_to_str(status), status);
 			ret = -EINVAL;
 			goto fail;

View File

@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
 	uint32_t num_slices_h;
 	uint32_t num_slices_v;
 	uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
-	int pbn;
-	bool dsc_enabled;
-	int bpp_x16;
+	struct amdgpu_dm_connector *aconnector;
 };

 static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,

 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 					     struct dc_state *dc_state,
-					     struct dc_link *dc_link)
+					     struct dc_link *dc_link,
+					     struct dsc_mst_fairness_vars *vars)
 {
 	int i;
 	struct dc_stream_state *stream;
 	struct dsc_mst_fairness_params params[MAX_PIPES];
-	struct dsc_mst_fairness_vars vars[MAX_PIPES];
 	struct amdgpu_dm_connector *aconnector;
 	int count = 0;
 	bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		params[count].timing = &stream->timing;
 		params[count].sink = stream->sink;
 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+		params[count].aconnector = aconnector;
 		params[count].port = aconnector->port;
 		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
 		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 	}

 	/* Try no compression */
 	for (i = 0; i < count; i++) {
+		vars[i].aconnector = params[i].aconnector;
 		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
 		vars[i].dsc_enabled = false;
 		vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 }

 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-				       struct dc_state *dc_state)
+				       struct dc_state *dc_state,
+				       struct dsc_mst_fairness_vars *vars)
 {
 	int i, j;
 	struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 			return false;

 		mutex_lock(&aconnector->mst_mgr.lock);
-		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
 			mutex_unlock(&aconnector->mst_mgr.lock);
 			return false;
 		}

View File

@@ -39,8 +39,17 @@ void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);

 #if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dsc_mst_fairness_vars {
+	int pbn;
+	bool dsc_enabled;
+	int bpp_x16;
+	struct amdgpu_dm_connector *aconnector;
+};
+
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-				       struct dc_state *dc_state);
+				       struct dc_state *dc_state,
+				       struct dsc_mst_fairness_vars *vars);
 #endif

 #endif

View File

@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
 	depth = *pcpu;
 	put_cpu_ptr(&fpu_recursion_depth);

-	ASSERT(depth > 1);
+	ASSERT(depth >= 1);
 }

 /**
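Why depth >= 1 is the right assertion: dc_fpu_begin() increments the per-CPU depth before any FP code runs, so 1 is the expected value inside a non-nested protected region; "depth > 1" only held for nested regions and asserted spuriously at the first level. A simplified standalone model (illustrative; _Thread_local and __builtin_trap stand in for the kernel's per-CPU variable and ASSERT):

static _Thread_local int fpu_recursion_depth;

static void dc_fpu_begin(void) { fpu_recursion_depth++; /* kernel_fpu_begin() on 0->1 */ }
static void dc_fpu_end(void)   { fpu_recursion_depth--; /* kernel_fpu_end() on 1->0 */ }

static void dc_assert_fp_enabled(void)
{
        /* must hold for the outermost region as well as any nested one */
        if (!(fpu_recursion_depth >= 1))
                __builtin_trap();
}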

View File

@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
 	struct abm *abm = get_abm_from_stream_res(link);
+	struct panel_cntl *panel_cntl = link->panel_cntl;
+	struct dc *dc = link->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+	bool fw_set_brightness = true;

-	if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-		return DC_ERROR_UNEXPECTED;
+	if (dmcu)
+		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);

-	return (int) abm->funcs->get_current_backlight(abm);
+	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+		return panel_cntl->funcs->get_current_backlight(panel_cntl);
+	else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+		return (int) abm->funcs->get_current_backlight(abm);
+	else
+		return DC_ERROR_UNEXPECTED;
 }

 int dc_link_get_target_backlight_pwm(const struct dc_link *link)

View File

@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
 #include "dm_services.h"
 #include "dc.h"
 #include "dc_link_dp.h"
@@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries(
 		dp_disable_link_phy(link, signal);

 		/* Abort link training if failure due to sink being unplugged. */
-		if (status == LINK_TRAINING_ABORT)
-			break;
-		else if (do_fallback) {
+		if (status == LINK_TRAINING_ABORT) {
+			enum dc_connection_type type = dc_connection_none;
+
+			dc_link_detect_sink(link, &type);
+			if (type == dc_connection_none)
+				break;
+		} else if (do_fallback) {
 			decide_fallback_link_setting(*link_setting, &current_setting, status);
 			/* Fail link training if reduced link bandwidth no longer meets
 			 * stream requirements.

View File

@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
 {
 	uint64_t current_backlight;
-	uint32_t round_result;
 	uint32_t bl_period, bl_int_count;
 	uint32_t bl_pwm, fractional_duty_cycle_en;
 	uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
 	current_backlight = div_u64(current_backlight, bl_period);
 	current_backlight = (current_backlight + 1) >> 1;

-	current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-	round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-	round_result = (round_result >> (bl_int_count-1)) & 1;
-
-	current_backlight >>= bl_int_count;
-	current_backlight += round_result;
-
 	return (uint32_t)(current_backlight);
 }

View File

@@ -33,63 +33,47 @@
 #define TABLE_PMSTATUSLOG        3 // Called by Tools for Agm logging
 #define TABLE_DPMCLOCKS          4 // Called by Driver; defined here, but not used, for backward compatible
 #define TABLE_MOMENTARY_PM       5 // Called by Tools; defined here, but not used, for backward compatible
-#define TABLE_COUNT              6
+#define TABLE_SMU_METRICS        6 // Called by Driver
+#define TABLE_COUNT              7

-#define NUM_DSPCLK_LEVELS      8
-#define NUM_SOCCLK_DPM_LEVELS  8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS    4
-#define NUM_MEMCLK_DPM_LEVELS  4
-
-#define NUMBER_OF_PSTATES      8
-#define NUMBER_OF_CORES        8
-
-typedef enum {
-	S3_TYPE_ENTRY,
-	S5_TYPE_ENTRY,
-} Sleep_Type_e;
-
-typedef enum {
-	GFX_OFF = 0,
-	GFX_ON  = 1,
-} GFX_Mode_e;
-
-typedef enum {
-	CPU_P0 = 0,
-	CPU_P1,
-	CPU_P2,
-	CPU_P3,
-	CPU_P4,
-	CPU_P5,
-	CPU_P6,
-	CPU_P7
-} CPU_PState_e;
-
-typedef enum {
-	CPU_CORE0 = 0,
-	CPU_CORE1,
-	CPU_CORE2,
-	CPU_CORE3,
-	CPU_CORE4,
-	CPU_CORE5,
-	CPU_CORE6,
-	CPU_CORE7
-} CORE_ID_e;
-
-typedef enum {
-	DF_DPM0 = 0,
-	DF_DPM1,
-	DF_DPM2,
-	DF_DPM3,
-	DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
-	GFX_DPM0 = 0,
-	GFX_DPM1,
-	GFX_DPM2,
-	GFX_DPM3,
-	GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetricsTable_t {
+	//CPU status
+	uint16_t CoreFrequency[6];      //[MHz]
+	uint32_t CorePower[6];          //[mW]
+	uint16_t CoreTemperature[6];    //[centi-Celsius]
+	uint16_t L3Frequency[2];        //[MHz]
+	uint16_t L3Temperature[2];      //[centi-Celsius]
+	uint16_t C0Residency[6];        //Percentage
+
+	// GFX status
+	uint16_t GfxclkFrequency;       //[MHz]
+	uint16_t GfxTemperature;        //[centi-Celsius]
+
+	// SOC IP info
+	uint16_t SocclkFrequency;       //[MHz]
+	uint16_t VclkFrequency;         //[MHz]
+	uint16_t DclkFrequency;         //[MHz]
+	uint16_t MemclkFrequency;       //[MHz]
+
+	// power, VF info for CPU/GFX telemetry rails, and then socket power total
+	uint32_t Voltage[2];            //[mV] indices: VDDCR_VDD, VDDCR_GFX
+	uint32_t Current[2];            //[mA] indices: VDDCR_VDD, VDDCR_GFX
+	uint32_t Power[2];              //[mW] indices: VDDCR_VDD, VDDCR_GFX
+	uint32_t CurrentSocketPower;    //[mW]
+	uint16_t SocTemperature;        //[centi-Celsius]
+	uint16_t EdgeTemperature;
+	uint16_t ThrottlerStatus;
+	uint16_t Spare;
+} SmuMetricsTable_t;
+
+typedef struct SmuMetrics_t {
+	SmuMetricsTable_t Current;
+	SmuMetricsTable_t Average;
+	uint32_t SampleStartTime;
+	uint32_t SampleStopTime;
+	uint32_t Accnt;
+} SmuMetrics_t;

 #endif

View File

@@ -226,7 +226,10 @@
 	__SMU_DUMMY_MAP(SetUclkDpmMode),		\
 	__SMU_DUMMY_MAP(LightSBR),			\
 	__SMU_DUMMY_MAP(GfxDriverResetRecovery),	\
-	__SMU_DUMMY_MAP(BoardPowerCalibration),
+	__SMU_DUMMY_MAP(BoardPowerCalibration),		\
+	__SMU_DUMMY_MAP(RequestGfxclk),			\
+	__SMU_DUMMY_MAP(ForceGfxVid),			\
+	__SMU_DUMMY_MAP(UnforceGfxVid),

 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type

View File

@@ -65,6 +65,13 @@
 #define PPSMC_MSG_SetDriverTableVMID		0x34
 #define PPSMC_MSG_SetSoftMinCclk		0x35
 #define PPSMC_MSG_SetSoftMaxCclk		0x36
-#define PPSMC_Message_Count			0x37
+#define PPSMC_MSG_GetGfxFrequency		0x37
+#define PPSMC_MSG_GetGfxVid			0x38
+#define PPSMC_MSG_ForceGfxFreq			0x39
+#define PPSMC_MSG_UnForceGfxFreq		0x3A
+#define PPSMC_MSG_ForceGfxVid			0x3B
+#define PPSMC_MSG_UnforceGfxVid			0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures		0x3D
+#define PPSMC_Message_Count			0x3E

 #endif

View File

@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 	 */
 	if (smu->uploading_custom_pp_table &&
 	    (adev->asic_type >= CHIP_NAVI10) &&
-	    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+	    (adev->asic_type <= CHIP_BEIGE_GOBY))
 		return smu_disable_all_features_with_exception(smu,
 							       true,
 							       SMU_FEATURE_COUNT);

View File

@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
	struct smu_11_0_dpm_context *dpm_context = NULL;
	uint32_t gen_speed, lane_width;

-	if (amdgpu_ras_intr_triggered())
-		return sysfs_emit(buf, "unavailable\n");
	smu_cmn_get_sysfs_buf(&buf, &size);

	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	dpm_context = smu_dpm->dpm_context;

View File

@@ -44,6 +44,27 @@
#undef pr_info
#undef pr_debug
/* unit: MHz */
#define CYAN_SKILLFISH_SCLK_MIN 1000
#define CYAN_SKILLFISH_SCLK_MAX 2000
#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
/* unit: mV */
#define CYAN_SKILLFISH_VDDC_MIN 700
#define CYAN_SKILLFISH_VDDC_MAX 1129
#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe
static struct gfx_user_settings {
uint32_t sclk;
uint32_t vddc;
} cyan_skillfish_user_settings;
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \
FEATURE_MASK(FEATURE_GFX_DPM_BIT))
static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
	MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0),
	MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0),
	MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0),
};
static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(SMU_METRICS),
};
static int cyan_skillfish_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
sizeof(SmuMetrics_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
smu_table->metrics_time = 0;
return 0;
err1_out:
smu_table->gpu_metrics_table_size = 0;
kfree(smu_table->metrics_table);
err0_out:
return -ENOMEM;
}
static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
{
int ret = 0;
ret = cyan_skillfish_tables_init(smu);
if (ret)
return ret;
return smu_v11_0_init_smc_tables(smu);
}
static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
kfree(smu_table->metrics_table);
smu_table->metrics_table = NULL;
kfree(smu_table->gpu_metrics_table);
smu_table->gpu_metrics_table = NULL;
smu_table->gpu_metrics_table_size = 0;
smu_table->metrics_time = 0;
return 0;
}
static int
cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
mutex_lock(&smu->metrics_lock);
ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
if (ret) {
mutex_unlock(&smu->metrics_lock);
return ret;
}
switch (member) {
case METRICS_CURR_GFXCLK:
*value = metrics->Current.GfxclkFrequency;
break;
case METRICS_CURR_SOCCLK:
*value = metrics->Current.SocclkFrequency;
break;
case METRICS_CURR_VCLK:
*value = metrics->Current.VclkFrequency;
break;
case METRICS_CURR_DCLK:
*value = metrics->Current.DclkFrequency;
break;
case METRICS_CURR_UCLK:
*value = metrics->Current.MemclkFrequency;
break;
case METRICS_AVERAGE_SOCKETPOWER:
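		/* CurrentSocketPower is reported by PMFW in mW; the <<8 then
		 * /1000 appears to convert it to Watts in 8.8 fixed point,
		 * e.g. 15000 mW -> 3840 (reading inferred, not stated here).
		 */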
*value = (metrics->Current.CurrentSocketPower << 8) /
1000;
break;
case METRICS_TEMPERATURE_EDGE:
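		/* GfxTemperature is in centi-Celsius; /100 gives whole degrees,
		 * then *SMU_TEMPERATURE_UNITS_PER_CENTIGRADES scales to the
		 * millidegree unit hwmon expects (assumes the macro is 1000).
		 */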
*value = metrics->Current.GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_HOTSPOT:
*value = metrics->Current.SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Current.Voltage[0];
break;
case METRICS_VOLTAGE_VDDGFX:
*value = metrics->Current.Voltage[1];
break;
case METRICS_THROTTLER_STATUS:
*value = metrics->Current.ThrottlerStatus;
break;
default:
*value = UINT_MAX;
break;
}
mutex_unlock(&smu->metrics_lock);
return ret;
}
static int cyan_skillfish_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
uint32_t *size)
{
int ret = 0;
if (!data || !size)
return -EINVAL;
mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_CURR_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
default:
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&smu->sensor_lock);
return ret;
}
static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
MetricsMember_t member_type;
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
member_type = METRICS_CURR_GFXCLK;
break;
case SMU_FCLK:
case SMU_MCLK:
member_type = METRICS_CURR_UCLK;
break;
case SMU_SOCCLK:
member_type = METRICS_CURR_SOCCLK;
break;
case SMU_VCLK:
member_type = METRICS_CURR_VCLK;
break;
case SMU_DCLK:
member_type = METRICS_CURR_DCLK;
break;
default:
return -EINVAL;
}
return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
}
static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
int ret = 0, size = 0;
uint32_t cur_value = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
switch (clk_type) {
case SMU_OD_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break;
case SMU_OD_VDDC_CURVE:
ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC");
size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
break;
case SMU_OD_RANGE:
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
break;
case SMU_GFXCLK:
case SMU_SCLK:
case SMU_FCLK:
case SMU_MCLK:
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break;
default:
dev_warn(smu->adev->dev, "Unsupported clock type\n");
return ret;
}
return size;
}
static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t feature_mask[2];
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
if (ret)
return false;
feature_enabled = (uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32);
return !!(feature_enabled & SMC_DPM_FEATURE);
}
static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v2_2 *gpu_metrics =
(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int i, ret = 0;
ret = smu_cmn_get_metrics_table(smu, &metrics, true);
if (ret)
return ret;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
gpu_metrics->average_soc_power = metrics.Current.Power[0];
gpu_metrics->average_gfx_power = metrics.Current.Power[1];
gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
for (i = 0; i < 6; i++) {
gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
}
for (i = 0; i < 2; i++) {
gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
}
gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_2);
}
static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
int ret = 0;
uint32_t vid;
switch (type) {
case PP_OD_EDIT_VDDC_CURVE:
if (size != 3 || input[0] != 0) {
dev_err(smu->adev->dev, "Invalid parameter!\n");
return -EINVAL;
}
if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
input[1] > CYAN_SKILLFISH_SCLK_MAX) {
dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
return -EINVAL;
}
if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
input[2] > CYAN_SKILLFISH_VDDC_MAX) {
dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
return -EINVAL;
}
cyan_skillfish_user_settings.sclk = input[1];
cyan_skillfish_user_settings.vddc = input[2];
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
dev_err(smu->adev->dev, "Invalid parameter!\n");
return -EINVAL;
}
cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
break;
case PP_OD_COMMIT_DPM_TABLE:
if (size != 0) {
dev_err(smu->adev->dev, "Invalid parameter!\n");
return -EINVAL;
}
if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
return -EINVAL;
}
if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
(cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
cyan_skillfish_user_settings.sclk, NULL);
if (ret) {
dev_err(smu->adev->dev, "Set sclk failed!\n");
return ret;
}
if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
if (ret) {
dev_err(smu->adev->dev, "Unforce vddc failed!\n");
return ret;
}
} else {
/*
* PMFW accepts SVI2 VID code, convert voltage to VID:
* vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
*/
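			/*
			 * Worked example (illustrative): a requested vddc of
			 * 1100 mV gives vid = (1550 - 1100) * 160 / 1000 = 72,
			 * matching the float form (1.55 - 1.10) * 160.0 = 72.
			 */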
vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
if (ret) {
dev_err(smu->adev->dev, "Force vddc failed!\n");
return ret;
}
}
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {

	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.init_smc_tables = cyan_skillfish_init_smc_tables,
	.fini_smc_tables = cyan_skillfish_finit_smc_tables,
	.read_sensor = cyan_skillfish_read_sensor,
	.print_clk_levels = cyan_skillfish_print_clk_levels,
	.is_dpm_running = cyan_skillfish_is_dpm_running,
	.get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
	.od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
	smu->message_map = cyan_skillfish_message_map;
	smu->table_map = cyan_skillfish_table_map;
	smu->is_apu = true;
}

View File

@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	uint32_t min_value, max_value;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
	case SMU_OD_RANGE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

-	if (adev->in_runpm)
	/*
* This aims the case below:
* amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
*
* For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To
* make that possible, PMFW needs to acknowledge the dstate transition
* process for both gfx(function 0) and audio(function 1) function of
* the ASIC.
*
* The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the
* device representing the audio function of the ASIC. And that means
* even if the sound driver(snd_hda_intel) was not loaded yet, it's still
* possible runpm suspend kicked on the ASIC. However without the dstate
* transition notification from audio function, pmfw cannot handle the
* BACO in/exit correctly. And that will cause driver hang on runpm
* resuming.
*
* To address this, we revert to legacy message way(driver masters the
* timing for BACO in/exit) on sound driver missing.
*/
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	else
		return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

-	if (adev->in_runpm) {
	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		/* Wait for PMFW handling for the Dstate change */
		msleep(10);
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);

View File

@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
	uint32_t min_value, max_value;
	uint32_t smu_version;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
			sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

-	if (adev->in_runpm)
	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	else
		return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

-	if (adev->in_runpm) {
	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		/* Wait for PMFW handling for the Dstate change */
		msleep(10);
		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);

View File

@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-			size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",

View File

@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {

View File

@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
	uint32_t freq_values[3] = {0};
	uint32_t min_clk, max_clk;

	smu_cmn_get_sysfs_buf(&buf, &size);

-	if (amdgpu_ras_intr_triggered())
-		return sysfs_emit(buf, "unavailable\n");
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	dpm_context = smu_dpm->dpm_context;

	switch (type) {
	case SMU_OD_SCLK:
-		size = sysfs_emit(buf, "%s:\n", "GFXCLK");
		size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
		fallthrough;
	case SMU_SCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
		break;
	case SMU_OD_MCLK:
-		size = sysfs_emit(buf, "%s:\n", "MCLK");
		size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
		fallthrough;
	case SMU_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);

View File

@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
-		size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		break;
	case SMU_OD_RANGE:
-		size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
			smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
		break;

View File

@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
	return ret;
}
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
bool snd_driver_loaded;
/*
* If the ASIC comes with no audio function, we always assume
* it is "enabled".
*/
p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
adev->pdev->bus->number, 1);
if (!p)
return true;
snd_driver_loaded = pci_is_enabled(p) ? true : false;
pci_dev_put(p);
return snd_driver_loaded;
}
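Callers added in this series gate the PMFW-managed BACO path on this helper: the navi10 and sienna_cichlid hunks above test "adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)" before choosing the ARMD3 sequence.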

View File

@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state);
/*
* Helper function to make sysfs_emit_at() happy. Align buf to
* the current page boundary and record the offset.
*/
static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
{
if (!*buf || !offset)
return;
*offset = offset_in_page(*buf);
*buf -= *offset;
}
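For reference, the call pattern this helper enables in the print_clk_levels() hunks above looks like the following sketch (the emitted lines and the min/max values are illustrative, not taken from any one driver):

	int size = 0;

	/* rebase buf to its page start; size becomes the offset into that page */
	smu_cmn_get_sysfs_buf(&buf, &size);

	/* every write now appends at the running offset instead of overwriting */
	size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
	size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", min, max);

	return size;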
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
#endif
#endif

View File

@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	if (switch_mmu_context) {
		struct etnaviv_iommu_context *old_context = gpu->mmu_context;

-		etnaviv_iommu_context_get(mmu_context);
-		gpu->mmu_context = mmu_context;
		gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
		etnaviv_iommu_context_put(old_context);
	}

View File

@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
		list_del(&mapping->obj_node);
	}

-	etnaviv_iommu_context_get(mmu_context);
-	mapping->context = mmu_context;
	mapping->context = etnaviv_iommu_context_get(mmu_context);
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,

View File

@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		goto err_submit_objects;

	submit->ctx = file->driver_priv;
-	etnaviv_iommu_context_get(submit->ctx->mmu);
-	submit->mmu_context = submit->ctx->mmu;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

View File

@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	gpu->fe_running = false;
	gpu->exec_state = -1;
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = NULL;

	return 0;
}
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}

	gpu->fe_running = true;
}
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
					  struct etnaviv_iommu_context *context)
{
-	u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-					    &gpu->mmu_context->cmdbuf_mapping);
	u16 prefetch;
	u32 address;

	/* setup the MMU */
-	etnaviv_iommu_restore(gpu, gpu->mmu_context);
	etnaviv_iommu_restore(gpu, context);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);
	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping);

	etnaviv_gpu_start_fe(gpu, address, prefetch);
}
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
-	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
	spin_unlock(&gpu->event_spinlock);

	etnaviv_gpu_hw_init(gpu);
-	gpu->exec_state = -1;
-	gpu->mmu_context = NULL;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
		goto out_unlock;
	}

-	if (!gpu->mmu_context) {
-		etnaviv_iommu_context_get(submit->mmu_context);
-		gpu->mmu_context = submit->mmu_context;
-		etnaviv_gpu_start_fe_idleloop(gpu);
-	} else {
-		etnaviv_iommu_context_get(gpu->mmu_context);
-		submit->prev_mmu_context = gpu->mmu_context;
-	}
	if (!gpu->fe_running)
		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);
	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);

	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
-	if (gpu->initialized && gpu->mmu_context) {
	if (gpu->initialized && gpu->fe_running) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
		 */
		etnaviv_gpu_wait_idle(gpu, 100);

-		etnaviv_iommu_context_put(gpu->mmu_context);
-		gpu->mmu_context = NULL;
		gpu->fe_running = false;
	}

	gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);

	if (gpu->initialized) {
		etnaviv_cmdbuf_free(&gpu->buffer);
		etnaviv_iommu_global_fini(gpu);

View File

@@ -101,6 +101,7 @@ struct etnaviv_gpu {
	struct workqueue_struct *wq;
	struct drm_gpu_scheduler sched;
	bool initialized;
	bool fe_running;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;

View File

@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	u32 pgtable;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);

View File

@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)v2_context->mtlb_dma,
				(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,

View File

@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
	 */
	list_for_each_entry_safe(m, n, &list, scan_node) {
		etnaviv_iommu_remove_mapping(context, m);
		etnaviv_iommu_context_put(m->context);
		m->context = NULL;
		list_del_init(&m->mmu_node);
		list_del_init(&m->scan_node);

View File

@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
static inline struct etnaviv_iommu_context *
etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
	kref_get(&ctx->refcount);
	return ctx;
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
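Having the get helper return its argument is what lets the callers converted above take the reference and publish the pointer in one statement, e.g.:

	gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);

instead of the old two-step get-then-assign sequence, keeping the reference and the assignment visibly paired.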

View File

@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror

View File

@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-	    sizeof(intel_dp->edp_dpcd))
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]

View File

@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
	}

	if (ret)
-		intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

View File

@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
	if (vm)
		i915_vm_close(vm);

-	if (ctx->syncobj)
-		drm_syncobj_put(ctx->syncobj);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*

View File

@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
		err = PTR_ERR(import);
		goto out_dmabuf;
	}
	import_obj = to_intel_bo(import);

	if (import != &obj->base) {
		pr_err("i915_gem_prime_import created a new object!\n");
		err = -EINVAL;
		goto out_import;
	}

-	import_obj = to_intel_bo(import);

	i915_gem_object_lock(import_obj, NULL);
	err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
pr_err("i915_gem_prime_import failed with the wrong err=%ld\n", pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
PTR_ERR(import)); PTR_ERR(import));
err = PTR_ERR(import); err = PTR_ERR(import);
} else {
err = 0;
} }
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
		err = PTR_ERR(import);
		goto out_dmabuf;
	}
	import_obj = to_intel_bo(import);

	if (import == &obj->base) {
		pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
		goto out_import;
	}

-	import_obj = to_intel_bo(import);

	i915_gem_object_lock(import_obj, NULL);
	err = __i915_gem_object_get_pages(import_obj);
	if (err) {

View File

@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
	return I915_MMAP_TYPE_GTT;
}
static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
unsigned long size)
{
if (HAS_LMEM(i915)) {
struct intel_memory_region *sys_region =
i915->mm.regions[INTEL_REGION_SMEM];
return __i915_gem_object_create_user(i915, size, &sys_region, 1);
}
return i915_gem_object_create_internal(i915, size);
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
	u64 offset;
	int ret;

-	obj = i915_gem_object_create_internal(i915, size);
	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
	}

	/* Too large */
-	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
-	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
		goto err_obj;
	}

-	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

-	if (HAS_LMEM(i915))
	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

View File

@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

-	return intel_uncore_read(uncore, GEN6_RPNSWREQ);
	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);

	return freq;
}
static u32 intel_rps_get_req(u32 pureq)

View File

@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
	__uc_free_load_err_log(uc);
}

-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-	return intel_guc_ct_enabled(&guc->ct);
-}
-
/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
-	GEM_BUG_ON(!guc_communication_enabled(guc));
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
	struct drm_i915_private *i915 = gt->i915;
	int ret;

-	GEM_BUG_ON(guc_communication_enabled(guc));
	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
-	GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));

	if (enable_communication)
		guc_enable_communication(guc);

View File

@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
#endif

	if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
-		rdev->agp = radeon_agp_head_init(rdev->ddev);
		rdev->agp = radeon_agp_head_init(dev);
	if (rdev->agp) {
		rdev->agp->agp_mtrr = arch_phys_wc_add(
			rdev->agp->agp_info.aper_base,

View File

@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
	return ret;
}

-static int cdn_dp_resume(struct device *dev)
static __maybe_unused int cdn_dp_resume(struct device *dev)
{
	struct cdn_dp_device *dp = dev_get_drvdata(dev);

View File

@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
	else
		gfp_flags |= GFP_HIGHUSER;

-	for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

View File

@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer = NULL;
	ring_info->pkt_buffer_size = 0;
}

View File

@@ -570,7 +570,7 @@ fail_msg_node:
fail_db_node:
	of_node_put(smu->db_node);
fail_bootmem:
-	memblock_free(__pa(smu), sizeof(struct smu_device));
	memblock_free_ptr(smu, sizeof(struct smu_device));
	smu = NULL;
fail_np:
	of_node_put(np);

View File

@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
	reset_control_assert(gphy_fw->reset);

	/* The vendor BSP uses a 200ms delay after asserting the reset line.
	 * Without this some users are observing that the PHY is not coming up
	 * on the MDIO bus.
	 */
	msleep(200);

	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
	if (ret) {
		dev_err(dev, "failed to load firmware: %s, error: %i\n",

View File

@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}

static int
-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
{
-	struct qca8k_priv *priv = salve_bus->priv;
-	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;
@@ -682,10 +680,8 @@ exit:
}

static int
-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
{
-	struct qca8k_priv *priv = salve_bus->priv;
-	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;
@@ -726,6 +722,24 @@ exit:
	return ret;
}
static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
struct mii_bus *bus = priv->bus;
return qca8k_mdio_write(bus, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
struct mii_bus *bus = priv->bus;
return qca8k_mdio_read(bus, phy, regnum);
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
	bus->priv = (void *)priv;
	bus->name = "qca8k slave mii";
-	bus->read = qca8k_mdio_read;
-	bus->write = qca8k_mdio_write;
	bus->read = qca8k_internal_mdio_read;
	bus->write = qca8k_internal_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
		 ds->index);

View File

@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
#ifdef VORTEX_BUS_MASTER
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
-		outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
		outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
		outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
		vp->tx_skb = skb;
		outw(StartDMADown, ioaddr + EL3_CMD);

View File

@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
	}
}

-#ifdef MODULE
static int __init ne_init(void)
{
	int retval;

	if (IS_MODULE(CONFIG_NE2000))
		ne_add_devices();

	retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-	if (retval) {
	if (IS_MODULE(CONFIG_NE2000) && retval) {
		if (io[0] == 0)
			pr_notice("ne.c: You must supply \"io=0xNNN\""
				  " value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
	return retval;
}
module_init(ne_init);

-#else /* MODULE */
-static int __init ne_init(void)
-{
-	int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
-	/* Unregister unused platform_devices. */
-	ne_loop_rm_unreg(0);
-	return retval;
-}
-module_init(ne_init);
#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)

-#ifdef CONFIG_NETDEV_LEGACY_INIT
struct net_device * __init ne_probe(int unit)
{
	int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
	return ERR_PTR(-ENODEV);
}
#endif
-#endif /* MODULE */

static void __exit ne_exit(void)
{

View File

@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
#ifdef XMT_VIA_SKB
		skb_save[i] = p->tmd_skb[i];
#endif
-		buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
		buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
		blen[i] = tmdp->blen;
		tmdp->u.s.status = 0x0;
	}

View File

@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
	/* SR-IOV capability was enabled but there are no VFs*/
	if (iov->total == 0) {
-		err = -EINVAL;
		err = 0;
		goto failed;
	}

View File

@@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
				DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
					     bp->current_interval * 10);
		fw_health->tmr_counter = fw_health->tmr_multiplier;
-		if (!fw_health->enabled) {
		if (!fw_health->enabled)
			fw_health->last_fw_heartbeat =
				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
		fw_health->last_fw_reset_cnt =
			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
-		}
		netif_info(bp, drv, bp->dev,
			   "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
			   fw_health->master, fw_health->last_fw_reset_cnt,
@@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j; int j;
if (!txr->tx_buf_ring)
continue;
for (j = 0; j < max_idx;) { for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb; struct sk_buff *skb;
@@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
} }
skip_rx_tpa_free: skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
goto skip_rx_buf_free;
for (i = 0; i < max_idx; i++) { for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
dma_addr_t mapping = rx_buf->mapping; dma_addr_t mapping = rx_buf->mapping;
@@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
kfree(data); kfree(data);
} }
} }
skip_rx_buf_free:
if (!rxr->rx_agg_ring)
goto skip_rx_agg_free;
for (i = 0; i < max_agg_idx; i++) { for (i = 0; i < max_agg_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page; struct page *page = rx_agg_buf->page;
@@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
__free_page(page); __free_page(page);
} }
skip_rx_agg_free:
if (rxr->rx_page) { if (rxr->rx_page) {
__free_page(rxr->rx_page); __free_page(rxr->rx_page);
rxr->rx_page = NULL; rxr->rx_page = NULL;
@@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
struct pci_dev *pdev = bp->pdev; struct pci_dev *pdev = bp->pdev;
int i; int i;
if (!rmem->pg_arr)
goto skip_pages;
for (i = 0; i < rmem->nr_pages; i++) { for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i]) if (!rmem->pg_arr[i])
continue; continue;
@@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL; rmem->pg_arr[i] = NULL;
} }
skip_pages:
if (rmem->pg_tbl) { if (rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8; size_t pg_tbl_size = rmem->nr_pages * 8;
@@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{ {
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
kfree(cpr->cp_desc_ring); kfree(cpr->cp_desc_ring);
cpr->cp_desc_ring = NULL; cpr->cp_desc_ring = NULL;
ring->ring_mem.pg_arr = NULL;
kfree(cpr->cp_desc_mapping); kfree(cpr->cp_desc_mapping);
cpr->cp_desc_mapping = NULL; cpr->cp_desc_mapping = NULL;
ring->ring_mem.dma_arr = NULL;
} }
static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return; return;
} }
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
bp->fw_health->enabled) {
bp->fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
}
bp->fw_reset_state = 0; bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */ /* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic(); smp_mb__before_atomic();

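The guards added in the bnxt hunks above make the free paths tolerate partially initialized rings: each teardown step first checks that its resource was ever allocated. The underlying pattern, sketched in plain C with hypothetical names:

#include <stdlib.h>

struct ring {
	int *buf;	/* may still be NULL if setup aborted early */
	int *agg;	/* allocated later than buf, so may be NULL too */
};

/* Teardown in the style of the bnxt change: check each resource before
 * touching it, so this is safe to call from any point of a failed or
 * partial initialization, and safe to call twice. */
static void ring_free(struct ring *r)
{
	if (r->buf) {
		free(r->buf);
		r->buf = NULL;
	}
	if (r->agg) {
		free(r->agg);
		r->agg = NULL;
	}
}

int main(void)
{
	struct ring r = { 0 };

	r.buf = malloc(64 * sizeof(int));	/* agg never allocated */
	ring_free(&r);				/* must not crash */
	ring_free(&r);				/* idempotent, too */
	return 0;
}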

@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
 {
 	struct bnxt_flower_indr_block_cb_priv *cb_priv;
 
-	/* All callback list access should be protected by RTNL. */
-	ASSERT_RTNL();
-
 	list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
 		if (cb_priv->tunnel_netdev == netdev)
 			return cb_priv;


@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
 	struct platform_device *plat_dev = pci_get_drvdata(pdev);
 	struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
-	platform_device_unregister(plat_dev);
 	clk_unregister(plat_data->pclk);
 	clk_unregister(plat_data->hclk);
+	platform_device_unregister(plat_dev);
 }
 
 static const struct pci_device_id dev_id_table[] = {

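The macb_pci reordering is a use-after-free fix: plat_data lives inside plat_dev, so the clocks it points to must be released before the platform device (and with it, the platform data) is unregistered. A standalone sketch of the ordering rule, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct platdata {
	char clk_name[8];
};

/* Releasing the owner frees its embedded platform data, the way
 * platform_device_unregister() does in the hunk above. */
static void unregister_owner(struct platdata *pd)
{
	free(pd);
}

int main(void)
{
	struct platdata *pd = calloc(1, sizeof(*pd));

	if (!pd)
		return 1;
	strcpy(pd->clk_name, "pclk");

	/* Correct order: finish every use of pd first... */
	printf("unregistering clock %s\n", pd->clk_name);
	/* ...and only then free the structure that owns it.  Freeing
	 * first would make the printf above a use-after-free. */
	unregister_owner(pd);
	return 0;
}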

@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
 module_param(tx_sgl, uint, 0600);
 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
 
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
 #define HNS3_SGL_SIZE(nfrag)	(sizeof(struct scatterlist) * (nfrag) + \
 				 sizeof(struct sg_table))
 #define HNS3_MAX_SGL_SIZE	ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
 #define HNS3_OUTER_VLAN_TAG	2
 
 #define HNS3_MIN_TX_LEN		33U
+#define HNS3_MIN_TUN_PKT_LEN	65U
 
 /* hns3_pci_tbl - PCI Device ID Table
 *
@@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 			       l4.tcp->doff);
 		break;
 	case IPPROTO_UDP:
-		if (hns3_tunnel_csum_bug(skb))
-			return skb_checksum_help(skb);
+		if (hns3_tunnel_csum_bug(skb)) {
+			int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+			return ret ? ret : skb_checksum_help(skb);
+		}
 
 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,6 +4760,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 		goto out_with_desc_cb;
 
 	if (!HNAE3_IS_TX_RING(ring)) {
-		hns3_alloc_page_pool(ring);
+		if (page_pool_enabled)
+			hns3_alloc_page_pool(ring);
 
 		ret = hns3_alloc_ring_buffers(ring);

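page_pool_enabled is a load-time-only knob: permissions 0400 make it readable but never writable through sysfs, which suits a setting that cannot change on a live RX ring. A minimal out-of-tree module sketch using the same (real) module_param() API; the module itself is hypothetical:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/init.h>

/* 0400: visible under /sys/module/.../parameters, root-readable,
 * settable only on the insmod/modprobe command line. */
static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);
MODULE_PARM_DESC(page_pool_enabled, "Enable page pool for RX buffers");

static int __init demo_init(void)
{
	pr_info("demo: page_pool_enabled=%d\n", page_pool_enabled);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");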

@@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
 	}
 
 	bd_num = le32_to_cpu(req->bd_num);
+	if (!bd_num) {
+		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+		return -EINVAL;
+	}
 
 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
 	if (!desc_src)

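The bd_num check above treats a firmware-reported count as untrusted input: a zero-element kcalloc() would not fail outright, but everything downstream assumes at least one descriptor. A userspace sketch of the validate-before-allocate pattern (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* A length that arrives from hardware, firmware, or the wire is
 * untrusted: validate it before sizing an allocation or driving a
 * loop with it. */
static int read_stats(unsigned int bd_num)
{
	long *desc;

	if (!bd_num) {
		fprintf(stderr, "bd number is 0!\n");
		return -1;
	}

	desc = calloc(bd_num, sizeof(*desc));
	if (!desc)
		return -1;

	/* ... fill and consume desc ... */
	free(desc);
	return 0;
}

int main(void)
{
	return read_stats(0) ? 1 : 0;	/* zero is rejected up front */
}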

@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
 static int hclge_configure(struct hclge_dev *hdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+	const struct cpumask *cpumask = cpu_online_mask;
 	struct hclge_cfg cfg;
 	unsigned int i;
-	int ret;
+	int node, ret;
 
 	ret = hclge_get_cfg(hdev, &cfg);
 	if (ret)
@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	hclge_init_kdump_kernel_config(hdev);
 
-	/* Set the init affinity based on pci func number */
-	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
-	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
-	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
-			&hdev->affinity_mask);
+	/* Set the affinity based on numa node */
+	node = dev_to_node(&hdev->pdev->dev);
+	if (node != NUMA_NO_NODE)
+		cpumask = cpumask_of_node(node);
+
+	cpumask_copy(&hdev->affinity_mask, cpumask);
 
 	return ret;
 }
@@ -8125,11 +8127,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	hclge_clear_arfs_rules(hdev);
 	spin_unlock_bh(&hdev->fd_rule_lock);
 
-	/* If it is not PF reset, the firmware will disable the MAC,
+	/* If it is not PF reset or FLR, the firmware will disable the MAC,
 	 * so it only need to stop phy here.
 	 */
 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
-	    hdev->reset_type != HNAE3_FUNC_RESET) {
+	    hdev->reset_type != HNAE3_FUNC_RESET &&
+	    hdev->reset_type != HNAE3_FLR_RESET) {
 		hclge_mac_stop_phy(hdev);
 		hclge_update_link_status(hdev);
 		return;

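The affinity change falls back to cpu_online_mask because dev_to_node() can legitimately return NUMA_NO_NODE (non-NUMA systems, or firmware that does not report a node), and cpumask_of_node() must not be fed that value. A kernel-style sketch of the fallback; pick_affinity() is a hypothetical helper, while the cpumask/NUMA calls are real APIs:

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/device.h>

/* Prefer the CPUs local to the device's NUMA node; fall back to all
 * online CPUs when the device has no node assigned. */
static void pick_affinity(struct device *dev, struct cpumask *out)
{
	const struct cpumask *mask = cpu_online_mask;
	int node = dev_to_node(dev);

	if (node != NUMA_NO_NODE)
		mask = cpumask_of_node(node);

	cpumask_copy(out, mask);
}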

@@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 	hclgevf_enable_vector(&hdev->misc_vector, false);
 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+		hclgevf_clear_event_cause(hdev, clearval);
 
 	switch (event_cause) {
 	case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 		break;
 	}
 
-	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
-		hclgevf_clear_event_cause(hdev, clearval);
+	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
 		hclgevf_enable_vector(&hdev->misc_vector, true);
-	}
 
 	return IRQ_HANDLED;
 }

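The hclgevf reordering acks the interrupt cause before handling it rather than after, closing a window in which a second event arriving mid-handler would be cleared together with the first and silently lost. A sketch of the shape of the fix; every demo_* name is a hypothetical stand-in, only irqreturn_t and IRQ_HANDLED are real kernel types:

#include <linux/interrupt.h>
#include <linux/types.h>

enum demo_event { DEMO_EVENT_RST, DEMO_EVENT_MBX, DEMO_EVENT_OTHER };
struct demo_dev;

enum demo_event demo_check_evt_cause(struct demo_dev *d, u32 *clearval);
void demo_clear_event_cause(struct demo_dev *d, u32 clearval);
void demo_handle_event(struct demo_dev *d, enum demo_event cause);
void demo_enable_vector(struct demo_dev *d, bool en);

static irqreturn_t demo_misc_irq(int irq, void *data)
{
	struct demo_dev *hdev = data;
	u32 clearval;
	enum demo_event cause = demo_check_evt_cause(hdev, &clearval);

	/* Ack as soon as the cause is latched; clearing only after the
	 * (possibly slow) handling would fold a new event into the old
	 * ack and drop it. */
	if (cause != DEMO_EVENT_OTHER)
		demo_clear_event_cause(hdev, clearval);

	demo_handle_event(hdev, cause);

	if (cause != DEMO_EVENT_OTHER)
		demo_enable_vector(hdev, true);	/* re-arm last */
	return IRQ_HANDLED;
}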

@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
 			err = -ENODEV;
 			goto out;
 		}
-		memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);	/* YUCK! Get addr from NOVRAM */
+		memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN);	/* YUCK! Get addr from NOVRAM */
 		dev->base_addr = MVME_I596_BASE;
 		dev->irq = (unsigned) MVME16x_IRQ_I596;
 		goto found;

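absolute_pointer() wraps a fixed hardware address so the compiler loses pointer provenance and stops warning that memcpy() reads outside any known object; in the kernel it is built on RELOC_HIDE(). A userspace approximation using an empty asm barrier; the demo substitutes a real buffer for the NOVRAM address so it is safe to run:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Launder a numeric address through inline asm so the compiler cannot
 * trace it back to (or away from) any object -- roughly what the
 * kernel's absolute_pointer() achieves via RELOC_HIDE(). */
static inline void *absolute_pointer_demo(uintptr_t val)
{
	void *p = (void *)val;

	__asm__ ("" : "+r" (p));	/* optimization barrier, emits no code */
	return p;
}

int main(void)
{
	char novram[6] = { 1, 2, 3, 4, 5, 6 };
	char mac[6];

	/* On MVME hardware the constant 0xfffc1f2c would be passed here. */
	memcpy(mac, absolute_pointer_demo((uintptr_t)novram), sizeof(mac));
	printf("first byte: %d\n", mac[0]);
	return 0;
}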

@@ -4700,6 +4700,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 		return 0;
 	}
 
+	if (adapter->failover_pending) {
+		adapter->init_done_rc = -EAGAIN;
+		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+		complete(&adapter->init_done);
+		/* login response buffer will be released on reset */
+		return 0;
+	}
+
+	if (adapter->failover_pending) {
+		adapter->init_done_rc = -EAGAIN;
+		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+		complete(&adapter->init_done);
+		/* login response buffer will be released on reset */
+		return 0;
+	}
+
 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");


@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
 {
 	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
 		ice_plug_aux_dev(pf);
 	}
 }
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 {
 	ice_unplug_aux_dev(pf);
 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
 }
 #endif /* _ICE_H_ */


@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 	struct auxiliary_device *adev;
 	int ret;
 
+	/* if this PF doesn't support a technology that requires auxiliary
+	 * devices, then gracefully exit
+	 */
+	if (!ice_is_aux_ena(pf))
+		return 0;
+
 	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
 	if (!iadev)
 		return -ENOMEM;


@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	netdev->vlan_features |= netdev->features;
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+	netdev->hw_enc_features |= netdev->vlan_features;
 
 	/* MTU range: 68 - 9216 */
 	netdev->min_mtu = ETH_MIN_MTU;


@@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
 static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 {
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	union devlink_param_value value;
 	int err;
 
-	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
 		return 0;
 
 	err = devlink_param_register(devlink, &enable_rdma_param);
@@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
 {
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
-
-	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
 		return;
 
 	devlink_param_unpublish(devlink, &enable_rdma_param);


@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
 	err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
 	if (err) {
 		mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
-		return err;
+		goto err_cancel_work;
 	}
 
 	err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
 	mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
 	mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
 	cancel_work_sync(&tracer->read_fw_strings_work);
 	return err;
 }
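The fw_tracer fix completes a standard goto-unwind chain: the early `return err` skipped cancel_work_sync() for work queued before the failing step. A self-contained sketch of the idiom with stand-in allocations (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Every step that can fail jumps to a label that undoes exactly the
 * steps already completed -- including, after the fix above, the
 * asynchronous work started before the first failing allocation. */
static int tracer_init(void)
{
	char *pd, *mkey;
	int err = 0;

	/* ... asynchronous work is queued here ... */

	pd = malloc(32);		/* stands in for the PD allocation */
	if (!pd) {
		err = -1;
		goto err_cancel_work;	/* was a bare return: leaked the work */
	}

	mkey = malloc(32);		/* stands in for the mkey creation */
	if (!mkey) {
		err = -1;
		goto err_dealloc_pd;
	}

	return 0;

err_dealloc_pd:
	free(pd);
err_cancel_work:
	/* the cancel_work_sync() equivalent would go here */
	fprintf(stderr, "init failed: %d\n", err);
	return err;
}

int main(void)
{
	return tracer_init() ? 1 : 0;
}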

Some files were not shown because too many files have changed in this diff.