Merge 5.15.58 into android14-5.15

Changes in 5.15.58
	pinctrl: stm32: fix optional IRQ support to gpios
	riscv: add as-options for modules with assembly compontents
	mlxsw: spectrum_router: Fix IPv4 nexthop gateway indication
	lockdown: Fix kexec lockdown bypass with ima policy
	drm/ttm: fix locking in vmap/vunmap TTM GEM helpers
	bus: mhi: host: pci_generic: add Telit FN980 v1 hardware revision
	bus: mhi: host: pci_generic: add Telit FN990
	Revert "selftest/vm: verify remap destination address in mremap_test"
	Revert "selftest/vm: verify mmap addr in mremap_test"
	PCI: hv: Fix multi-MSI to allow more than one MSI vector
	PCI: hv: Fix hv_arch_irq_unmask() for multi-MSI
	PCI: hv: Reuse existing IRTE allocation in compose_msi_msg()
	PCI: hv: Fix interrupt mapping for multi-MSI
	serial: mvebu-uart: correctly report configured baudrate value
	batman-adv: Use netif_rx_any_context() any.
	Revert "mt76: mt7921: Fix the error handling path of mt7921_pci_probe()"
	Revert "mt76: mt7921e: fix possible probe failure after reboot"
	mt76: mt7921: use physical addr to unify register access
	mt76: mt7921e: fix possible probe failure after reboot
	mt76: mt7921: Fix the error handling path of mt7921_pci_probe()
	xfs: fix maxlevels comparisons in the btree staging code
	xfs: fold perag loop iteration logic into helper function
	xfs: rename the next_agno perag iteration variable
	xfs: terminate perag iteration reliably on agcount
	xfs: fix perag reference leak on iteration race with growfs
	xfs: prevent a WARN_ONCE() in xfs_ioc_attr_list()
	r8152: fix a WOL issue
	ip: Fix data-races around sysctl_ip_default_ttl.
	xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup()
	power/reset: arm-versatile: Fix refcount leak in versatile_reboot_probe
	RDMA/irdma: Do not advertise 1GB page size for x722
	RDMA/irdma: Fix sleep from invalid context BUG
	pinctrl: ralink: rename MT7628(an) functions to MT76X8
	pinctrl: ralink: rename pinctrl-rt2880 to pinctrl-ralink
	pinctrl: ralink: Check for null return of devm_kcalloc
	perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
	ipv4/tcp: do not use per netns ctl sockets
	net: tun: split run_ebpf_filter() and pskb_trim() into different "if statement"
	mm/pagealloc: sysctl: change watermark_scale_factor max limit to 30%
	sysctl: move some boundary constants from sysctl.c to sysctl_vals
	tcp: Fix data-races around sysctl_tcp_ecn.
	drm/amd/display: Support for DMUB HPD interrupt handling
	drm/amd/display: Add option to defer works of hpd_rx_irq
	drm/amd/display: Fork thread to offload work of hpd_rx_irq
	drm/amdgpu/display: add quirk handling for stutter mode
	drm/amd/display: Ignore First MST Sideband Message Return Error
	scsi: megaraid: Clear READ queue map's nr_queues
	scsi: ufs: core: Drop loglevel of WriteBoost message
	nvme: check for duplicate identifiers earlier
	nvme: fix block device naming collision
	e1000e: Enable GPT clock before sending message to CSME
	Revert "e1000e: Fix possible HW unit hang after an s0ix exit"
	igc: Reinstate IGC_REMOVED logic and implement it properly
	ip: Fix data-races around sysctl_ip_no_pmtu_disc.
	ip: Fix data-races around sysctl_ip_fwd_use_pmtu.
	ip: Fix data-races around sysctl_ip_fwd_update_priority.
	ip: Fix data-races around sysctl_ip_nonlocal_bind.
	ip: Fix a data-race around sysctl_ip_autobind_reuse.
	ip: Fix a data-race around sysctl_fwmark_reflect.
	tcp/dccp: Fix a data-race around sysctl_tcp_fwmark_accept.
	tcp: sk->sk_bound_dev_if once in inet_request_bound_dev_if()
	tcp: Fix data-races around sysctl_tcp_l3mdev_accept.
	tcp: Fix data-races around sysctl_tcp_mtu_probing.
	tcp: Fix data-races around sysctl_tcp_base_mss.
	tcp: Fix data-races around sysctl_tcp_min_snd_mss.
	tcp: Fix a data-race around sysctl_tcp_mtu_probe_floor.
	tcp: Fix a data-race around sysctl_tcp_probe_threshold.
	tcp: Fix a data-race around sysctl_tcp_probe_interval.
	net: stmmac: fix pm runtime issue in stmmac_dvr_remove()
	net: stmmac: fix unbalanced ptp clock issue in suspend/resume flow
	mtd: rawnand: gpmi: validate controller clock rate
	mtd: rawnand: gpmi: Set WAIT_FOR_READY timeout based on program/erase times
	net: dsa: microchip: ksz_common: Fix refcount leak bug
	net: skb: introduce kfree_skb_reason()
	net: skb: use kfree_skb_reason() in tcp_v4_rcv()
	net: skb: use kfree_skb_reason() in __udp4_lib_rcv()
	net: socket: rename SKB_DROP_REASON_SOCKET_FILTER
	net: skb_drop_reason: add document for drop reasons
	net: netfilter: use kfree_drop_reason() for NF_DROP
	net: ipv4: use kfree_skb_reason() in ip_rcv_core()
	net: ipv4: use kfree_skb_reason() in ip_rcv_finish_core()
	i2c: mlxcpld: Fix register setting for 400KHz frequency
	i2c: cadence: Change large transfer count reset logic to be unconditional
	perf tests: Fix Convert perf time to TSC test for hybrid
	net: stmmac: fix dma queue left shift overflow issue
	net/tls: Fix race in TLS device down flow
	igmp: Fix data-races around sysctl_igmp_llm_reports.
	igmp: Fix a data-race around sysctl_igmp_max_memberships.
	igmp: Fix data-races around sysctl_igmp_max_msf.
	tcp: Fix data-races around keepalive sysctl knobs.
	tcp: Fix data-races around sysctl_tcp_syn(ack)?_retries.
	tcp: Fix data-races around sysctl_tcp_syncookies.
	tcp: Fix data-races around sysctl_tcp_migrate_req.
	tcp: Fix data-races around sysctl_tcp_reordering.
	tcp: Fix data-races around some timeout sysctl knobs.
	tcp: Fix a data-race around sysctl_tcp_notsent_lowat.
	tcp: Fix a data-race around sysctl_tcp_tw_reuse.
	tcp: Fix data-races around sysctl_max_syn_backlog.
	tcp: Fix data-races around sysctl_tcp_fastopen.
	tcp: Fix data-races around sysctl_tcp_fastopen_blackhole_timeout.
	iavf: Fix handling of dummy receive descriptors
	pinctrl: armada-37xx: Use temporary variable for struct device
	pinctrl: armada-37xx: Make use of the devm_platform_ioremap_resource()
	pinctrl: armada-37xx: Convert to use dev_err_probe()
	pinctrl: armada-37xx: use raw spinlocks for regmap to avoid invalid wait context
	i40e: Fix erroneous adapter reinitialization during recovery process
	ixgbe: Add locking to prevent panic when setting sriov_numvfs to zero
	net: stmmac: remove redunctant disable xPCS EEE call
	gpio: pca953x: only use single read/write for No AI mode
	gpio: pca953x: use the correct range when do regmap sync
	gpio: pca953x: use the correct register address when regcache sync during init
	be2net: Fix buffer overflow in be_get_module_eeprom
	net: dsa: sja1105: silent spi_device_id warnings
	net: dsa: vitesse-vsc73xx: silent spi_device_id warnings
	drm/imx/dcss: Add missing of_node_put() in fail path
	ipv4: Fix a data-race around sysctl_fib_multipath_use_neigh.
	ipv4: Fix data-races around sysctl_fib_multipath_hash_policy.
	ipv4: Fix data-races around sysctl_fib_multipath_hash_fields.
	ip: Fix data-races around sysctl_ip_prot_sock.
	udp: Fix a data-race around sysctl_udp_l3mdev_accept.
	tcp: Fix data-races around sysctl knobs related to SYN option.
	tcp: Fix a data-race around sysctl_tcp_early_retrans.
	tcp: Fix data-races around sysctl_tcp_recovery.
	tcp: Fix a data-race around sysctl_tcp_thin_linear_timeouts.
	tcp: Fix data-races around sysctl_tcp_slow_start_after_idle.
	tcp: Fix a data-race around sysctl_tcp_retrans_collapse.
	tcp: Fix a data-race around sysctl_tcp_stdurg.
	tcp: Fix a data-race around sysctl_tcp_rfc1337.
	tcp: Fix a data-race around sysctl_tcp_abort_on_overflow.
	tcp: Fix data-races around sysctl_tcp_max_reordering.
	gpio: gpio-xilinx: Fix integer overflow
	KVM: selftests: Fix target thread to be migrated in rseq_test
	spi: bcm2835: bcm2835_spi_handle_err(): fix NULL pointer deref for non DMA transfers
	KVM: Don't null dereference ops->destroy
	mm/mempolicy: fix uninit-value in mpol_rebind_policy()
	bpf: Make sure mac_header was set before using it
	sched/deadline: Fix BUG_ON condition for deboosted tasks
	x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
	dlm: fix pending remove if msg allocation fails
	x86/uaccess: Implement macros for CMPXCHG on user addresses
	x86/extable: Tidy up redundant handler functions
	x86/extable: Get rid of redundant macros
	x86/mce: Deduplicate exception handling
	x86/extable: Rework the exception table mechanics
	x86/extable: Provide EX_TYPE_DEFAULT_MCE_SAFE and EX_TYPE_FAULT_MCE_SAFE
	bitfield.h: Fix "type of reg too small for mask" test
	x86/entry_32: Remove .fixup usage
	x86/extable: Extend extable functionality
	x86/msr: Remove .fixup usage
	x86/futex: Remove .fixup usage
	KVM: x86: Use __try_cmpxchg_user() to emulate atomic accesses
	xhci: dbc: refactor xhci_dbc_init()
	xhci: dbc: create and remove dbc structure in dbgtty driver.
	xhci: dbc: Rename xhci_dbc_init and xhci_dbc_exit
	xhci: Set HCD flag to defer primary roothub registration
	mt76: fix use-after-free by removing a non-RCU wcid pointer
	iwlwifi: fw: uefi: add missing include guards
	crypto: qat - set to zero DH parameters before free
	crypto: qat - use pre-allocated buffers in datapath
	crypto: qat - refactor submission logic
	crypto: qat - add backlog mechanism
	crypto: qat - fix memory leak in RSA
	crypto: qat - remove dma_free_coherent() for RSA
	crypto: qat - remove dma_free_coherent() for DH
	crypto: qat - add param check for RSA
	crypto: qat - add param check for DH
	crypto: qat - re-enable registration of algorithms
	exfat: fix referencing wrong parent directory information after renaming
	tracing: Have event format check not flag %p* on __get_dynamic_array()
	tracing: Place trace_pid_list logic into abstract functions
	tracing: Fix return value of trace_pid_write()
	um: virtio_uml: Allow probing from devicetree
	um: virtio_uml: Fix broken device handling in time-travel
	Bluetooth: Add bt_skb_sendmsg helper
	Bluetooth: Add bt_skb_sendmmsg helper
	Bluetooth: SCO: Replace use of memcpy_from_msg with bt_skb_sendmsg
	Bluetooth: RFCOMM: Replace use of memcpy_from_msg with bt_skb_sendmmsg
	Bluetooth: Fix passing NULL to PTR_ERR
	Bluetooth: SCO: Fix sco_send_frame returning skb->len
	Bluetooth: Fix bt_skb_sendmmsg not allocating partial chunks
	exfat: use updated exfat_chain directly during renaming
	drm/amd/display: Reset DMCUB before HW init
	drm/amd/display: Optimize bandwidth on following fast update
	drm/amd/display: Fix surface optimization regression on Carrizo
	x86/amd: Use IBPB for firmware calls
	x86/alternative: Report missing return thunk details
	watchqueue: make sure to serialize 'wqueue->defunct' properly
	tty: drivers/tty/, stop using tty_schedule_flip()
	tty: the rest, stop using tty_schedule_flip()
	tty: drop tty_schedule_flip()
	tty: extract tty_flip_buffer_commit() from tty_flip_buffer_push()
	tty: use new tty_insert_flip_string_and_push_buffer() in pty_write()
	net: usb: ax88179_178a needs FLAG_SEND_ZLP
	watch-queue: remove spurious double semicolon
	drm/amd/display: Don't lock connection_mutex for DMUB HPD
	drm/amd/display: invalid parameter check in dmub_hpd_callback
	x86/extable: Prefer local labels in .set directives
	KVM: x86: fix typo in __try_cmpxchg_user causing non-atomicness
	x86: drop bogus "cc" clobber from __try_cmpxchg_user_asm()
	drm/amdgpu: Off by one in dm_dmub_outbox1_low_irq()
	x86/entry_32: Fix segment exceptions
	drm/amd/display: Fix wrong format specifier in amdgpu_dm.c
	Linux 5.15.58

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6655a937b4226d3011278d13df84b25e5ab4b9ef
Greg Kroah-Hartman
2022-08-02 08:37:15 +02:00
209 changed files with 3384 additions and 1903 deletions

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 57
+SUBLEVEL = 58
 EXTRAVERSION =
 NAME = Trick or Treat

--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -59,7 +59,7 @@ srmcons_do_receive_chars(struct tty_port *port)
 	} while((result.bits.status & 1) && (++loops < 10));

 	if (count)
-		tty_schedule_flip(port);
+		tty_flip_buffer_push(port);

 	return count;
 }

--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -75,6 +75,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
 endif

 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)

 # GCC versions that support the "-mstrict-align" option default to allowing
 # unaligned accesses. While unaligned accesses are explicitly allowed in the

--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -21,6 +21,7 @@
  * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
  */
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/virtio.h>
@@ -49,6 +50,7 @@ struct virtio_uml_platform_data {
 struct virtio_uml_device {
 	struct virtio_device vdev;
 	struct platform_device *pdev;
+	struct virtio_uml_platform_data *pdata;

 	spinlock_t sock_lock;
 	int sock, req_fd, irq;
@@ -61,6 +63,7 @@ struct virtio_uml_device {
 	u8 config_changed_irq:1;
 	uint64_t vq_irq_vq_map;
+	int recv_rc;
 };

 struct virtio_uml_vq_info {
@@ -146,14 +149,6 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev,

 	rc = vhost_user_recv_header(fd, msg);

-	if (rc == -ECONNRESET && vu_dev->registered) {
-		struct virtio_uml_platform_data *pdata;
-
-		pdata = vu_dev->pdev->dev.platform_data;
-
-		virtio_break_device(&vu_dev->vdev);
-		schedule_work(&pdata->conn_broken_wk);
-	}
 	if (rc)
 		return rc;
 	size = msg->header.size;
@@ -162,6 +157,21 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev,
 	return full_read(fd, &msg->payload, size, false);
 }

+static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
+				   int rc)
+{
+	struct virtio_uml_platform_data *pdata = vu_dev->pdata;
+
+	if (rc != -ECONNRESET)
+		return;
+
+	if (!vu_dev->registered)
+		return;
+
+	virtio_break_device(&vu_dev->vdev);
+	schedule_work(&pdata->conn_broken_wk);
+}
+
 static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
 				struct vhost_user_msg *msg,
 				size_t max_payload_size)
@@ -169,8 +179,10 @@ static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
 	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
 				 max_payload_size, true);

-	if (rc)
+	if (rc) {
+		vhost_user_check_reset(vu_dev, rc);
 		return rc;
+	}

 	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
 		return -EPROTO;
@@ -367,6 +379,7 @@ static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
 				 sizeof(msg.msg.payload) +
 				 sizeof(msg.extra_payload));

+	vu_dev->recv_rc = rc;
 	if (rc)
 		return IRQ_NONE;
@@ -410,7 +423,9 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
 	if (!um_irq_timetravel_handler_used())
 		ret = vu_req_read_message(vu_dev, NULL);

-	if (vu_dev->vq_irq_vq_map) {
+	if (vu_dev->recv_rc) {
+		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
+	} else if (vu_dev->vq_irq_vq_map) {
 		struct virtqueue *vq;

 		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
@@ -1115,21 +1130,63 @@ void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
 		 no_vq_suspend ? "dis" : "en");
 }

+static void vu_of_conn_broken(struct work_struct *wk)
+{
+	/*
+	 * We can't remove the device from the devicetree so the only thing we
+	 * can do is warn.
+	 */
+	WARN_ON(1);
+}
+
 /* Platform device */

+static struct virtio_uml_platform_data *
+virtio_uml_create_pdata(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct virtio_uml_platform_data *pdata;
+	int ret;
+
+	if (!np)
+		return ERR_PTR(-EINVAL);
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
+	pdata->pdev = pdev;
+
+	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = of_property_read_u32(np, "virtio-device-id",
+				   &pdata->virtio_device_id);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return pdata;
+}
+
 static int virtio_uml_probe(struct platform_device *pdev)
 {
 	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
 	struct virtio_uml_device *vu_dev;
 	int rc;

-	if (!pdata)
-		return -EINVAL;
+	if (!pdata) {
+		pdata = virtio_uml_create_pdata(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+	}

 	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
 	if (!vu_dev)
 		return -ENOMEM;

+	vu_dev->pdata = pdata;
 	vu_dev->vdev.dev.parent = &pdev->dev;
 	vu_dev->vdev.dev.release = virtio_uml_release_dev;
 	vu_dev->vdev.config = &virtio_uml_config_ops;

--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -268,19 +268,16 @@
 1:	popl	%ds
 2:	popl	%es
 3:	popl	%fs
-	addl	$(4 + \pop), %esp	/* pop the unused "gs" slot */
+4:	addl	$(4 + \pop), %esp	/* pop the unused "gs" slot */
 	IRET_FRAME
-.pushsection .fixup, "ax"
-4:	movl	$0, (%esp)
-	jmp	1b
-5:	movl	$0, (%esp)
-	jmp	2b
-6:	movl	$0, (%esp)
-	jmp	3b
-.popsection
-	_ASM_EXTABLE(1b, 4b)
-	_ASM_EXTABLE(2b, 5b)
-	_ASM_EXTABLE(3b, 6b)
+
+	/*
+	 * There is no _ASM_EXTABLE_TYPE_REG() for ASM, however since this is
+	 * ASM the registers are known and we can trivially hard-code them.
+	 */
+	_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS)
+	_ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES)
+	_ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS)
 .endm

 .macro RESTORE_ALL_NMI cr3_reg:req pop=0
@@ -923,10 +920,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 	sti
 	sysexit

-.pushsection .fixup, "ax"
 2:	movl	$0, PT_FS(%esp)
 	jmp	1b
-.popsection
 	_ASM_EXTABLE(1b, 2b)

 .Lsysenter_fix_flags:
@@ -994,8 +989,7 @@
 	 */
 	iret

-.section .fixup, "ax"
-SYM_CODE_START(asm_iret_error)
+.Lasm_iret_error:
 	pushl	$0	# no error code
 	pushl	$iret_error
@@ -1012,9 +1006,8 @@
 #endif
 	jmp	handle_exception
-SYM_CODE_END(asm_iret_error)

-.previous
-	_ASM_EXTABLE(.Lirq_return, asm_iret_error)
+	_ASM_EXTABLE(.Lirq_return, .Lasm_iret_error)
 SYM_FUNC_END(entry_INT80_32)

 .macro FIXUP_ESPFIX_STACK

--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -122,28 +122,19 @@
 #ifdef __KERNEL__

+# include <asm/extable_fixup_types.h>
+
 /* Exception table entry */
 #ifdef __ASSEMBLY__
-# define _ASM_EXTABLE_HANDLE(from, to, handler)			\
+
+# define _ASM_EXTABLE_TYPE(from, to, type)			\
 	.pushsection "__ex_table","a" ;				\
 	.balign 4 ;						\
 	.long (from) - . ;					\
 	.long (to) - . ;					\
-	.long (handler) - . ;					\
+	.long type ;						\
 	.popsection

-# define _ASM_EXTABLE(from, to)					\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
-
-# define _ASM_EXTABLE_UA(from, to)				\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
-
-# define _ASM_EXTABLE_CPY(from, to)				\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
-
-# define _ASM_EXTABLE_FAULT(from, to)				\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
-
 # ifdef CONFIG_KPROBES
 # define _ASM_NOKPROBE(entry)					\
 	.pushsection "_kprobe_blacklist","aw" ;			\
@@ -155,26 +146,51 @@
 # endif

 #else /* ! __ASSEMBLY__ */

-# define _EXPAND_EXTABLE_HANDLE(x) #x
-# define _ASM_EXTABLE_HANDLE(from, to, handler)			\
+# define DEFINE_EXTABLE_TYPE_REG \
+	".macro extable_type_reg type:req reg:req\n"						\
+	".set .Lfound, 0\n"									\
+	".set .Lregnr, 0\n"									\
+	".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n"		\
+	".ifc \\reg, %%\\rs\n"									\
+	".set .Lfound, .Lfound+1\n"								\
+	".long \\type + (.Lregnr << 8)\n"							\
+	".endif\n"										\
+	".set .Lregnr, .Lregnr+1\n"								\
+	".endr\n"										\
+	".set .Lregnr, 0\n"									\
+	".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n"	\
+	".ifc \\reg, %%\\rs\n"									\
+	".set .Lfound, .Lfound+1\n"								\
+	".long \\type + (.Lregnr << 8)\n"							\
+	".endif\n"										\
+	".set .Lregnr, .Lregnr+1\n"								\
+	".endr\n"										\
+	".if (.Lfound != 1)\n"									\
+	".error \"extable_type_reg: bad register argument\"\n"					\
+	".endif\n"										\
+	".endm\n"
+
+# define UNDEFINE_EXTABLE_TYPE_REG \
+	".purgem extable_type_reg\n"
+
+# define _ASM_EXTABLE_TYPE(from, to, type)			\
 	" .pushsection \"__ex_table\",\"a\"\n"			\
 	" .balign 4\n"						\
 	" .long (" #from ") - .\n"				\
 	" .long (" #to ") - .\n"				\
-	" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n"	\
+	" .long " __stringify(type) " \n"			\
 	" .popsection\n"

-# define _ASM_EXTABLE(from, to)					\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
-
-# define _ASM_EXTABLE_UA(from, to)				\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
-
-# define _ASM_EXTABLE_CPY(from, to)				\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
-
-# define _ASM_EXTABLE_FAULT(from, to)				\
-	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+# define _ASM_EXTABLE_TYPE_REG(from, to, type, reg)				\
+	" .pushsection \"__ex_table\",\"a\"\n"					\
+	" .balign 4\n"								\
+	" .long (" #from ") - .\n"						\
+	" .long (" #to ") - .\n"						\
+	DEFINE_EXTABLE_TYPE_REG							\
+	"extable_type_reg reg=" __stringify(reg) ", type=" __stringify(type) " \n"\
+	UNDEFINE_EXTABLE_TYPE_REG						\
+	" .popsection\n"

 /* For C file, we already have NOKPROBE_SYMBOL macro */
@@ -188,6 +204,17 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
 #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
 #endif /* __ASSEMBLY__ */

+#define _ASM_EXTABLE(from, to)					\
+	_ASM_EXTABLE_TYPE(from, to, EX_TYPE_DEFAULT)
+
+#define _ASM_EXTABLE_UA(from, to)				\
+	_ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS)
+
+#define _ASM_EXTABLE_CPY(from, to)				\
+	_ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY)
+
+#define _ASM_EXTABLE_FAULT(from, to)				\
+	_ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)
+
 #endif /* __KERNEL__ */

 #endif /* _ASM_X86_ASM_H */
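
Note: the DEFINE_EXTABLE_TYPE_REG block above is dense, so here is a sketch of
its effect (illustrative only; the actual operand allocation is up to the
compiler). If the compiler happens to pick %rcx for the error operand,
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %rcx) emits, via the
transient extable_type_reg assembler macro:

	.pushsection "__ex_table","a"
	.balign 4
	.long (1b) - .
	.long (2b) - .
	.long EX_TYPE_EFAULT_REG + (1 << 8)	/* regnr 1 == %rcx, bits 8-11 */
	.popsection

so the register number travels in the data word instead of in hand-written
.fixup code.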

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -300,6 +300,7 @@
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */

--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -1,12 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_X86_EXTABLE_H
 #define _ASM_X86_EXTABLE_H
+
+#include <asm/extable_fixup_types.h>
+
 /*
- * The exception table consists of triples of addresses relative to the
- * exception table entry itself. The first address is of an instruction
- * that is allowed to fault, the second is the target at which the program
- * should continue. The third is a handler function to deal with the fault
- * caused by the instruction in the first field.
+ * The exception table consists of two addresses relative to the
+ * exception table entry itself and a type selector field.
+ *
+ * The first address is of an instruction that is allowed to fault, the
+ * second is the target at which the program should continue.
+ *
+ * The type entry is used by fixup_exception() to select the handler to
+ * deal with the fault caused by the instruction in the first field.
  *
  * All the routines below use bits of fixup code that are out of line
  * with the main instruction path. This means when everything is well,
@@ -15,7 +21,7 @@
  */

 struct exception_table_entry {
-	int insn, fixup, handler;
+	int insn, fixup, data;
 };

 struct pt_regs;
@@ -25,21 +31,27 @@ struct pt_regs;
 do {									\
 	(a)->fixup = (b)->fixup + (delta);				\
 	(b)->fixup = (tmp).fixup - (delta);				\
-	(a)->handler = (b)->handler + (delta);				\
-	(b)->handler = (tmp).handler - (delta);				\
+	(a)->data = (b)->data;						\
+	(b)->data = (tmp).data;						\
 } while (0)

-enum handler_type {
-	EX_HANDLER_NONE,
-	EX_HANDLER_FAULT,
-	EX_HANDLER_UACCESS,
-	EX_HANDLER_OTHER
-};
-
 extern int fixup_exception(struct pt_regs *regs, int trapnr,
 			   unsigned long error_code, unsigned long fault_addr);
 extern int fixup_bug(struct pt_regs *regs, int trapnr);
-extern enum handler_type ex_get_fault_handler_type(unsigned long ip);
+extern int ex_get_fixup_type(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);

+#ifdef CONFIG_X86_MCE
+extern void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr);
+#else
+static inline void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) { }
+#endif
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_X86_64)
+bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs);
+#else
+static inline bool ex_handler_bpf(const struct exception_table_entry *x,
+				  struct pt_regs *regs) { return false; }
+#endif
+
 #endif
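
Note: the relative encoding described in the comment above is easy to miss. A
minimal userspace sketch (illustrative, not kernel code; all names here are
made up) of how a two-offset-plus-type entry resolves back to absolute
addresses, using the same arithmetic as ex_fixup_addr():

	#include <stdio.h>
	#include <stdint.h>

	/* Two addresses stored relative to the entry itself plus a type
	 * word; this keeps the table position independent. */
	struct ex_entry { int32_t insn, fixup, data; };

	static char insn_site, fixup_site;	/* stand-ins for 1b/2b labels */
	static struct ex_entry entry;

	static uintptr_t resolve(const int32_t *field)
	{
		return (uintptr_t)field + *field; /* same math as ex_fixup_addr() */
	}

	int main(void)
	{
		entry.insn  = (int32_t)((uintptr_t)&insn_site  - (uintptr_t)&entry.insn);
		entry.fixup = (int32_t)((uintptr_t)&fixup_site - (uintptr_t)&entry.fixup);
		entry.data  = 3;	/* e.g. EX_TYPE_UACCESS */

		printf("insn  %p -> %p\n", (void *)&insn_site,  (void *)resolve(&entry.insn));
		printf("fixup %p -> %p\n", (void *)&fixup_site, (void *)resolve(&entry.fixup));
		return 0;
	}

Both lines print matching addresses: the stored 32-bit offsets round-trip to
the original locations without any absolute pointers in the table.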

--- /dev/null
+++ b/arch/x86/include/asm/extable_fixup_types.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_EXTABLE_FIXUP_TYPES_H
+#define _ASM_X86_EXTABLE_FIXUP_TYPES_H
+
+/*
+ * Our IMM is signed, as such it must live at the top end of the word. Also,
+ * since C99 hex constants are of ambigious type, force cast the mask to 'int'
+ * so that FIELD_GET() will DTRT and sign extend the value when it extracts it.
+ */
+#define EX_DATA_TYPE_MASK		((int)0x000000FF)
+#define EX_DATA_REG_MASK		((int)0x00000F00)
+#define EX_DATA_FLAG_MASK		((int)0x0000F000)
+#define EX_DATA_IMM_MASK		((int)0xFFFF0000)
+
+#define EX_DATA_REG_SHIFT		8
+#define EX_DATA_FLAG_SHIFT		12
+#define EX_DATA_IMM_SHIFT		16
+
+#define EX_DATA_REG(reg)		((reg) << EX_DATA_REG_SHIFT)
+#define EX_DATA_FLAG(flag)		((flag) << EX_DATA_FLAG_SHIFT)
+#define EX_DATA_IMM(imm)		((imm) << EX_DATA_IMM_SHIFT)
+
+/* segment regs */
+#define EX_REG_DS			EX_DATA_REG(8)
+#define EX_REG_ES			EX_DATA_REG(9)
+#define EX_REG_FS			EX_DATA_REG(10)
+#define EX_REG_GS			EX_DATA_REG(11)
+
+/* flags */
+#define EX_FLAG_CLEAR_AX		EX_DATA_FLAG(1)
+#define EX_FLAG_CLEAR_DX		EX_DATA_FLAG(2)
+#define EX_FLAG_CLEAR_AX_DX		EX_DATA_FLAG(3)
+
+/* types */
+#define	EX_TYPE_NONE			 0
+#define	EX_TYPE_DEFAULT			 1
+#define	EX_TYPE_FAULT			 2
+#define	EX_TYPE_UACCESS			 3
+#define	EX_TYPE_COPY			 4
+#define	EX_TYPE_CLEAR_FS		 5
+#define	EX_TYPE_FPU_RESTORE		 6
+#define	EX_TYPE_BPF			 7
+#define	EX_TYPE_WRMSR			 8
+#define	EX_TYPE_RDMSR			 9
+#define	EX_TYPE_WRMSR_SAFE		10 /* reg := -EIO */
+#define	EX_TYPE_RDMSR_SAFE		11 /* reg := -EIO */
+#define	EX_TYPE_WRMSR_IN_MCE		12
+#define	EX_TYPE_RDMSR_IN_MCE		13
+#define	EX_TYPE_DEFAULT_MCE_SAFE	14
+#define	EX_TYPE_FAULT_MCE_SAFE		15
+
+#define	EX_TYPE_POP_REG			16 /* sp += sizeof(long) */
+#define EX_TYPE_POP_ZERO		(EX_TYPE_POP_REG | EX_DATA_IMM(0))
+
+#define	EX_TYPE_IMM_REG			17 /* reg := (long)imm */
+#define	EX_TYPE_EFAULT_REG		(EX_TYPE_IMM_REG | EX_DATA_IMM(-EFAULT))
+
+#endif
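
Note: a small self-contained sketch (userspace C, constants copied for
illustration) of how the data word packs and unpacks; the arithmetic right
shift at the end is exactly what the "force cast the mask to 'int'" comment
above is about:

	#include <stdio.h>

	#define EX_DATA_TYPE_MASK	((int)0x000000FF)
	#define EX_DATA_REG_SHIFT	8
	#define EX_DATA_IMM_SHIFT	16
	#define EFAULT			14
	#define EX_TYPE_IMM_REG		17

	/* Pack as EX_DATA_REG()/EX_DATA_IMM() do; the unsigned cast merely
	 * sidesteps the C-level UB of left-shifting a negative value. */
	static int pack(int type, int regnr, int imm)
	{
		return type | (regnr << EX_DATA_REG_SHIFT) |
		       (int)((unsigned)imm << EX_DATA_IMM_SHIFT);
	}

	int main(void)
	{
		int data = pack(EX_TYPE_IMM_REG, 1 /* %rcx */, -EFAULT);

		/* The arithmetic shift sign-extends the 16-bit immediate,
		 * so the fixup sees a proper negative errno. */
		printf("type=%d reg=%d imm=%d\n",
		       data & EX_DATA_TYPE_MASK,
		       (data >> EX_DATA_REG_SHIFT) & 0xF,
		       data >> EX_DATA_IMM_SHIFT);
		return 0;
	}

This prints "type=17 reg=1 imm=-14": one 32-bit word carries the handler
type, the target register and the signed immediate.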

--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -126,7 +126,7 @@ extern void save_fpregs_to_fpstate(struct fpu *fpu);
 #define kernel_insn(insn, output, input...)				\
 	asm volatile("1:" #insn "\n\t"					\
 		     "2:\n"						\
-		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
+		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FPU_RESTORE)	\
 		     : output : input)

 static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx)
@@ -253,7 +253,7 @@ static inline void fxsave(struct fxregs_state *fx)
 				 XRSTORS, X86_FEATURE_XSAVES)		\
 		     "\n"						\
 		     "3:\n"						\
-		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+		     _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE)	\
 		     :							\
 		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
 		     : "memory")

--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -17,13 +17,9 @@ do {								\
 	int oldval = 0, ret;					\
 	asm volatile("1:\t" insn "\n"				\
 		     "2:\n"					\
-		     "\t.section .fixup,\"ax\"\n"		\
-		     "3:\tmov\t%3, %1\n"			\
-		     "\tjmp\t2b\n"				\
-		     "\t.previous\n"				\
-		     _ASM_EXTABLE_UA(1b, 3b)			\
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %1) \
 		     : "=r" (oldval), "=r" (ret), "+m" (*uaddr)	\
-		     : "i" (-EFAULT), "0" (oparg), "1" (0));	\
+		     : "0" (oparg), "1" (0));			\
 	if (ret)						\
 		goto label;					\
 	*oval = oldval;						\
@@ -39,15 +35,11 @@ do {								\
 		     "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
 		     "\tjnz\t2b\n"				\
 		     "4:\n"					\
-		     "\t.section .fixup,\"ax\"\n"		\
-		     "5:\tmov\t%5, %1\n"			\
-		     "\tjmp\t4b\n"				\
-		     "\t.previous\n"				\
-		     _ASM_EXTABLE_UA(1b, 5b)			\
-		     _ASM_EXTABLE_UA(3b, 5b)			\
+		     _ASM_EXTABLE_TYPE_REG(1b, 4b, EX_TYPE_EFAULT_REG, %1) \
+		     _ASM_EXTABLE_TYPE_REG(3b, 4b, EX_TYPE_EFAULT_REG, %1) \
 		     : "=&a" (oldval), "=&r" (ret),		\
 		       "+m" (*uaddr), "=&r" (tem)		\
-		     : "r" (oparg), "i" (-EFAULT), "1" (0));	\
+		     : "r" (oparg), "1" (0));			\
 	if (ret)						\
 		goto label;					\
 	*oval = oldval;						\
@@ -95,15 +87,11 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!user_access_begin(uaddr, sizeof(u32)))
 		return -EFAULT;
 	asm volatile("\n"
-		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
 		     "2:\n"
-		     "\t.section .fixup, \"ax\"\n"
-		     "3:\tmov %3, %0\n"
-		     "\tjmp 2b\n"
-		     "\t.previous\n"
-		     _ASM_EXTABLE_UA(1b, 3b)
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
 		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
-		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
+		     : "r" (newval), "1" (oldval)
 		     : "memory"
 	);
 	user_access_end();

--- a/arch/x86/include/asm/insn-eval.h
+++ b/arch/x86/include/asm/insn-eval.h
@@ -15,6 +15,8 @@
 #define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf)
 #define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4))

+int pt_regs_offset(struct pt_regs *regs, int regno);
+
 bool insn_has_rep_prefix(struct insn *insn);
 void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
 int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);

--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -176,13 +176,6 @@ bool hv_vcpu_is_preempted(int vcpu);
 static inline void hv_apic_init(void) {}
 #endif

-static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
-					      struct msi_desc *msi_desc)
-{
-	msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
-	msi_entry->data.as_uint32 = msi_desc->msg.data;
-}
-
 struct irq_domain *hv_create_pci_msi_domain(void);

 int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,

--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -92,7 +92,7 @@ static __always_inline unsigned long long __rdmsr(unsigned int msr)

 	asm volatile("1: rdmsr\n"
 		     "2:\n"
-		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
+		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
 		     : EAX_EDX_RET(val, low, high) : "c" (msr));

 	return EAX_EDX_VAL(val, low, high);
@@ -102,7 +102,7 @@ static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 {
 	asm volatile("1: wrmsr\n"
 		     "2:\n"
-		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
+		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
 		     : : "c" (msr), "a"(low), "d" (high) : "memory");
 }
@@ -137,17 +137,11 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 {
 	DECLARE_ARGS(val, low, high);

-	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: mov %[fault],%[err]\n\t"
-		     "xorl %%eax, %%eax\n\t"
-		     "xorl %%edx, %%edx\n\t"
-		     "jmp 1b\n\t"
-		     ".previous\n\t"
-		     _ASM_EXTABLE(2b, 3b)
+	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
+		     "2:\n\t"
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
-		     : "c" (msr), [fault] "i" (-EIO));
+		     : "c" (msr));
 	if (tracepoint_enabled(read_msr))
 		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
 	return EAX_EDX_VAL(val, low, high);
@@ -169,15 +163,11 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
 {
 	int err;

-	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     _ASM_EXTABLE(2b, 3b)
+	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
+		     "2:\n\t"
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
 		     : [err] "=a" (err)
-		     : "c" (msr), "0" (low), "d" (high),
-		       [fault] "i" (-EIO)
+		     : "c" (msr), "0" (low), "d" (high)
 		     : "memory");
 	if (tracepoint_enabled(write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
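
Note: caller-visible semantics are unchanged by this conversion — a faulting
access still yields -EIO and a zeroed result, only now via the generic fixup
instead of a local .fixup stub. A usage sketch (kernel context; the MSR picked
here is just an example):

	static void probe_platform_id(void)
	{
		u64 val;

		/* rdmsrl_safe() lands in native_read_msr_safe() above: on a
		 * CPU without this MSR the #GP is routed through the exception
		 * table, val is zeroed and -EIO comes back instead of an oops. */
		if (rdmsrl_safe(MSR_IA32_PLATFORM_ID, &val))
			pr_info("MSR 0x17 not readable on this CPU\n");
		else
			pr_info("IA32_PLATFORM_ID: 0x%llx\n", val);
	}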

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -298,6 +298,8 @@ do {									\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
 			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
 			      X86_FEATURE_USE_IBRS_FW);			\
+	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
+			      X86_FEATURE_USE_IBPB_FW);			\
 } while (0)

 #define firmware_restrict_branch_speculation_end()			\

--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -339,7 +339,7 @@ static inline void __loadsegment_fs(unsigned short value)
 		     "1:	movw %0, %%fs			\n"
 		     "2:					\n"

-		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)
+		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_CLEAR_FS)

 		     : : "rm" (value) : "memory");
 }

--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -414,6 +414,103 @@ do {									\

 #endif // CONFIG_CC_ASM_GOTO_OUTPUT

+#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
+	bool success;							\
+	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
+	__typeof__(*(_ptr)) __old = *_old;				\
+	__typeof__(*(_ptr)) __new = (_new);				\
+	asm_volatile_goto("\n"						\
+		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
+		     _ASM_EXTABLE_UA(1b, %l[label])			\
+		     : CC_OUT(z) (success),				\
+		       [ptr] "+m" (*_ptr),				\
+		       [old] "+a" (__old)				\
+		     : [new] ltype (__new)				\
+		     : "memory"						\
+		     : label);						\
+	if (unlikely(!success))						\
+		*_old = __old;						\
+	likely(success);					})
+
+#ifdef CONFIG_X86_32
+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
+	bool success;							\
+	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
+	__typeof__(*(_ptr)) __old = *_old;				\
+	__typeof__(*(_ptr)) __new = (_new);				\
+	asm_volatile_goto("\n"						\
+		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
+		     _ASM_EXTABLE_UA(1b, %l[label])			\
+		     : CC_OUT(z) (success),				\
+		       "+A" (__old),					\
+		       [ptr] "+m" (*_ptr)				\
+		     : "b" ((u32)__new),				\
+		       "c" ((u32)((u64)__new >> 32))			\
+		     : "memory"						\
+		     : label);						\
+	if (unlikely(!success))						\
+		*_old = __old;						\
+	likely(success);					})
+#endif // CONFIG_X86_32
+
+#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
+
+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
+	int __err = 0;							\
+	bool success;							\
+	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
+	__typeof__(*(_ptr)) __old = *_old;				\
+	__typeof__(*(_ptr)) __new = (_new);				\
+	asm volatile("\n"						\
+		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
+		     CC_SET(z)						\
+		     "2:\n"						\
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
+					   %[errout])			\
+		     : CC_OUT(z) (success),				\
+		       [errout] "+r" (__err),				\
+		       [ptr] "+m" (*_ptr),				\
+		       [old] "+a" (__old)				\
+		     : [new] ltype (__new)				\
+		     : "memory");					\
+	if (unlikely(__err))						\
+		goto label;						\
+	if (unlikely(!success))						\
+		*_old = __old;						\
+	likely(success);					})
+
+#ifdef CONFIG_X86_32
+/*
+ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
+ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
+ * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
+ * both ESI and EDI for the memory operand, compilation will fail if the error
+ * is an input+output as there will be no register available for input.
+ */
+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
+	int __result;							\
+	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
+	__typeof__(*(_ptr)) __old = *_old;				\
+	__typeof__(*(_ptr)) __new = (_new);				\
+	asm volatile("\n"						\
+		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
+		     "mov $0, %%ecx\n\t"				\
+		     "setz %%cl\n"					\
+		     "2:\n"						\
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
+		     : [result]"=c" (__result),				\
+		       "+A" (__old),					\
+		       [ptr] "+m" (*_ptr)				\
+		     : "b" ((u32)__new),				\
+		       "c" ((u32)((u64)__new >> 32))			\
+		     : "memory", "cc");					\
+	if (unlikely(__result < 0))					\
+		goto label;						\
+	if (unlikely(!__result))					\
+		*_old = __old;						\
+	likely(__result);					})
+#endif // CONFIG_X86_32
+
+#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
+
 /* FIXME: this hack is definitely wrong -AK */
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) (*(struct __large_struct __user *)(x))
@@ -506,6 +603,51 @@ do {									\
 } while (0)
 #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

+extern void __try_cmpxchg_user_wrong_size(void);
+
+#ifndef CONFIG_X86_32
+#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
+	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
+#endif
+
+/*
+ * Force the pointer to u<size> to match the size expected by the asm helper.
+ * clang/LLVM compiles all cases and only discards the unused paths after
+ * processing errors, which breaks i386 if the pointer is an 8-byte value.
+ */
+#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
+	bool __ret;								\
+	__chk_user_ptr(_ptr);							\
+	switch (sizeof(*(_ptr))) {						\
+	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
+					       (__force u8 *)(_ptr), (_oldp),	\
+					       (_nval), _label);		\
+		break;								\
+	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
+					       (__force u16 *)(_ptr), (_oldp),	\
+					       (_nval), _label);		\
+		break;								\
+	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
+					       (__force u32 *)(_ptr), (_oldp),	\
+					       (_nval), _label);		\
+		break;								\
+	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
+						 (_nval), _label);		\
+		break;								\
+	default: __try_cmpxchg_user_wrong_size();				\
+	}									\
+	__ret;						})
+
+/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
+#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
+	int __ret = -EFAULT;						\
+	__uaccess_begin_nospec();					\
+	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
+_label:									\
+	__uaccess_end();						\
+	__ret;								\
+						})
+
 /*
  * We want the unsafe accessors to always be inlined and use
  * the error labels - thus the macro games.
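
Note: a kernel-style usage sketch of the new helper (function and label names
here are hypothetical, not from the patch). The fault label is both passed in
and defined by the macro itself, so each call site needs a unique name:

	static int bump_user_counter(u32 __user *uctr)
	{
		u32 old;
		int r;

		if (get_user(old, uctr))
			return -EFAULT;

		/* "Returns" 0 on success, 1 if *uctr != old (old is
		 * refreshed), and -EFAULT if the user page went away. */
		r = __try_cmpxchg_user(uctr, &old, old + 1, efault_ctr);
		if (r == 1)
			return -EAGAIN;	/* lost a race; caller may retry */
		return r;		/* 0 or -EFAULT */
	}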

--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -554,7 +554,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 			dest = addr + insn.length + insn.immediate.value;

 		if (__static_call_fixup(addr, op, dest) ||
-		    WARN_ON_ONCE(dest != &__x86_return_thunk))
+		    WARN_ONCE(dest != &__x86_return_thunk,
+			      "missing return thunk: %pS-%pS: %*ph",
+			      addr, dest, 5, addr))
 			continue;

 		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -968,6 +968,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -1408,6 +1409,8 @@ static void __init spectre_v2_select_mitigation(void)
 	case SPECTRE_V2_IBRS:
 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
 		break;

 	case SPECTRE_V2_LFENCE:
@@ -1509,7 +1512,16 @@ static void __init spectre_v2_select_mitigation(void)
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+			pr_info("Enabling Speculation Barrier for firmware calls\n");
+		}
+
+	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}

--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -382,13 +382,16 @@ static int msr_to_offset(u32 msr)
 	return -1;
 }

-__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
-				      struct pt_regs *regs, int trapnr,
-				      unsigned long error_code,
-				      unsigned long fault_addr)
+void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
 {
-	pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
-		 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
+	if (wrmsr) {
+		pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+			 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
+			 regs->ip, (void *)regs->ip);
+	} else {
+		pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
+	}

 	show_stack_regs(regs);
@@ -396,8 +399,6 @@
 	while (true)
 		cpu_relax();
-
-	return true;
 }

 /* MSR access wrappers used for error injection */
@@ -429,32 +430,13 @@ static noinstr u64 mce_rdmsrl(u32 msr)
 	 */
 	asm volatile("1: rdmsr\n"
 		     "2:\n"
-		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
+		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE)
 		     : EAX_EDX_RET(val, low, high) : "c" (msr));

 	return EAX_EDX_VAL(val, low, high);
 }

-__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
-				      struct pt_regs *regs, int trapnr,
-				      unsigned long error_code,
-				      unsigned long fault_addr)
-{
-	pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
-		 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
-		 regs->ip, (void *)regs->ip);
-	show_stack_regs(regs);
-
-	panic("MCA architectural violation!\n");
-
-	while (true)
-		cpu_relax();
-
-	return true;
-}
-
 static noinstr void mce_wrmsrl(u32 msr, u64 v)
 {
 	u32 low, high;
@@ -479,7 +461,7 @@ static noinstr void mce_wrmsrl(u32 msr, u64 v)
 	/* See comment in mce_rdmsrl() */
 	asm volatile("1: wrmsr\n"
 		     "2:\n"
-		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
+		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE)
 		     : : "c" (msr), "a"(low), "d" (high) : "memory");
 }

--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -186,14 +186,4 @@ extern bool amd_filter_mce(struct mce *m);
 static inline bool amd_filter_mce(struct mce *m) { return false; };
 #endif

-__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
-				      struct pt_regs *regs, int trapnr,
-				      unsigned long error_code,
-				      unsigned long fault_addr);
-
-__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
-				      struct pt_regs *regs, int trapnr,
-				      unsigned long error_code,
-				      unsigned long fault_addr);
-
 #endif /* __X86_MCE_INTERNAL_H__ */

--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -265,25 +265,26 @@ static bool is_copy_from_user(struct pt_regs *regs)
  */
 static int error_context(struct mce *m, struct pt_regs *regs)
 {
-	enum handler_type t;
-
 	if ((m->cs & 3) == 3)
 		return IN_USER;
 	if (!mc_recoverable(m->mcgstatus))
 		return IN_KERNEL;

-	t = ex_get_fault_handler_type(m->ip);
-	if (t == EX_HANDLER_FAULT) {
-		m->kflags |= MCE_IN_KERNEL_RECOV;
-		return IN_KERNEL_RECOV;
-	}
-	if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) {
-		m->kflags |= MCE_IN_KERNEL_RECOV;
-		m->kflags |= MCE_IN_KERNEL_COPYIN;
-		return IN_KERNEL_RECOV;
-	}
-	return IN_KERNEL;
+	switch (ex_get_fixup_type(m->ip)) {
+	case EX_TYPE_UACCESS:
+	case EX_TYPE_COPY:
+		if (!regs || !is_copy_from_user(regs))
+			return IN_KERNEL;
+		m->kflags |= MCE_IN_KERNEL_COPYIN;
+		fallthrough;
+	case EX_TYPE_FAULT:
+	case EX_TYPE_FAULT_MCE_SAFE:
+	case EX_TYPE_DEFAULT_MCE_SAFE:
+		m->kflags |= MCE_IN_KERNEL_RECOV;
+		return IN_KERNEL_RECOV;
+	default:
+		return IN_KERNEL;
+	}
 }

 static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6894,15 +6894,8 @@ static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
 				   exception, &write_emultor);
 }

-#define CMPXCHG_TYPE(t, ptr, old, new) \
-	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
-
-#ifdef CONFIG_X86_64
-#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
-#else
-#  define CMPXCHG64(ptr, old, new) \
-	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
-#endif
+#define emulator_try_cmpxchg_user(t, ptr, old, new) \
+	(__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))

 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 				     unsigned long addr,
@@ -6911,12 +6904,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 				     unsigned int bytes,
 				     struct x86_exception *exception)
 {
-	struct kvm_host_map map;
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	u64 page_line_mask;
+	unsigned long hva;
 	gpa_t gpa;
-	char *kaddr;
-	bool exchanged;
+	int r;

 	/* guests cmpxchg8b have to be emulated atomically */
 	if (bytes > 8 || (bytes & (bytes - 1)))
@@ -6940,31 +6932,32 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
 		goto emul_write;

-	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
+	hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
+	if (kvm_is_error_hva(hva))
 		goto emul_write;

-	kaddr = map.hva + offset_in_page(gpa);
+	hva += offset_in_page(gpa);

 	switch (bytes) {
 	case 1:
-		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
+		r = emulator_try_cmpxchg_user(u8, hva, old, new);
 		break;
 	case 2:
-		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
+		r = emulator_try_cmpxchg_user(u16, hva, old, new);
 		break;
 	case 4:
-		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
+		r = emulator_try_cmpxchg_user(u32, hva, old, new);
 		break;
 	case 8:
-		exchanged = CMPXCHG64(kaddr, old, new);
+		r = emulator_try_cmpxchg_user(u64, hva, old, new);
 		break;
 	default:
 		BUG();
 	}

-	kvm_vcpu_unmap(vcpu, &map, true);
-
-	if (!exchanged)
+	if (r < 0)
+		goto emul_write;
+	if (r)
 		return X86EMUL_CMPXCHG_FAILED;

 	kvm_page_track_write(vcpu, gpa, new, bytes);

--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -412,12 +412,7 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
 #endif /* CONFIG_X86_64 */
 }

-static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
-			  enum reg_type type)
-{
-	int regno = 0;
-
-	static const int regoff[] = {
+static const int pt_regoff[] = {
 	offsetof(struct pt_regs, ax),
 	offsetof(struct pt_regs, cx),
 	offsetof(struct pt_regs, dx),
@@ -435,9 +430,26 @@
 	offsetof(struct pt_regs, r13),
 	offsetof(struct pt_regs, r14),
 	offsetof(struct pt_regs, r15),
+#else
+	offsetof(struct pt_regs, ds),
+	offsetof(struct pt_regs, es),
+	offsetof(struct pt_regs, fs),
+	offsetof(struct pt_regs, gs),
 #endif
-	};
-	int nr_registers = ARRAY_SIZE(regoff);
+};
+
+int pt_regs_offset(struct pt_regs *regs, int regno)
+{
+	if ((unsigned)regno < ARRAY_SIZE(pt_regoff))
+		return pt_regoff[regno];
+	return -EDOM;
+}
+
+static int get_regno(struct insn *insn, enum reg_type type)
+{
+	int nr_registers = ARRAY_SIZE(pt_regoff);
+	int regno = 0;
+
 	/*
 	 * Don't possibly decode a 32-bit instructions as
 	 * reading a 64-bit-only register.
@@ -505,7 +517,18 @@
 		WARN_ONCE(1, "decoded an instruction with an invalid register");
 		return -EINVAL;
 	}

-	return regoff[regno];
+	return regno;
+}
+
+static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+			  enum reg_type type)
+{
+	int regno = get_regno(insn, type);
+
+	if (regno < 0)
+		return regno;
+
+	return pt_regs_offset(regs, regno);
 }

 /**

View File

@@ -2,48 +2,50 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <linux/bitfield.h>
 #include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/sev.h>
 #include <asm/traps.h>
 #include <asm/kdebug.h>
+#include <asm/insn-eval.h>
 
-typedef bool (*ex_handler_t)(const struct exception_table_entry *,
-			    struct pt_regs *, int, unsigned long,
-			    unsigned long);
+static inline unsigned long *pt_regs_nr(struct pt_regs *regs, int nr)
+{
+	int reg_offset = pt_regs_offset(regs, nr);
+	static unsigned long __dummy;
+
+	if (WARN_ON_ONCE(reg_offset < 0))
+		return &__dummy;
+
+	return (unsigned long *)((unsigned long)regs + reg_offset);
+}
 
 static inline unsigned long
 ex_fixup_addr(const struct exception_table_entry *x)
 {
 	return (unsigned long)&x->fixup + x->fixup;
 }
 
-static inline ex_handler_t
-ex_fixup_handler(const struct exception_table_entry *x)
-{
-	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
-}
-
-__visible bool ex_handler_default(const struct exception_table_entry *fixup,
-				  struct pt_regs *regs, int trapnr,
-				  unsigned long error_code,
-				  unsigned long fault_addr)
+static bool ex_handler_default(const struct exception_table_entry *e,
+			       struct pt_regs *regs)
 {
-	regs->ip = ex_fixup_addr(fixup);
+	if (e->data & EX_FLAG_CLEAR_AX)
+		regs->ax = 0;
+	if (e->data & EX_FLAG_CLEAR_DX)
+		regs->dx = 0;
+
+	regs->ip = ex_fixup_addr(e);
 	return true;
 }
-EXPORT_SYMBOL(ex_handler_default);
 
-__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
-				struct pt_regs *regs, int trapnr,
-				unsigned long error_code,
-				unsigned long fault_addr)
+static bool ex_handler_fault(const struct exception_table_entry *fixup,
+			     struct pt_regs *regs, int trapnr)
 {
-	regs->ip = ex_fixup_addr(fixup);
 	regs->ax = trapnr;
-	return true;
+	return ex_handler_default(fixup, regs);
 }
-EXPORT_SYMBOL_GPL(ex_handler_fault);
 
 /*
  * Handler for when we fail to restore a task's FPU state. We should never get
@@ -55,10 +57,8 @@ EXPORT_SYMBOL_GPL(ex_handler_fault);
  * of vulnerability by restoring from the initial state (essentially, zeroing
  * out all the FPU registers) if we can't restore from the task's FPU state.
  */
-__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
-				    struct pt_regs *regs, int trapnr,
-				    unsigned long error_code,
-				    unsigned long fault_addr)
+static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+				 struct pt_regs *regs)
 {
 	regs->ip = ex_fixup_addr(fixup);
@@ -68,98 +68,75 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
 	__restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate());
 	return true;
 }
-EXPORT_SYMBOL_GPL(ex_handler_fprestore);
 
-__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
-				  struct pt_regs *regs, int trapnr,
-				  unsigned long error_code,
-				  unsigned long fault_addr)
+static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
+			       struct pt_regs *regs, int trapnr)
 {
 	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
-	regs->ip = ex_fixup_addr(fixup);
-	return true;
+	return ex_handler_default(fixup, regs);
 }
-EXPORT_SYMBOL(ex_handler_uaccess);
 
-__visible bool ex_handler_copy(const struct exception_table_entry *fixup,
-			       struct pt_regs *regs, int trapnr,
-			       unsigned long error_code,
-			       unsigned long fault_addr)
+static bool ex_handler_copy(const struct exception_table_entry *fixup,
+			    struct pt_regs *regs, int trapnr)
 {
 	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
-	regs->ip = ex_fixup_addr(fixup);
-	regs->ax = trapnr;
-	return true;
+	return ex_handler_fault(fixup, regs, trapnr);
 }
-EXPORT_SYMBOL(ex_handler_copy);
 
-__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
-				       struct pt_regs *regs, int trapnr,
-				       unsigned long error_code,
-				       unsigned long fault_addr)
+static bool ex_handler_msr(const struct exception_table_entry *fixup,
+			   struct pt_regs *regs, bool wrmsr, bool safe, int reg)
 {
-	if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
-			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
+	if (!safe && wrmsr &&
+	    pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+			 (unsigned int)regs->cx, (unsigned int)regs->dx,
+			 (unsigned int)regs->ax,  regs->ip, (void *)regs->ip))
 		show_stack_regs(regs);
 
-	/* Pretend that the read succeeded and returned 0. */
-	regs->ip = ex_fixup_addr(fixup);
-	regs->ax = 0;
-	regs->dx = 0;
-	return true;
-}
-EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
-
-__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
-				       struct pt_regs *regs, int trapnr,
-				       unsigned long error_code,
-				       unsigned long fault_addr)
-{
-	if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
-			 (unsigned int)regs->cx, (unsigned int)regs->dx,
-			 (unsigned int)regs->ax,  regs->ip, (void *)regs->ip))
+	if (!safe && !wrmsr &&
+	    pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
 		show_stack_regs(regs);
 
-	/* Pretend that the write succeeded. */
-	regs->ip = ex_fixup_addr(fixup);
-	return true;
-}
-EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
+	if (!wrmsr) {
+		/* Pretend that the read succeeded and returned 0. */
+		regs->ax = 0;
+		regs->dx = 0;
+	}
 
-__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
-				   struct pt_regs *regs, int trapnr,
-				   unsigned long error_code,
-				   unsigned long fault_addr)
+	if (safe)
+		*pt_regs_nr(regs, reg) = -EIO;
+
+	return ex_handler_default(fixup, regs);
+}
+
+static bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+				struct pt_regs *regs)
 {
 	if (static_cpu_has(X86_BUG_NULL_SEG))
 		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
 	asm volatile ("mov %0, %%fs" : : "rm" (0));
-	return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
+	return ex_handler_default(fixup, regs);
 }
-EXPORT_SYMBOL(ex_handler_clear_fs);
 
-enum handler_type ex_get_fault_handler_type(unsigned long ip)
+static bool ex_handler_imm_reg(const struct exception_table_entry *fixup,
+			       struct pt_regs *regs, int reg, int imm)
 {
-	const struct exception_table_entry *e;
-	ex_handler_t handler;
+	*pt_regs_nr(regs, reg) = (long)imm;
+	return ex_handler_default(fixup, regs);
+}
 
-	e = search_exception_tables(ip);
-	if (!e)
-		return EX_HANDLER_NONE;
-	handler = ex_fixup_handler(e);
-	if (handler == ex_handler_fault)
-		return EX_HANDLER_FAULT;
-	else if (handler == ex_handler_uaccess || handler == ex_handler_copy)
-		return EX_HANDLER_UACCESS;
-	else
-		return EX_HANDLER_OTHER;
+int ex_get_fixup_type(unsigned long ip)
+{
+	const struct exception_table_entry *e = search_exception_tables(ip);
+
+	return e ? FIELD_GET(EX_DATA_TYPE_MASK, e->data) : EX_TYPE_NONE;
 }
 
 int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
 		    unsigned long fault_addr)
 {
 	const struct exception_table_entry *e;
-	ex_handler_t handler;
+	int type, reg, imm;
 
 #ifdef CONFIG_PNPBIOS
 	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
@@ -179,8 +156,48 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
 	if (!e)
 		return 0;
 
-	handler = ex_fixup_handler(e);
-	return handler(e, regs, trapnr, error_code, fault_addr);
+	type = FIELD_GET(EX_DATA_TYPE_MASK, e->data);
+	reg  = FIELD_GET(EX_DATA_REG_MASK,  e->data);
+	imm  = FIELD_GET(EX_DATA_IMM_MASK,  e->data);
+
+	switch (type) {
+	case EX_TYPE_DEFAULT:
+	case EX_TYPE_DEFAULT_MCE_SAFE:
+		return ex_handler_default(e, regs);
+	case EX_TYPE_FAULT:
+	case EX_TYPE_FAULT_MCE_SAFE:
+		return ex_handler_fault(e, regs, trapnr);
+	case EX_TYPE_UACCESS:
+		return ex_handler_uaccess(e, regs, trapnr);
+	case EX_TYPE_COPY:
+		return ex_handler_copy(e, regs, trapnr);
+	case EX_TYPE_CLEAR_FS:
+		return ex_handler_clear_fs(e, regs);
+	case EX_TYPE_FPU_RESTORE:
+		return ex_handler_fprestore(e, regs);
+	case EX_TYPE_BPF:
+		return ex_handler_bpf(e, regs);
+	case EX_TYPE_WRMSR:
+		return ex_handler_msr(e, regs, true, false, reg);
+	case EX_TYPE_RDMSR:
+		return ex_handler_msr(e, regs, false, false, reg);
+	case EX_TYPE_WRMSR_SAFE:
+		return ex_handler_msr(e, regs, true, true, reg);
+	case EX_TYPE_RDMSR_SAFE:
+		return ex_handler_msr(e, regs, false, true, reg);
+	case EX_TYPE_WRMSR_IN_MCE:
+		ex_handler_msr_mce(regs, true);
+		break;
+	case EX_TYPE_RDMSR_IN_MCE:
+		ex_handler_msr_mce(regs, false);
+		break;
+	case EX_TYPE_POP_REG:
+		regs->sp += sizeof(long);
+		fallthrough;
+	case EX_TYPE_IMM_REG:
+		return ex_handler_imm_reg(e, regs, reg, imm);
+	}
+
+	BUG();
 }
 
 extern unsigned int early_recursion_flag;
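
As a side note on the reworked mechanics above: the per-entry handler pointer is replaced by a packed data word, and fixup_exception() dispatches on bit fields extracted from it. Below is a minimal userspace sketch of that packing; the bit widths and the type value are illustrative stand-ins only, the real definitions live in arch/x86/include/asm/extable_fixup_types.h.

#include <stdio.h>
#include <stdint.h>

/* Illustrative masks in the spirit of the kernel's EX_DATA_*_MASK fields:
 * type in the low byte, register selector above it, immediate on top.
 * The real widths and type numbers may differ. */
#define EX_DATA_TYPE_MASK  0x000000ffu
#define EX_DATA_REG_MASK   0x0000ff00u
#define EX_DATA_IMM_MASK   0xffff0000u

/* Open-coded FIELD_PREP()/FIELD_GET(): shift by the mask's lowest set bit. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t field_get(uint32_t mask, uint32_t data)
{
	return (data & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* Encode a hypothetical EX_TYPE_IMM_REG-style entry: "on fault,
	 * load -14 (-EFAULT) into register 0 and jump to the fixup". */
	uint32_t data = field_prep(EX_DATA_TYPE_MASK, 12) |	/* made-up type id */
			field_prep(EX_DATA_REG_MASK, 0) |
			field_prep(EX_DATA_IMM_MASK, (uint16_t)-14);

	printf("type=%u reg=%u imm=%d\n",
	       field_get(EX_DATA_TYPE_MASK, data),
	       field_get(EX_DATA_REG_MASK, data),
	       (int16_t)field_get(EX_DATA_IMM_MASK, data));
	return 0;
}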

View File

@@ -832,9 +832,7 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
 	return 0;
 }
 
-static bool ex_handler_bpf(const struct exception_table_entry *x,
-			   struct pt_regs *regs, int trapnr,
-			   unsigned long error_code, unsigned long fault_addr)
+bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
 {
 	u32 reg = x->fixup >> 8;
@@ -1344,12 +1342,7 @@ st: if (is_imm8(insn->off))
 			}
 			ex->insn = delta;
 
-			delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
-			if (!is_simm32(delta)) {
-				pr_err("extable->handler doesn't fit into 32-bit\n");
-				return -EFAULT;
-			}
-			ex->handler = delta;
+			ex->data = EX_TYPE_BPF;
 
 			if (dst_reg > BPF_REG_9) {
 				pr_err("verifier error\n");

View File

@@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
 	}
 
 	if (!ldisc_data->buf_free)
-		/* ttyio_in will tty_schedule_flip */
+		/* ttyio_in will tty_flip_buffer_push */
 		return 0;
 
 	/* Make sure the consumer has read buf before we have seen
@@ -312,7 +312,7 @@ static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
 	mb();
 	ldisc_data->buf_free = true;
 	/* Let TTY push more characters */
-	tty_schedule_flip(tty->port);
+	tty_flip_buffer_push(tty->port);
 
 	return rv;
 }

View File

@@ -406,11 +406,90 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
	.mru_default = 32768,
};
static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
};
static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
.ch_cfg = mhi_telit_fn980_hw_v1_channels,
.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
.event_cfg = mhi_telit_fn980_hw_v1_events,
};
static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
.name = "telit-fn980-hwv1",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_telit_fn980_hw_v1_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.mru_default = 32768,
.sideband_wake = false,
};
static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
static struct mhi_event_config mhi_telit_fn990_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_DATA(1, 128),
MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
};
static const struct mhi_controller_config modem_telit_fn990_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
.ch_cfg = mhi_telit_fn990_channels,
.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
.event_cfg = mhi_telit_fn990_events,
};
static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.name = "telit-fn990",
.config = &modem_telit_fn990_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.sideband_wake = false,
.mru_default = 32768,
};
static const struct pci_device_id mhi_pci_id_table[] = {
	/* Telit FN980 hardware revision v1 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
	/* Telit FN990 */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
	{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
	{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
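
Both Telit entries rely on PCI_DEVICE_SUB() and on table order: they share the Qualcomm vendor/device IDs with the generic SDX entries and differ only in subsystem IDs, so they must appear before the catch-all rows. A small userspace model of that first-match rule (all IDs except the Telit FN980 ones are made up):

#include <stdio.h>
#include <stdint.h>

#define PCI_ANY_ID 0xffffu

struct pci_id { uint16_t vid, did, sub_vid, sub_did; const char *info; };

/* First matching row wins, just like the PCI core's table walk. */
static const struct pci_id table[] = {
	{ 0x17cb, 0x0306, 0x1c5d, 0x2000, "telit-fn980-hwv1" },
	{ 0x17cb, 0x0306, PCI_ANY_ID, PCI_ANY_ID, "qcom-sdx55" },
};

static const char *match(uint16_t vid, uint16_t did, uint16_t svid, uint16_t sdid)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct pci_id *e = &table[i];

		if (e->vid == vid && e->did == did &&
		    (e->sub_vid == PCI_ANY_ID || e->sub_vid == svid) &&
		    (e->sub_did == PCI_ANY_ID || e->sub_did == sdid))
			return e->info;
	}
	return "no match";
}

int main(void)
{
	printf("%s\n", match(0x17cb, 0x0306, 0x1c5d, 0x2000)); /* telit-fn980-hwv1 */
	printf("%s\n", match(0x17cb, 0x0306, 0x104c, 0x0001)); /* qcom-sdx55 */
	return 0;
}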

View File

@@ -52,13 +52,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
 	if (ret)
 		goto err;
 
-	/* Temporarily set the number of crypto instances to zero to avoid
-	 * registering the crypto algorithms.
-	 * This will be removed when the algorithms will support the
-	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag
-	 */
-	instances = 0;
-
 	for (i = 0; i < instances; i++) {
 		val = i;
 		bank = i * 2;

View File

@@ -15,6 +15,7 @@ intel_qat-objs := adf_cfg.o \
		qat_crypto.o \
		qat_algs.o \
		qat_asym_algs.o \
		qat_algs_send.o \
		qat_uclo.o \
		qat_hal.o

View File

@@ -8,6 +8,9 @@
#include "adf_cfg.h" #include "adf_cfg.h"
#include "adf_common_drv.h" #include "adf_common_drv.h"
#define ADF_MAX_RING_THRESHOLD 80
#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
static inline u32 adf_modulo(u32 data, u32 shift) static inline u32 adf_modulo(u32 data, u32 shift)
{ {
u32 div = data >> shift; u32 div = data >> shift;
@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
bank->irq_mask); bank->irq_mask);
} }
bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
{
return atomic_read(ring->inflights) > ring->threshold;
}
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{ {
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_bank_data *bank; struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring; struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
int max_inflights;
u32 ring_num; u32 ring_num;
int ret; int ret;
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
ring->head = 0; ring->head = 0;
ring->tail = 0; ring->tail = 0;
max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
atomic_set(ring->inflights, 0); atomic_set(ring->inflights, 0);
ret = adf_init_ring(ring); ret = adf_init_ring(ring);
if (ret) if (ret)
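
The backlog threshold is plain integer arithmetic, fixed at ring-creation time to 80% of the ring's capacity. A standalone sketch with invented capacities (the real value comes from ADF_MAX_INFLIGHTS()):

#include <stdio.h>

#define ADF_MAX_RING_THRESHOLD		80
#define ADF_PERCENT(tot, percent)	(((tot) * (percent)) / 100)

int main(void)
{
	/* Hypothetical ring capacities, not taken from the driver. */
	int max_inflights[] = { 64, 256, 1000 };

	for (int i = 0; i < 3; i++) {
		int threshold = ADF_PERCENT(max_inflights[i], ADF_MAX_RING_THRESHOLD);

		printf("capacity %4d -> backlog above %3d in-flight\n",
		       max_inflights[i], threshold);
	}
	return 0;
}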

View File

@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    const char *ring_name, adf_callback_fn callback,
		    int poll_mode, struct adf_etr_ring_data **ring_ptr);

bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif

View File

@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
	spinlock_t lock;	/* protects ring data struct */
	u16 head;
	u16 tail;
	u32 threshold;
	u8 ring_number;
	u8 ring_size;
	u8 msg_size;

View File

@@ -17,7 +17,7 @@
 #include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
-#include "adf_transport.h"
+#include "qat_algs_send.h"
 #include "adf_common_drv.h"
 #include "qat_crypto.h"
 #include "icp_qat_hw.h"
@@ -46,19 +46,6 @@
 static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
 
-struct qat_alg_buf {
-	u32 len;
-	u32 resrvd;
-	u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-	u64 resrvd;
-	u32 num_bufs;
-	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
 /* Common content descriptor */
 struct qat_alg_cd {
 	union {
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-	kfree(bl);
+
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bl);
+
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -704,6 +694,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 					 DMA_BIDIRECTIONAL);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-		kfree(blout);
+
+		if (!qat_req->buf.sgl_dst_valid)
+			kfree(blout);
 	}
 }
@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	int node = dev_to_node(&GET_DEV(inst->accel_dev));
 
 	if (unlikely(!n))
 		return -EINVAL;
 
-	bufl = kzalloc_node(sz, GFP_ATOMIC,
-			    dev_to_node(&GET_DEV(inst->accel_dev)));
-	if (unlikely(!bufl))
-		return -ENOMEM;
+	qat_req->buf.sgl_src_valid = false;
+	qat_req->buf.sgl_dst_valid = false;
+
+	if (n > QAT_MAX_BUFF_DESC) {
+		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+		if (unlikely(!bufl))
+			return -ENOMEM;
+	} else {
+		bufl = &qat_req->buf.sgl_src.sgl_hdr;
+		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+		qat_req->buf.sgl_src_valid = true;
+	}
 
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		struct qat_alg_buf *bufers;
 
 		n = sg_nents(sglout);
-		sz_out = struct_size(buflout, bufers, n + 1);
+		sz_out = struct_size(buflout, bufers, n);
 		sg_nctr = 0;
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
-				       dev_to_node(&GET_DEV(inst->accel_dev)));
-		if (unlikely(!buflout))
-			goto err_in;
+
+		if (n > QAT_MAX_BUFF_DESC) {
+			buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+			if (unlikely(!buflout))
+				goto err_in;
+		} else {
+			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+			qat_req->buf.sgl_dst_valid = true;
+		}
 
 		bufers = buflout->bufers;
 		for_each_sg(sglout, sg, n, i)
@@ -810,6 +817,8 @@ err_out:
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	kfree(buflout);
+
+	if (!qat_req->buf.sgl_dst_valid)
+		kfree(buflout);
 
 err_in:
@@ -823,6 +832,7 @@ err_in:
 				 bufl->bufers[i].len,
 				 DMA_BIDIRECTIONAL);
 
-	kfree(bufl);
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bufl);
 
 	dev_err(dev, "Failed to map buf for dma\n");
@@ -925,8 +935,25 @@ void qat_alg_callback(void *resp)
 	struct icp_qat_fw_la_resp *qat_resp = resp;
 	struct qat_crypto_request *qat_req =
 				(void *)(__force long)qat_resp->opaque_data;
+	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
 
 	qat_req->cb(qat_resp, qat_req);
+
+	qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+				    struct qat_crypto_instance *inst,
+				    struct crypto_async_request *base)
+{
+	struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+	alg_req->fw_req = (u32 *)&qat_req->req;
+	alg_req->tx_ring = inst->sym_tx;
+	alg_req->base = base;
+	alg_req->backlog = &inst->backlog;
+
+	return qat_alg_send_message(alg_req);
 }
 
 static int qat_alg_aead_dec(struct aead_request *areq)
@@ -939,7 +966,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
 	int digst_size = crypto_aead_authsize(aead_tfm);
-	int ret, ctr = 0;
+	int ret;
 	u32 cipher_len;
 
 	cipher_len = areq->cryptlen - digst_size;
@@ -965,15 +992,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
 
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_aead_enc(struct aead_request *areq)
@@ -986,7 +1010,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
 	u8 *iv = areq->iv;
-	int ret, ctr = 0;
+	int ret;
 
 	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
 		return -EINVAL;
@@ -1013,15 +1037,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
-
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
@@ -1174,7 +1194,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	int ret, ctr = 0;
+	int ret;
 
 	if (req->cryptlen == 0)
 		return 0;
@@ -1198,15 +1218,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	qat_alg_set_req_iv(qat_req);
 
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
-
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
@@ -1243,7 +1259,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	int ret, ctr = 0;
+	int ret;
 
 	if (req->cryptlen == 0)
 		return 0;
@@ -1268,15 +1284,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_alg_set_req_iv(qat_req);
 	qat_alg_update_iv(qat_req);
 
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
-
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)

View File

@@ -0,0 +1,86 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"
#define ADF_MAX_RETRIES 20
static int qat_alg_send_message_retry(struct qat_alg_req *req)
{
int ret = 0, ctr = 0;
do {
ret = adf_send_message(req->tx_ring, req->fw_req);
} while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
if (ret == -EAGAIN)
return -ENOSPC;
return -EINPROGRESS;
}
void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
{
struct qat_alg_req *req, *tmp;
spin_lock_bh(&backlog->lock);
list_for_each_entry_safe(req, tmp, &backlog->list, list) {
if (adf_send_message(req->tx_ring, req->fw_req)) {
/* The HW ring is full. Do nothing.
* qat_alg_send_backlog() will be invoked again by
* another callback.
*/
break;
}
list_del(&req->list);
req->base->complete(req->base, -EINPROGRESS);
}
spin_unlock_bh(&backlog->lock);
}
static void qat_alg_backlog_req(struct qat_alg_req *req,
struct qat_instance_backlog *backlog)
{
INIT_LIST_HEAD(&req->list);
spin_lock_bh(&backlog->lock);
list_add_tail(&req->list, &backlog->list);
spin_unlock_bh(&backlog->lock);
}
static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
{
struct qat_instance_backlog *backlog = req->backlog;
struct adf_etr_ring_data *tx_ring = req->tx_ring;
u32 *fw_req = req->fw_req;
/* If any request is already backlogged, then add to backlog list */
if (!list_empty(&backlog->list))
goto enqueue;
/* If ring is nearly full, then add to backlog list */
if (adf_ring_nearly_full(tx_ring))
goto enqueue;
/* If adding request to HW ring fails, then add to backlog list */
if (adf_send_message(tx_ring, fw_req))
goto enqueue;
return -EINPROGRESS;
enqueue:
qat_alg_backlog_req(req, backlog);
return -EBUSY;
}
int qat_alg_send_message(struct qat_alg_req *req)
{
u32 flags = req->base->flags;
if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
return qat_alg_send_message_maybacklog(req);
else
return qat_alg_send_message_retry(req);
}
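
Callers see three outcomes from qat_alg_send_message() above: -EINPROGRESS (the request is on the hardware ring), -EBUSY (parked on the backlog, completed later by qat_alg_send_backlog()), or -ENOSPC (no backlog allowed and the ring stayed full, so the caller must unwind its DMA mappings). A hedged userspace model of the decision tree, with the driver predicates replaced by plain booleans:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/* Model of the decision tree only; list_empty(), adf_ring_nearly_full()
 * and adf_send_message() are stand-ins here, not the real calls. */
static int send_message(bool may_backlog, bool backlog_nonempty,
			bool ring_nearly_full, bool ring_accepts)
{
	if (may_backlog) {
		if (backlog_nonempty || ring_nearly_full || !ring_accepts)
			return -EBUSY;		/* queued on the backlog */
		return -EINPROGRESS;		/* placed on the HW ring */
	}
	/* No CRYPTO_TFM_REQ_MAY_BACKLOG: bounded retries, then give up. */
	return ring_accepts ? -EINPROGRESS : -ENOSPC;
}

int main(void)
{
	printf("%d\n", send_message(true, false, true, true));    /* -EBUSY */
	printf("%d\n", send_message(false, false, false, false)); /* -ENOSPC */
	printf("%d\n", send_message(true, false, false, true));   /* -EINPROGRESS */
	return 0;
}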

View File

@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2022 Intel Corporation */
#ifndef QAT_ALGS_SEND_H
#define QAT_ALGS_SEND_H
#include "qat_crypto.h"
int qat_alg_send_message(struct qat_alg_req *req);
void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
#endif

View File

@@ -12,6 +12,7 @@
 #include <crypto/scatterwalk.h>
 #include "icp_qat_fw_pke.h"
 #include "adf_accel_devices.h"
+#include "qat_algs_send.h"
 #include "adf_transport.h"
 #include "adf_common_drv.h"
 #include "qat_crypto.h"
@@ -135,8 +136,23 @@ struct qat_asym_request {
 	} areq;
 	int err;
 	void (*cb)(struct icp_qat_fw_pke_resp *resp);
+	struct qat_alg_req alg_req;
 } __aligned(64);
 
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+				     struct qat_crypto_instance *inst,
+				     struct crypto_async_request *base)
+{
+	struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+	alg_req->fw_req = (u32 *)&qat_req->req;
+	alg_req->tx_ring = inst->pke_tx;
+	alg_req->base = base;
+	alg_req->backlog = &inst->backlog;
+
+	return qat_alg_send_message(alg_req);
+}
+
 static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
 {
 	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
@@ -148,25 +164,20 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
 	if (areq->src) {
-		if (req->src_align)
-			dma_free_coherent(dev, req->ctx.dh->p_size,
-					  req->src_align, req->in.dh.in.b);
-		else
-			dma_unmap_single(dev, req->in.dh.in.b,
-					 req->ctx.dh->p_size, DMA_TO_DEVICE);
+		dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+				 DMA_TO_DEVICE);
+		kfree_sensitive(req->src_align);
 	}
 
 	areq->dst_len = req->ctx.dh->p_size;
 	if (req->dst_align) {
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
-
-		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
-				  req->out.dh.r);
-	} else {
-		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
-				 DMA_FROM_DEVICE);
+		kfree_sensitive(req->dst_align);
 	}
 
+	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+			 DMA_FROM_DEVICE);
+
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
 			 DMA_TO_DEVICE);
@@ -213,8 +224,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(kpp_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
-	int ret, ctr = 0;
+	int ret;
 	int n_input_params = 0;
+	u8 *vaddr;
 
 	if (unlikely(!ctx->xa))
 		return -EINVAL;
@@ -223,6 +235,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
 		req->dst_len = ctx->p_size;
 		return -EOVERFLOW;
 	}
+
+	if (req->src_len > ctx->p_size)
+		return -EINVAL;
+
 	memset(msg, '\0', sizeof(*msg));
 	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
 					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -271,27 +287,24 @@
 		 */
 		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
 			qat_req->src_align = NULL;
-			qat_req->in.dh.in.b = dma_map_single(dev,
-							     sg_virt(req->src),
-							     req->src_len,
-							     DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(dev,
-						       qat_req->in.dh.in.b)))
-				return ret;
-
+			vaddr = sg_virt(req->src);
 		} else {
 			int shift = ctx->p_size - req->src_len;
 
-			qat_req->src_align = dma_alloc_coherent(dev,
-								ctx->p_size,
-								&qat_req->in.dh.in.b,
-								GFP_KERNEL);
+			qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
 			if (unlikely(!qat_req->src_align))
 				return ret;
 
 			scatterwalk_map_and_copy(qat_req->src_align + shift,
 						 req->src, 0, req->src_len, 0);
+
+			vaddr = qat_req->src_align;
 		}
+
+		qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+						     DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+			goto unmap_src;
 	}
 	/*
 	 * dst can be of any size in valid range, but HW expects it to be the
@@ -302,20 +315,18 @@
 	 */
 	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
 		qat_req->dst_align = NULL;
-		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
-						   req->dst_len,
-						   DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
-			goto unmap_src;
-
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
-							&qat_req->out.dh.r,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
+
+		vaddr = qat_req->dst_align;
 	}
+	qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+					   DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+		goto unmap_dst;
 
 	qat_req->in.dh.in_tab[n_input_params] = 0;
 	qat_req->out.dh.out_tab[1] = 0;
@@ -338,13 +349,13 @@
 	msg->input_param_count = n_input_params;
 	msg->output_param_count = 1;
 
-	do {
-		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
-	} while (ret == -EBUSY && ctr++ < 100);
+	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+	if (ret == -ENOSPC)
+		goto unmap_all;
 
-	if (!ret)
-		return -EINPROGRESS;
+	return ret;
 
+unmap_all:
 	if (!dma_mapping_error(dev, qat_req->phy_out))
 		dma_unmap_single(dev, qat_req->phy_out,
 				 sizeof(struct qat_dh_output_params),
@@ -355,23 +366,17 @@ unmap_in_params:
 			 sizeof(struct qat_dh_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
-				  qat_req->out.dh.r);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.dh.r))
-			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
-					 DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.dh.r))
+		dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+				 DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
 unmap_src:
 	if (req->src) {
-		if (qat_req->src_align)
-			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
-					  qat_req->in.dh.in.b);
-		else
-			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
-				dma_unmap_single(dev, qat_req->in.dh.in.b,
-						 ctx->p_size,
-						 DMA_TO_DEVICE);
+		if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+			dma_unmap_single(dev, qat_req->in.dh.in.b,
+					 ctx->p_size,
+					 DMA_TO_DEVICE);
+		kfree_sensitive(qat_req->src_align);
 	}
 	return ret;
 }
@@ -420,14 +425,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
 static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
 {
 	if (ctx->g) {
+		memset(ctx->g, 0, ctx->p_size);
 		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
 		ctx->g = NULL;
 	}
 	if (ctx->xa) {
+		memset(ctx->xa, 0, ctx->p_size);
 		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
 		ctx->xa = NULL;
 	}
 	if (ctx->p) {
+		memset(ctx->p, 0, ctx->p_size);
 		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
 		ctx->p = NULL;
 	}
@@ -510,10 +518,8 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
-	if (req->src_align)
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
-				  req->in.rsa.enc.m);
-	else
-		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
-				 DMA_TO_DEVICE);
+	kfree_sensitive(req->src_align);
+
+	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+			 DMA_TO_DEVICE);
@@ -522,12 +528,11 @@
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
 
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
-				  req->out.rsa.enc.c);
-	} else {
-		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-				 DMA_FROM_DEVICE);
+		kfree_sensitive(req->dst_align);
 	}
+
+	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+			 DMA_FROM_DEVICE);
 
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
@@ -542,8 +547,11 @@ void qat_alg_asym_callback(void *_resp)
 {
 	struct icp_qat_fw_pke_resp *resp = _resp;
 	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+	struct qat_instance_backlog *backlog = areq->alg_req.backlog;
 
 	areq->cb(resp);
+
+	qat_alg_send_backlog(backlog);
 }
 
 #define PKE_RSA_EP_512 0x1c161b21
@@ -642,7 +650,8 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
-	int ret, ctr = 0;
+	u8 *vaddr;
+	int ret;
 
 	if (unlikely(!ctx->n || !ctx->e))
 		return -EINVAL;
@@ -651,6 +660,10 @@ static int qat_rsa_enc(struct akcipher_request *req)
 		req->dst_len = ctx->key_sz;
 		return -EOVERFLOW;
 	}
+
+	if (req->src_len > ctx->key_sz)
+		return -EINVAL;
+
 	memset(msg, '\0', sizeof(*msg));
 	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
 					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -679,40 +692,39 @@
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
-						       req->src_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.enc.m,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
 
+	qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+		goto unmap_src;
+
 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
 		qat_req->dst_align = NULL;
-		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
-			goto unmap_src;
-
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.enc.c,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
+
+		vaddr = qat_req->dst_align;
 	}
+
+	qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+		goto unmap_dst;
+
 	qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -732,13 +744,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	msg->pke_mid.opaque = (u64)(__force long)qat_req;
 	msg->input_param_count = 3;
 	msg->output_param_count = 1;
-	do {
-		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
-	} while (ret == -EBUSY && ctr++ < 100);
 
-	if (!ret)
-		return -EINPROGRESS;
+	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+	if (ret == -ENOSPC)
+		goto unmap_all;
 
+	return ret;
+
+unmap_all:
 	if (!dma_mapping_error(dev, qat_req->phy_out))
 		dma_unmap_single(dev, qat_req->phy_out,
 				 sizeof(struct qat_rsa_output_params),
@@ -749,21 +762,15 @@ unmap_in_params:
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.enc.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
-			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+		dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.enc.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
-			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+		dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
@@ -776,7 +783,8 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
-	int ret, ctr = 0;
+	u8 *vaddr;
+	int ret;
 
 	if (unlikely(!ctx->n || !ctx->d))
 		return -EINVAL;
@@ -785,6 +793,10 @@ static int qat_rsa_dec(struct akcipher_request *req)
 		req->dst_len = ctx->key_sz;
 		return -EOVERFLOW;
 	}
+
+	if (req->src_len > ctx->key_sz)
+		return -EINVAL;
+
 	memset(msg, '\0', sizeof(*msg));
 	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
 					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -823,40 +835,37 @@
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
-						       req->dst_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.dec.c,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
 
+	qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+		goto unmap_src;
+
 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
 		qat_req->dst_align = NULL;
-		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
-			goto unmap_src;
-
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.dec.m,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
+
+		vaddr = qat_req->dst_align;
 	}
+
+	qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+		goto unmap_dst;
+
 	if (ctx->crt_mode)
 		qat_req->in.rsa.in_tab[6] = 0;
@@ -884,13 +893,14 @@
 	msg->input_param_count = 3;
 	msg->output_param_count = 1;
 
-	do {
-		ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
-	} while (ret == -EBUSY && ctr++ < 100);
 
-	if (!ret)
-		return -EINPROGRESS;
+	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+	if (ret == -ENOSPC)
+		goto unmap_all;
 
+	return ret;
 
+unmap_all:
 	if (!dma_mapping_error(dev, qat_req->phy_out))
 		dma_unmap_single(dev, qat_req->phy_out,
 				 sizeof(struct qat_rsa_output_params),
@@ -901,21 +911,15 @@ unmap_in_params:
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.dec.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
-			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.dec.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
-			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+		dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
@@ -1233,18 +1237,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 
-	if (ctx->n)
-		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
-	if (ctx->e)
-		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
-	if (ctx->d) {
-		memset(ctx->d, '\0', ctx->key_sz);
-		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
-	}
+	qat_rsa_clear_ctx(dev, ctx);
+
 	qat_crypto_put_instance(ctx->inst);
-	ctx->n = NULL;
-	ctx->e = NULL;
-	ctx->d = NULL;
 }
 
 static struct akcipher_alg rsa = {

View File

@@ -136,13 +136,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
 	if (ret)
 		goto err;
 
-	/* Temporarily set the number of crypto instances to zero to avoid
-	 * registering the crypto algorithms.
-	 * This will be removed when the algorithms will support the
-	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag
-	 */
-	instances = 0;
-
 	for (i = 0; i < instances; i++) {
 		val = i;
 		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
@@ -328,6 +321,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 				      &inst->pke_rx);
 		if (ret)
 			goto err;
+
+		INIT_LIST_HEAD(&inst->backlog.list);
+		spin_lock_init(&inst->backlog.lock);
 	}
 
 	return 0;
 err:

View File

@@ -9,6 +9,19 @@
#include "adf_accel_devices.h" #include "adf_accel_devices.h"
#include "icp_qat_fw_la.h" #include "icp_qat_fw_la.h"
struct qat_instance_backlog {
struct list_head list;
spinlock_t lock; /* protects backlog list */
};
struct qat_alg_req {
u32 *fw_req;
struct adf_etr_ring_data *tx_ring;
struct crypto_async_request *base;
struct list_head list;
struct qat_instance_backlog *backlog;
};
struct qat_crypto_instance { struct qat_crypto_instance {
struct adf_etr_ring_data *sym_tx; struct adf_etr_ring_data *sym_tx;
struct adf_etr_ring_data *sym_rx; struct adf_etr_ring_data *sym_rx;
@@ -19,8 +32,29 @@ struct qat_crypto_instance {
unsigned long state; unsigned long state;
int id; int id;
atomic_t refctr; atomic_t refctr;
struct qat_instance_backlog backlog;
}; };
#define QAT_MAX_BUFF_DESC 4
struct qat_alg_buf {
u32 len;
u32 resrvd;
u64 addr;
} __packed;
struct qat_alg_buf_list {
u64 resrvd;
u32 num_bufs;
u32 num_mapped_bufs;
struct qat_alg_buf bufers[];
} __packed;
struct qat_alg_fixed_buf_list {
struct qat_alg_buf_list sgl_hdr;
struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);
struct qat_crypto_request_buffs { struct qat_crypto_request_buffs {
struct qat_alg_buf_list *bl; struct qat_alg_buf_list *bl;
dma_addr_t blp; dma_addr_t blp;
@@ -28,6 +62,10 @@ struct qat_crypto_request_buffs {
dma_addr_t bloutp; dma_addr_t bloutp;
size_t sz; size_t sz;
size_t sz_out; size_t sz_out;
bool sgl_src_valid;
bool sgl_dst_valid;
struct qat_alg_fixed_buf_list sgl_src;
struct qat_alg_fixed_buf_list sgl_dst;
}; };
struct qat_crypto_request; struct qat_crypto_request;
@@ -53,6 +91,7 @@ struct qat_crypto_request {
u8 iv[AES_BLOCK_SIZE]; u8 iv[AES_BLOCK_SIZE];
}; };
bool encryption; bool encryption;
struct qat_alg_req alg_req;
}; };
static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
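
The point of qat_alg_fixed_buf_list is that a scatterlist of up to QAT_MAX_BUFF_DESC entries can reuse descriptors embedded in the request instead of paying for a GFP_ATOMIC allocation. A userspace model of that fast path follows; it leans on the same GNU C extension the kernel uses (a flexible-array struct placed ahead of a fixed array), and the function names outside the structs are invented for the sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>

#define QAT_MAX_BUFF_DESC 4

struct buf { uint32_t len; uint32_t resrvd; uint64_t addr; };
struct buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct buf bufers[];	/* flexible array, GNU extension when nested */
};
struct fixed_buf_list {
	struct buf_list sgl_hdr;
	struct buf descriptors[QAT_MAX_BUFF_DESC];
};

struct request {
	bool sgl_src_valid;
	struct fixed_buf_list sgl_src;	/* lives inside the request itself */
};

/* Model of the qat_alg_sgl_to_bufl() fast path: short scatterlists reuse
 * the preallocated descriptors, longer ones fall back to the heap. */
static struct buf_list *get_bufl(struct request *req, int n)
{
	if (n > QAT_MAX_BUFF_DESC)
		return calloc(1, sizeof(struct buf_list) + n * sizeof(struct buf));

	memset(&req->sgl_src.sgl_hdr, 0, sizeof(struct buf_list));
	req->sgl_src_valid = true;
	return &req->sgl_src.sgl_hdr;
}

int main(void)
{
	struct request req = { 0 };
	struct buf_list *bl = get_bufl(&req, 3);

	printf("preallocated: %s\n", req.sgl_src_valid ? "yes" : "no");
	if (!req.sgl_src_valid)
		free(bl);
	return 0;
}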

View File

@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
 	.reg_bits = 8,
 	.val_bits = 8,
 
+	.use_single_read = true,
+	.use_single_write = true,
+
 	.readable_reg = pca953x_readable_register,
 	.writeable_reg = pca953x_writeable_register,
 	.volatile_reg = pca953x_volatile_register,
@@ -894,15 +897,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
 {
 	DECLARE_BITMAP(val, MAX_LINE);
+	u8 regaddr;
 	int ret;
 
-	ret = regcache_sync_region(chip->regmap, chip->regs->output,
-				   chip->regs->output + NBANK(chip));
+	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+	ret = regcache_sync_region(chip->regmap, regaddr,
+				   regaddr + NBANK(chip) - 1);
 	if (ret)
 		goto out;
 
-	ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-				   chip->regs->direction + NBANK(chip));
+	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+	ret = regcache_sync_region(chip->regmap, regaddr,
+				   regaddr + NBANK(chip) - 1);
 	if (ret)
 		goto out;
@@ -1115,14 +1121,14 @@ static int pca953x_regcache_sync(struct device *dev)
 	 * sync these registers first and only then sync the rest.
 	 */
 	regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
 	if (ret) {
 		dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
 		return ret;
 	}
 
 	regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
-	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+	ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
 	if (ret) {
 		dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
 		return ret;
@@ -1132,7 +1138,7 @@ static int pca953x_regcache_sync(struct device *dev)
 	if (chip->driver_data & PCA_PCAL) {
 		regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
 		ret = regcache_sync_region(chip->regmap, regaddr,
-					   regaddr + NBANK(chip));
+					   regaddr + NBANK(chip) - 1);
 		if (ret) {
 			dev_err(dev, "Failed to sync INT latch registers: %d\n",
 				ret);
@@ -1141,7 +1147,7 @@ static int pca953x_regcache_sync(struct device *dev)
 		regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
 		ret = regcache_sync_region(chip->regmap, regaddr,
-					   regaddr + NBANK(chip));
+					   regaddr + NBANK(chip) - 1);
 		if (ret) {
 			dev_err(dev, "Failed to sync INT mask registers: %d\n",
 				ret);
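
The pca953x changes are all the same off-by-one: regcache_sync_region() takes an inclusive [min, max] register range, so a bank of NBANK registers starting at base spans base..base + NBANK - 1. A trivial demonstration with invented addresses:

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x04, nbank = 2;	/* illustrative values */

	/* The old code passed base + nbank as "max", syncing one extra
	 * register past the end of the bank. */
	printf("old (wrong): 0x%02x..0x%02x (%u regs)\n",
	       base, base + nbank, nbank + 1);
	printf("new (right): 0x%02x..0x%02x (%u regs)\n",
	       base, base + nbank - 1, nbank);
	return 0;
}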

View File

@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
 	const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
 
 	map[index] &= ~(0xFFFFFFFFul << offset);
-	map[index] |= v << offset;
+	map[index] |= (unsigned long)v << offset;
 }
 
 static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
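
The xgpio fix is a classic integer-promotion bug: v is u32, so v << offset is evaluated as a 32-bit shift even though map[] holds unsigned long, and a shift by 32 is undefined behaviour; widening first makes the upper word of a 64-bit map entry reachable. A minimal reproduction:

#include <stdio.h>

int main(void)
{
	unsigned int v = 0xdeadbeef;
	unsigned long map = 0;
	const unsigned long offset = 32;	/* second 32-bit half */

	/* Buggy form (undefined: 32-bit value shifted by 32 bits):
	 *	map |= v << offset;
	 */

	/* Fixed form: widen before shifting. */
	map |= (unsigned long)v << offset;

	printf("map = 0x%lx\n", map);	/* 0xdeadbeef00000000 on 64-bit */
	return 0;
}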

View File

@@ -70,6 +70,7 @@
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
@@ -215,6 +216,8 @@ static void handle_cursor_update(struct drm_plane *plane,
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
@@ -618,6 +621,113 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
}
#endif
/**
* dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by AUX command
 * issuing thread and also signals the event to wake up the thread.
*/
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
complete(&adev->dm.dmub_aux_transfer_done);
}
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
 * Dmub Hpd interrupt processing callback. Gets display index through the
 * link index and calls helper to do the processing.
*/
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
uint8_t link_index = 0;
struct drm_device *dev;
if (adev == NULL)
return;
if (notify == NULL) {
DRM_ERROR("DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
dev = adev->dm.ddev;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
handle_hpd_irq_helper(aconnector);
break;
}
}
drm_connector_list_iter_end(&iter);
}
/**
* register_dmub_notify_callback - Sets callback for DMUB notify
* @adev: amdgpu_device pointer
* @type: Type of dmub notification
* @callback: Dmub interrupt callback function
* @dmub_int_thread_offload: offload indicator
*
* API to register a dmub callback handler for a dmub notification
* Also sets indicator whether callback processing to be offloaded.
* to dmub interrupt handling thread
* Return: true if successfully registered, false if there is existing registration
*/
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
} else
return false;
return true;
}
static void dm_handle_hpd_work(struct work_struct *work)
{
struct dmub_hpd_work *dmub_hpd_wrk;
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
return;
}
if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
dmub_hpd_wrk->dmub_notify);
}
kfree(dmub_hpd_wrk);
}
 #define DMUB_TRACE_MAX_READ 64
 /**
  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -634,18 +744,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct dmcub_trace_buf_entry entry = { 0 };
 	uint32_t count = 0;
+	struct dmub_hpd_work *dmub_hpd_wrk;
 
 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+		if (!dmub_hpd_wrk) {
+			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+			return;
+		}
+		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+
 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
 			do {
 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
-			} while (notify.pending_notification);
-
-			if (adev->dm.dmub_notify)
-				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
-			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
-				complete(&adev->dm.dmub_aux_transfer_done);
-			// TODO : HPD Implementation
+				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+					DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
+						  ARRAY_SIZE(dm->dmub_thread_offload));
+					continue;
+				}
+				if (dm->dmub_thread_offload[notify.type] == true) {
+					dmub_hpd_wrk->dmub_notify = &notify;
+					dmub_hpd_wrk->adev = adev;
+					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+				} else {
+					dm->dmub_callback[notify.type](adev, &notify);
+				}
+			} while (notify.pending_notification);
 
 		} else {
 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
@@ -900,6 +1025,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 		return 0;
 	}
 
+	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
+	status = dmub_srv_hw_reset(dmub_srv);
+	if (status != DMUB_STATUS_OK)
+		DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
 
 	fw_inst_const = dmub_fw->data +
@@ -1109,6 +1239,149 @@ static void vblank_control_worker(struct work_struct *work)
 }
 #endif
 
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+	struct hpd_rx_irq_offload_work *offload_work;
+	struct amdgpu_dm_connector *aconnector;
+	struct dc_link *dc_link;
+	struct amdgpu_device *adev;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+	unsigned long flags;
+
+	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+	aconnector = offload_work->offload_wq->aconnector;
+
+	if (!aconnector) {
+		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+		goto skip;
+	}
+
+	adev = drm_to_adev(aconnector->base.dev);
+	dc_link = aconnector->dc_link;
+
+	mutex_lock(&aconnector->hpd_lock);
+	if (!dc_link_detect_sink(dc_link, &new_connection_type))
+		DRM_ERROR("KMS: Failed to detect connector\n");
+	mutex_unlock(&aconnector->hpd_lock);
+
+	if (new_connection_type == dc_connection_none)
+		goto skip;
+
+	if (amdgpu_in_reset(adev))
+		goto skip;
+
+	mutex_lock(&adev->dm.dc_lock);
+	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+		dc_link_dp_handle_automated_test(dc_link);
+	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		dc_link_dp_handle_link_loss(dc_link);
+		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+		offload_work->offload_wq->is_handling_link_loss = false;
+		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+	}
+	mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+	kfree(offload_work);
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+	int max_caps = dc->caps.max_links;
+	int i = 0;
+	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+	if (!hpd_rx_offload_wq)
+		return NULL;
+
+	for (i = 0; i < max_caps; i++) {
+		hpd_rx_offload_wq[i].wq =
+			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+		if (hpd_rx_offload_wq[i].wq == NULL) {
+			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+			return NULL;
+		}
+
+		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+	}
+
+	return hpd_rx_offload_wq;
+}
+
+struct amdgpu_stutter_quirk {
+	u16 chip_vendor;
+	u16 chip_device;
+	u16 subsys_vendor;
+	u16 subsys_device;
+	u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+	{ 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+	while (p && p->chip_device != 0) {
+		if (pdev->vendor == p->chip_vendor &&
+		    pdev->device == p->chip_device &&
+		    pdev->subsystem_vendor == p->subsys_vendor &&
+		    pdev->subsystem_device == p->subsys_device &&
+		    pdev->revision == p->revision) {
+			return true;
+		}
+		++p;
+	}
+	return false;
+}
+
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+		},
+	},
+	{}
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+	const struct dmi_system_id *dmi_id;
+
+	dm->aux_hpd_discon_quirk = false;
+
+	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+
+	if (dmi_id) {
+		dm->aux_hpd_discon_quirk = true;
+		DRM_INFO("aux_hpd_discon_quirk attached\n");
+	}
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1200,6 +1473,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		init_data.flags.power_down_display_on_boot = true;
 
 	INIT_LIST_HEAD(&adev->dm.da_list);
+
+	retrieve_dmi_info(&adev->dm);
+
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
 
@@ -1217,6 +1493,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+	if (dm_should_disable_stutter(adev->pdev))
+		adev->dm.dc->debug.disable_stutter = true;
 
 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
 		adev->dm.dc->debug.disable_stutter = true;
@@ -1235,6 +1513,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	dc_hardware_init(adev->dm.dc);
 
+	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+	if (!adev->dm.hpd_rx_offload_wq) {
+		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+		goto error;
+	}
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
 		struct dc_phy_addr_space_config pa_config;
@@ -1287,7 +1571,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
 			goto error;
 		}
+
+		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+		if (!adev->dm.delayed_hpd_wq) {
+			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+			goto error;
+		}
+
 		amdgpu_dm_outbox_init(adev);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+			dmub_aux_setconfig_callback, false)) {
+			DRM_ERROR("amdgpu: fail to register dmub aux callback");
+			goto error;
+		}
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+			goto error;
+		}
+#endif
 	}
 
 	if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1369,6 +1671,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
 		kfree(adev->dm.dmub_notify);
 		adev->dm.dmub_notify = NULL;
+		destroy_workqueue(adev->dm.delayed_hpd_wq);
+		adev->dm.delayed_hpd_wq = NULL;
 	}
 
 	if (adev->dm.dmub_bo)
@@ -1394,6 +1698,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 		adev->dm.freesync_module = NULL;
 	}
 
+	if (adev->dm.hpd_rx_offload_wq) {
+		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+			if (adev->dm.hpd_rx_offload_wq[i].wq) {
+				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+			}
+		}
+
+		kfree(adev->dm.hpd_rx_offload_wq);
+		adev->dm.hpd_rx_offload_wq = NULL;
+	}
+
 	mutex_destroy(&adev->dm.audio_lock);
 	mutex_destroy(&adev->dm.dc_lock);
 
@@ -2013,6 +2329,16 @@ context_alloc_fail:
 	return res;
 }
 
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+	int i;
+
+	if (dm->hpd_rx_offload_wq) {
+		for (i = 0; i < dm->dc->caps.max_links; i++)
+			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+	}
+}
+
 static int dm_suspend(void *handle)
 {
 	struct amdgpu_device *adev = handle;
@@ -2034,6 +2360,8 @@ static int dm_suspend(void *handle)
 
 	amdgpu_dm_irq_suspend(adev);
 
+	hpd_rx_irq_work_suspend(dm);
+
 	return ret;
 }
 
@@ -2044,6 +2372,8 @@ static int dm_suspend(void *handle)
 
 	amdgpu_dm_irq_suspend(adev);
 
+	hpd_rx_irq_work_suspend(dm);
+
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
 	return 0;
@@ -2654,9 +2984,8 @@ void amdgpu_dm_update_connector_after_detect(
 		dc_sink_release(sink);
 }
 
-static void handle_hpd_irq(void *param)
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 {
-	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 	struct drm_connector *connector = &aconnector->base;
 	struct drm_device *dev = connector->dev;
 	enum dc_connection_type new_connection_type = dc_connection_none;
@@ -2715,7 +3044,15 @@ static void handle_hpd_irq(void *param)
 
 }
 
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void handle_hpd_irq(void *param)
+{
+	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+	handle_hpd_irq_helper(aconnector);
+}
+
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 {
 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
 	uint8_t dret;
@@ -2793,6 +3130,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
 }
 
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+							union hpd_irq_data hpd_irq_data)
+{
+	struct hpd_rx_irq_offload_work *offload_work =
+				kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+	if (!offload_work) {
+		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+		return;
+	}
+
+	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+	offload_work->data = hpd_irq_data;
+	offload_work->offload_wq = offload_wq;
+
+	queue_work(offload_wq->wq, &offload_work->work);
+	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
 static void handle_hpd_rx_irq(void *param)
 {
 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@@ -2804,14 +3160,16 @@ static void handle_hpd_rx_irq(void *param)
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	union hpd_irq_data hpd_irq_data;
-	bool lock_flag = 0;
+	bool link_loss = false;
+	bool has_left_work = false;
+	int idx = aconnector->base.index;
+	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
 	if (adev->dm.disable_hpd_irq)
 		return;
 
 	/*
 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
 	 * conflict, after implement i2c helper, this mutex should be
@@ -2819,44 +3177,42 @@ static void handle_hpd_rx_irq(void *param)
 	 */
 	mutex_lock(&aconnector->hpd_lock);
 
-	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
-
-	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
-		(dc_link->type == dc_connection_mst_branch)) {
-		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
-			result = true;
-			dm_handle_hpd_rx_irq(aconnector);
-			goto out;
-		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-			result = false;
-			dm_handle_hpd_rx_irq(aconnector);
+	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+						&link_loss, true, &has_left_work);
+
+	if (!has_left_work)
+		goto out;
+
+	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+		goto out;
+	}
+
+	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+			dm_handle_mst_sideband_msg(aconnector);
 			goto out;
 		}
-	}
 
-	/*
-	 * TODO: We need the lock to avoid touching DC state while it's being
-	 * modified during automated compliance testing, or when link loss
-	 * happens. While this should be split into subhandlers and proper
-	 * interfaces to avoid having to conditionally lock like this in the
-	 * outer layer, we need this workaround temporarily to allow MST
-	 * lightup in some scenarios to avoid timeout.
-	 */
-	if (!amdgpu_in_reset(adev) &&
-	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
-	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
-		mutex_lock(&adev->dm.dc_lock);
-		lock_flag = 1;
-	}
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
-	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
-	if (!amdgpu_in_reset(adev) && lock_flag)
-		mutex_unlock(&adev->dm.dc_lock);
+		if (link_loss) {
+			bool skip = false;
+
+			spin_lock(&offload_wq->offload_lock);
+			skip = offload_wq->is_handling_link_loss;
+
+			if (!skip)
+				offload_wq->is_handling_link_loss = true;
+
+			spin_unlock(&offload_wq->offload_lock);
+
+			if (!skip)
+				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+			goto out;
+		}
+	}
 
 out:
 	if (result && !is_mst_root_connector) {
 		/* Downstream Port status changed. */
@@ -2940,6 +3296,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 					handle_hpd_rx_irq,
 					(void *) aconnector);
+
+			if (adev->dm.hpd_rx_offload_wq)
+				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+					aconnector;
 		}
 	}
 }
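The link-loss path above deduplicates work: the IRQ handler claims is_handling_link_loss under offload_lock before queueing, and the worker clears the flag only after dc_link_dp_handle_link_loss() finishes, so at most one link-loss work item per link is in flight. A reduced sketch of that claim/release pattern (generic names, not the driver's API):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct offload_q {
		spinlock_t lock;
		bool busy;			/* a work item is queued or running */
		struct workqueue_struct *wq;
	};

	static void try_queue(struct offload_q *q, struct work_struct *work)
	{
		bool skip;

		spin_lock(&q->lock);
		skip = q->busy;
		if (!skip)
			q->busy = true;		/* claim before queueing */
		spin_unlock(&q->lock);

		if (!skip)
			queue_work(q->wq, work);	/* worker clears q->busy when done */
	}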


@@ -47,6 +47,8 @@
 #define AMDGPU_DM_MAX_CRTC 6
 
 #define AMDGPU_DM_MAX_NUM_EDP 2
+
+#define AMDGPU_DMUB_NOTIFICATION_MAX 5
 /*
 #include "include/amdgpu_dal_power_if.h"
 #include "amdgpu_dm_irq.h"
@@ -86,6 +88,21 @@ struct dm_compressor_info {
 	uint64_t gpu_addr;
 };
 
+typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
+
+/**
+ * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
+ *
+ * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
+ * @dmub_notify: notification for callback function
+ * @adev: amdgpu_device pointer
+ */
+struct dmub_hpd_work {
+	struct work_struct handle_hpd_work;
+	struct dmub_notification *dmub_notify;
+	struct amdgpu_device *adev;
+};
+
 /**
  * struct vblank_control_work - Work data for vblank control
  * @work: Kernel work data for the work event
@@ -154,6 +171,48 @@ struct dal_allocation {
 	u64 gpu_addr;
 };
 
+/**
+ * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
+ * offload work
+ */
+struct hpd_rx_irq_offload_work_queue {
+	/**
+	 * @wq: workqueue structure to queue offload work.
+	 */
+	struct workqueue_struct *wq;
+	/**
+	 * @offload_lock: To protect fields of offload work queue.
+	 */
+	spinlock_t offload_lock;
+	/**
+	 * @is_handling_link_loss: Used to prevent inserting link loss event when
+	 * we're handling link loss
+	 */
+	bool is_handling_link_loss;
+	/**
+	 * @aconnector: The aconnector that this work queue is attached to
+	 */
+	struct amdgpu_dm_connector *aconnector;
+};
+
+/**
+ * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
+ */
+struct hpd_rx_irq_offload_work {
+	/**
+	 * @work: offload work
+	 */
+	struct work_struct work;
+	/**
+	 * @data: reference irq data which is used while handling offload work
+	 */
+	union hpd_irq_data data;
+	/**
+	 * @offload_wq: offload work queue that this work is queued to
+	 */
+	struct hpd_rx_irq_offload_work_queue *offload_wq;
+};
+
 /**
  * struct amdgpu_display_manager - Central amdgpu display manager device
  *
@@ -190,8 +249,30 @@ struct amdgpu_display_manager {
 	 */
 	struct dmub_srv *dmub_srv;
 
+	/**
+	 * @dmub_notify:
+	 *
+	 * Notification from DMUB.
+	 */
 	struct dmub_notification *dmub_notify;
 
+	/**
+	 * @dmub_callback:
+	 *
+	 * Callback functions to handle notification from DMUB.
+	 */
+	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+	/**
+	 * @dmub_thread_offload:
+	 *
+	 * Flag to indicate if callback processing is offloaded.
+	 */
+	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
+
 	/**
 	 * @dmub_fb_info:
 	 *
@@ -422,7 +503,12 @@ struct amdgpu_display_manager {
 	 */
 	struct crc_rd_work *crc_rd_wrk;
 #endif
-
+	/**
+	 * @hpd_rx_offload_wq:
+	 *
+	 * Work queue to offload works of hpd_rx_irq
+	 */
+	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
 	/**
	 * @mst_encoders:
 	 *
@@ -439,6 +525,7 @@ struct amdgpu_display_manager {
 	 */
 	struct list_head da_list;
 	struct completion dmub_aux_transfer_done;
+	struct workqueue_struct *delayed_hpd_wq;
 
 	/**
	 * @brightness:
@@ -452,6 +539,14 @@ struct amdgpu_display_manager {
 	 * last successfully applied backlight values.
 	 */
 	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
+
+	/**
+	 * @aux_hpd_discon_quirk:
+	 *
+	 * Quirk for an HPD disconnect that occurs while an aux transaction
+	 * is ongoing, seen on certain Intel platforms.
+	 */
+	bool aux_hpd_discon_quirk;
 };
 
 enum dsc_clock_force_state {


@@ -55,6 +55,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 	ssize_t result = 0;
 	struct aux_payload payload;
 	enum aux_return_code_type operation_result;
+	struct amdgpu_device *adev;
+	struct ddc_service *ddc;
 
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
@@ -71,6 +73,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
 				      &operation_result);
 
+	/*
+	 * w/a on certain Intel platforms where hpd is unexpectedly pulled low
+	 * during the 1st sideband message transaction, returning
+	 * AUX_RET_ERROR_HPD_DISCON. The aux transaction actually succeeds in
+	 * such a case, therefore bypass the error.
+	 */
+	ddc = TO_DM_AUX(aux)->ddc_service;
+	adev = ddc->ctx->driver_context;
+	if (adev->dm.aux_hpd_discon_quirk) {
+		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
+			operation_result == AUX_RET_ERROR_HPD_DISCON) {
+			result = 0;
+			operation_result = AUX_RET_SUCCESS;
+		}
+	}
+
 	if (payload.write && result >= 0)
 		result = msg->size;


@@ -1788,6 +1788,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 
 	post_surface_trace(dc);
 
+	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+	else
+		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
 	if (is_flip_pending_in_pipes(dc, context))
 		return;
 
@@ -2974,6 +2979,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
 				new_pipe->plane_state->force_full_update = true;
 		}
+	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+		/*
+		 * Previous frame finished and HW is ready for optimization.
+		 *
+		 * Only relevant for DCN behavior where we can guarantee the optimization
+		 * is safe to apply - retain the legacy behavior for DCE.
+		 */
+		dc_post_update_surfaces_to_stream(dc);
 	}
 
@@ -3030,13 +3043,10 @@ void dc_commit_updates_for_stream(struct dc *dc,
 				pipe_ctx->plane_state->force_full_update = false;
 		}
 	}
-	/*let's use current_state to update watermark etc*/
-	if (update_type >= UPDATE_TYPE_FULL) {
-		dc_post_update_surfaces_to_stream(dc);
 
-		if (dc_ctx->dce_version >= DCE_VERSION_MAX)
-			TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
-		else
-			TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+	/* Legacy optimization path for DCE. */
+	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+		dc_post_update_surfaces_to_stream(dc);
+		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
 	}


@@ -2075,7 +2075,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 	return max_link_cap;
 }
 
-enum dc_status read_hpd_rx_irq_data(
+static enum dc_status read_hpd_rx_irq_data(
 	struct dc_link *link,
 	union hpd_irq_data *irq_data)
 {
@@ -2743,7 +2743,7 @@ void decide_link_settings(struct dc_stream_state *stream,
 }
 
 /*************************Short Pulse IRQ***************************/
-static bool allow_hpd_rx_irq(const struct dc_link *link)
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
 {
 	/*
 	 * Don't handle RX IRQ unless one of following is met:
@@ -3177,7 +3177,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
 	}
 }
 
-static void handle_automated_test(struct dc_link *link)
+void dc_link_dp_handle_automated_test(struct dc_link *link)
 {
 	union test_request test_request;
 	union test_response test_response;
@@ -3226,17 +3226,50 @@ static void handle_automated_test(struct dc_link *link)
 			sizeof(test_response));
 }
 
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+	int i;
+	struct pipe_ctx *pipe_ctx;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+			break;
+	}
+
+	if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+		return;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+				pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+			core_link_disable_stream(pipe_ctx);
+		}
+	}
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+		if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+				pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+			core_link_enable_stream(link->dc->current_state, pipe_ctx);
+		}
+	}
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+							bool defer_handling, bool *has_left_work)
 {
 	union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
 	union device_service_irq device_service_clear = { { 0 } };
 	enum dc_status result;
 	bool status = false;
-	struct pipe_ctx *pipe_ctx;
-	int i;
 
 	if (out_link_loss)
 		*out_link_loss = false;
+
+	if (has_left_work)
+		*has_left_work = false;
 	/* For use cases related to down stream connection status change,
 	 * PSR and device auto test, refer to function handle_sst_hpd_irq
 	 * in DAL2.1*/
@@ -3268,11 +3301,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 			&device_service_clear.raw,
 			sizeof(device_service_clear.raw));
 		device_service_clear.raw = 0;
-		handle_automated_test(link);
+		if (defer_handling && has_left_work)
+			*has_left_work = true;
+		else
+			dc_link_dp_handle_automated_test(link);
 		return false;
 	}
 
-	if (!allow_hpd_rx_irq(link)) {
+	if (!dc_link_dp_allow_hpd_rx_irq(link)) {
 		DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
 			__func__, link->link_index);
 		return false;
@@ -3286,12 +3322,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 	 * so do not handle as a normal sink status change interrupt.
 	 */
 
-	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+		if (defer_handling && has_left_work)
+			*has_left_work = true;
 		return true;
+	}
 
 	/* check if we have MST msg and return since we poll for it */
-	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+		if (defer_handling && has_left_work)
+			*has_left_work = true;
 		return false;
+	}
 
 	/* For now we only handle 'Downstream port status' case.
	 * If we got sink count changed it means
@@ -3308,29 +3350,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 			sizeof(hpd_irq_dpcd_data),
 			"Status: ");
 
-		for (i = 0; i < MAX_PIPES; i++) {
-			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
-				break;
-		}
-
-		if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
-			return false;
-
-		for (i = 0; i < MAX_PIPES; i++) {
-			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-			if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-					pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
-				core_link_disable_stream(pipe_ctx);
-		}
-
-		for (i = 0; i < MAX_PIPES; i++) {
-			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-			if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-					pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
-				core_link_enable_stream(link->dc->current_state, pipe_ctx);
-		}
+		if (defer_handling && has_left_work)
+			*has_left_work = true;
+		else
+			dc_link_dp_handle_link_loss(link);
 
 		status = false;
 		if (out_link_loss)


@@ -296,7 +296,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
  * false - no change in Downstream port status. No further action required
  * from DM. */
 bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
-		union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+		union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+		bool defer_handling, bool *has_left_work);
 
 /*
 * On eDP links this function call will stall until T12 has elapsed.
@@ -305,9 +306,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
 */
 bool dc_link_wait_for_t12(struct dc_link *link);
 
-enum dc_status read_hpd_rx_irq_data(
-	struct dc_link *link,
-	union hpd_irq_data *irq_data);
+void dc_link_dp_handle_automated_test(struct dc_link *link);
+void dc_link_dp_handle_link_loss(struct dc_link *link);
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
 
 struct dc_sink_init_data;


@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
 		     struct dma_buf_map *map)
 {
 	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+	int ret;
 
-	return ttm_bo_vmap(bo, map);
+	dma_resv_lock(gem->resv, NULL);
+	ret = ttm_bo_vmap(bo, map);
+	dma_resv_unlock(gem->resv);
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_gem_ttm_vmap);
 
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
 {
 	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
+	dma_resv_lock(gem->resv, NULL);
 	ttm_bo_vunmap(bo, map);
+	dma_resv_unlock(gem->resv);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vunmap);
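ttm_bo_vmap() and ttm_bo_vunmap() expect the buffer's reservation lock to be held, so the helpers now take gem->resv around the calls. A caller-side sketch of what this buys (hypothetical usage, not from the patch):

	/* The helpers handle locking internally, so a caller can simply: */
	struct dma_buf_map map;
	int ret = drm_gem_ttm_vmap(gem, &map);	/* locks, maps, unlocks */
	if (!ret) {
		/* ... access the mapping ... */
		drm_gem_ttm_vunmap(gem, &map);	/* locks, unmaps, unlocks */
	}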


@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
 
 	ret = dcss_submodules_init(dcss);
 	if (ret) {
+		of_node_put(dcss->of_port);
 		dev_err(dev, "submodules initialization failed\n");
 		goto clks_err;
 	}
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
 		dcss_clocks_disable(dcss);
 	}
 
+	of_node_put(dcss->of_port);
+
 	pm_runtime_disable(dcss->dev);
 
 	dcss_submodules_stop(dcss);


@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
  */
 static irqreturn_t cdns_i2c_master_isr(void *ptr)
 {
-	unsigned int isr_status, avail_bytes, updatetx;
+	unsigned int isr_status, avail_bytes;
 	unsigned int bytes_to_send;
-	bool hold_quirk;
+	bool updatetx;
 	struct cdns_i2c *id = ptr;
 	/* Signal completion only after everything is updated */
 	int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
 	 * Check if transfer size register needs to be updated again for a
 	 * large data receive operation.
 	 */
-	updatetx = 0;
-	if (id->recv_count > id->curr_recv_count)
-		updatetx = 1;
-
-	hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+	updatetx = id->recv_count > id->curr_recv_count;
 
 	/* When receiving, handle data interrupt and completion interrupt */
 	if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
 				break;
 			}
 
-			if (cdns_is_holdquirk(id, hold_quirk))
+			if (cdns_is_holdquirk(id, updatetx))
 				break;
 		}
 
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
 		 * maintain transfer size non-zero while performing a large
 		 * receive operation.
 		 */
-		if (cdns_is_holdquirk(id, hold_quirk)) {
+		if (cdns_is_holdquirk(id, updatetx)) {
 			/* wait while fifo is full */
 			while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
 			       (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
 					  CDNS_I2C_XFER_SIZE_OFFSET);
 				id->curr_recv_count = id->recv_count;
 			}
-		} else if (id->recv_count && !hold_quirk &&
-			   !id->curr_recv_count) {
-
-			/* Set the slave address in address register*/
-			cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
-					  CDNS_I2C_ADDR_OFFSET);
-
-			if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
-				cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-						  CDNS_I2C_XFER_SIZE_OFFSET);
-				id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
-			} else {
-				cdns_i2c_writereg(id->recv_count,
-						  CDNS_I2C_XFER_SIZE_OFFSET);
-				id->curr_recv_count = id->recv_count;
-			}
 		}
 
 	/* Clear hold (if not repeated start) and signal completion */


@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND		2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET	0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET	0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET	0x0e
 #define MLXCPLD_I2C_FREQ_100KHZ_SET	0x42
 
 enum mlxcpld_i2c_frequency {


@@ -4221,10 +4221,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 	struct irdma_cm_node *cm_node;
 	struct list_head teardown_list;
 	struct ib_qp_attr attr;
-	struct irdma_sc_vsi *vsi = &iwdev->vsi;
-	struct irdma_sc_qp *sc_qp;
-	struct irdma_qp *qp;
-	int i;
 
 	INIT_LIST_HEAD(&teardown_list);
@@ -4241,52 +4237,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 		irdma_cm_disconn(cm_node->iwqp);
 		irdma_rem_ref_cm_node(cm_node);
 	}
-
-	if (!iwdev->roce_mode)
-		return;
-
-	INIT_LIST_HEAD(&teardown_list);
-	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-		mutex_lock(&vsi->qos[i].qos_mutex);
-		list_for_each_safe (list_node, list_core_temp,
-				    &vsi->qos[i].qplist) {
-			u32 qp_ip[4];
-
-			sc_qp = container_of(list_node, struct irdma_sc_qp,
-					     list);
-			if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
-				continue;
-
-			qp = sc_qp->qp_uk.back_qp;
-			if (!disconnect_all) {
-				if (nfo->ipv4)
-					qp_ip[0] = qp->udp_info.local_ipaddr[3];
-				else
-					memcpy(qp_ip,
-					       &qp->udp_info.local_ipaddr[0],
-					       sizeof(qp_ip));
-			}
-
-			if (disconnect_all ||
-			    (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
-			     !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
-				spin_lock(&iwdev->rf->qptable_lock);
-				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
-					irdma_qp_add_ref(&qp->ibqp);
-					list_add(&qp->teardown_entry,
-						 &teardown_list);
-				}
-				spin_unlock(&iwdev->rf->qptable_lock);
-			}
-		}
-		mutex_unlock(&vsi->qos[i].qos_mutex);
-	}
-
-	list_for_each_safe (list_node, list_core_temp, &teardown_list) {
-		qp = container_of(list_node, struct irdma_qp, teardown_entry);
-		attr.qp_state = IB_QPS_ERR;
-		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
-		irdma_qp_rem_ref(&qp->ibqp);
-	}
 }
 
 /**


@@ -202,6 +202,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
 	dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
 	dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
 	dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
 	dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
 	dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
 	dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;


@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
 	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
 	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
 	dev->irq_ops = &icrdma_irq_ops;
+	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
 	dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
 	dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
 	dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;


@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
 	u64 max_hw_outbound_msg_size;
 	u64 max_hw_inbound_msg_size;
 	u64 max_mr_size;
+	u64 page_size_cap;
 	u32 min_hw_qp_id;
 	u32 min_hw_aeq_size;
 	u32 max_hw_aeq_size;


@@ -29,7 +29,7 @@ static int irdma_query_device(struct ib_device *ibdev,
 	props->vendor_part_id = pcidev->device;
 
 	props->hw_ver = rf->pcidev->revision;
-	props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+	props->page_size_cap = hw_attrs->page_size_cap;
 	props->max_mr_size = hw_attrs->max_mr_size;
 	props->max_qp = rf->max_qp - rf->used_qps;
 	props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -2776,7 +2776,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
 		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 SZ_4K | SZ_2M | SZ_1G,
+							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
 							 virt);
 		if (unlikely(!iwmr->page_size)) {
 			kfree(iwmr);
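page_size_cap is a bitmap of supported MR page sizes (each set bit is a power-of-two size), and ib_umem_find_best_pgsz() picks the largest allowed size that also tiles the registered region. The values wired up above, restated as a sketch (kernel size macros; x722 deliberately omits 1GB):

	#include <linux/sizes.h>

	/* Each set bit k allows 2^k-byte pages for memory registration. */
	#define I40IW_PAGE_SIZE_CAP	(SZ_4K | SZ_2M)		/* x722: no 1GB MRs */
	#define ICRDMA_PAGE_SIZE_CAP	(SZ_4K | SZ_2M | SZ_1G)

	/* ib_umem_find_best_pgsz(region, cap, virt) then returns the largest
	 * size permitted by cap that aligns with the MR's start address. */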


@@ -644,7 +644,7 @@ err_out:
  *                             RDN_DELAY = -----------------------     {3}
  *                                                  RP
  */
-static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
 				     const struct nand_sdr_timings *sdr)
 {
 	struct gpmi_nfc_hardware_timing *hw = &this->hw;
@@ -655,32 +655,44 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
 	unsigned int tRP_ps;
 	bool use_half_period;
 	int sample_delay_ps, sample_delay_factor;
-	u16 busy_timeout_cycles;
+	unsigned int busy_timeout_cycles;
 	u8 wrn_dly_sel;
+	unsigned long clk_rate, min_rate;
+	u64 busy_timeout_ps;
 
 	if (sdr->tRC_min >= 30000) {
 		/* ONFI non-EDO modes [0-3] */
 		hw->clk_rate = 22000000;
+		min_rate = 0;
 		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
 	} else if (sdr->tRC_min >= 25000) {
 		/* ONFI EDO mode 4 */
 		hw->clk_rate = 80000000;
+		min_rate = 22000000;
 		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 	} else {
 		/* ONFI EDO mode 5 */
 		hw->clk_rate = 100000000;
+		min_rate = 80000000;
 		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 	}
 
-	hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
+	clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
+	if (clk_rate <= min_rate) {
+		dev_err(this->dev, "clock setting: expected %ld, got %ld\n",
+			hw->clk_rate, clk_rate);
+		return -ENOTSUPP;
+	}
+
+	hw->clk_rate = clk_rate;
 
 	/* SDR core timings are given in picoseconds */
 	period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
 
 	addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
 	data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
 	data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
-	busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+	busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+	busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
 
 	hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
 		      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
@@ -714,6 +726,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
 		hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
 			      BM_GPMI_CTRL1_DLL_ENABLE |
 			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
+	return 0;
 }
 
 static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
@@ -769,6 +782,7 @@ static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
 {
 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
 	const struct nand_sdr_timings *sdr;
+	int ret;
 
 	/* Retrieve required NAND timings */
 	sdr = nand_get_sdr_timings(conf);
@@ -784,7 +798,9 @@ static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
 		return 0;
 
 	/* Do the actual derivation of the controller timings */
-	gpmi_nfc_compute_timings(this, sdr);
+	ret = gpmi_nfc_compute_timings(this, sdr);
+	if (ret)
+		return ret;
 
 	this->hw.must_apply_timings = true;
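tWB_max + tR_max only bounds a read's busy phase; erase (tBERS_max) and program (tPROG_max) run far longer, so the timeout is now derived from their maximum, and the counter was widened from u16 so the larger cycle counts cannot overflow. A sketch of the computation, assuming TO_CYCLES boils down to a round-up division of picoseconds by the clock period:

	#include <linux/math64.h>
	#include <linux/minmax.h>

	/* Round a duration in ps up to whole clock cycles (assumed TO_CYCLES shape). */
	#define TO_CYCLES(duration_ps, period_ps) \
		DIV_ROUND_UP_ULL((u64)(duration_ps), (period_ps))

	static unsigned int busy_timeout_cycles(u64 tBERS_max_ps, u64 tPROG_max_ps,
						unsigned int period_ps)
	{
		/* Wait long enough for the slowest of erase and program. */
		return TO_CYCLES(max(tBERS_max_ps, tPROG_max_ps), period_ps);
	}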


@@ -414,18 +414,21 @@ int ksz_switch_register(struct ksz_device *dev,
 		ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
 		if (!ports)
 			ports = of_get_child_by_name(dev->dev->of_node, "ports");
-		if (ports)
+		if (ports) {
 			for_each_available_child_of_node(ports, port) {
 				if (of_property_read_u32(port, "reg",
 							 &port_num))
 					continue;
 				if (!(dev->port_mask & BIT(port_num))) {
 					of_node_put(port);
+					of_node_put(ports);
 					return -EINVAL;
 				}
 				of_get_phy_mode(port,
 						&dev->ports[port_num].interface);
 			}
+			of_node_put(ports);
+		}
 		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
 							 "microchip,synclko-125");
 	}
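of_get_child_by_name() returns its node with an elevated refcount, so every exit path, including the early -EINVAL return, must drop it; the loop iterator likewise holds a reference on the current child. The general shape of the fix, as a sketch (bad() is a hypothetical predicate standing in for the validity check):

	struct device_node *ports, *port;

	ports = of_get_child_by_name(np, "ports");	/* takes a reference */
	if (ports) {
		for_each_available_child_of_node(ports, port) {
			if (bad(port)) {
				of_node_put(port);	/* iterator's ref on port */
				of_node_put(ports);	/* early exit drops ports too */
				return -EINVAL;
			}
		}
		of_node_put(ports);	/* normal path drops it as well */
	}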


@@ -3372,12 +3372,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
 
+static const struct spi_device_id sja1105_spi_ids[] = {
+	{ "sja1105e" },
+	{ "sja1105t" },
+	{ "sja1105p" },
+	{ "sja1105q" },
+	{ "sja1105r" },
+	{ "sja1105s" },
+	{ "sja1110a" },
+	{ "sja1110b" },
+	{ "sja1110c" },
+	{ "sja1110d" },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
 static struct spi_driver sja1105_driver = {
 	.driver = {
 		.name  = "sja1105",
 		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(sja1105_dt_ids),
 	},
+	.id_table = sja1105_spi_ids,
 	.probe  = sja1105_probe,
 	.remove = sja1105_remove,
 	.shutdown = sja1105_shutdown,
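Without an spi_device_id table the SPI core can still bind by OF compatible, but the module never advertises spi: aliases, so automatic module loading by chip name fails. MODULE_DEVICE_TABLE(spi, ...) is what emits those aliases; roughly:

	/* Each entry becomes a MODULE_ALIAS("spi:sja1105e") etc., which udev
	 * uses to modprobe the right driver when the device is created. */
	static const struct spi_device_id example_spi_ids[] = {
		{ "sja1105e" },
		{ },
	};
	MODULE_DEVICE_TABLE(spi, example_spi_ids);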


@@ -207,10 +207,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
 
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+	{ "vsc7385" },
+	{ "vsc7388" },
+	{ "vsc7395" },
+	{ "vsc7398" },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
 static struct spi_driver vsc73xx_spi_driver = {
 	.probe = vsc73xx_spi_probe,
 	.remove = vsc73xx_spi_remove,
 	.shutdown = vsc73xx_spi_shutdown,
+	.id_table = vsc73xx_spi_ids,
 	.driver = {
 		.name = "vsc73xx-spi",
 		.of_match_table = vsc73xx_of_match,


@@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	csk->sndbuf = newsk->sk_sndbuf;
 	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
 	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
-					   sock_net(newsk)->
-						ipv4.sysctl_tcp_window_scaling,
+					   READ_ONCE(sock_net(newsk)->
+						     ipv4.sysctl_tcp_window_scaling),
 					   tp->window_clamp);
 	neigh_release(n);
 	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1384,7 +1384,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 #endif
 	}
 	if (req->tcpopt.wsf <= 14 &&
-	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
 		inet_rsk(oreq)->wscale_ok = 1;
 		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
 	}
@@ -1392,7 +1392,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 	th_ecn = tcph->ece && tcph->cwr;
 	if (th_ecn) {
 		ect = !INET_ECN_is_not_ect(ip_dsfield);
-		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+		ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
 		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
 			inet_rsk(oreq)->ecn_ok = 1;
 	}
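These sysctl fields are plain ints updated with no lock held, so lockless readers race with the writer unless both sides use the annotated accessors; READ_ONCE() keeps the compiler from tearing, fusing, or re-loading the value. The general pattern (sketch; the matching WRITE_ONCE() lives in the sysctl handler):

	/* writer side, e.g. the sysctl proc handler */
	WRITE_ONCE(net->ipv4.sysctl_tcp_ecn, val);

	/* lockless reader side */
	ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn);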


@@ -2287,7 +2287,7 @@ err:
 
 /* Uses sync mcc */
 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
-				      u8 page_num, u8 *data)
+				      u8 page_num, u32 off, u32 len, u8 *data)
 {
 	struct be_dma_mem cmd;
 	struct be_mcc_wrb *wrb;
@@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
 	req->port = cpu_to_le32(adapter->hba_port_num);
 	req->page_num = cpu_to_le32(page_num);
 	status = be_mcc_notify_wait(adapter);
-	if (!status) {
+	if (!status && len > 0) {
 		struct be_cmd_resp_port_type *resp = cmd.va;
 
-		memcpy(data, resp->page_data, PAGE_DATA_LEN);
+		memcpy(data, resp->page_data + off, len);
 	}
 err:
 	mutex_unlock(&adapter->mcc_lock);
@@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
 	int status;
 
 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   page_data);
+						   0, PAGE_DATA_LEN, page_data);
 	if (!status) {
 		switch (adapter->phy.interface_type) {
 		case PHY_TYPE_QSFP:
@@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
 	int status;
 
 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   page_data);
+						   0, PAGE_DATA_LEN, page_data);
 	if (!status) {
 		strlcpy(adapter->phy.vendor_name, page_data +
 			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);


@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
 			    u32 *state);
 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
-				      u8 page_num, u8 *data);
+				      u8 page_num, u32 off, u32 len, u8 *data);
 int be_cmd_query_cable_type(struct be_adapter *adapter);
 int be_cmd_query_sfp_info(struct be_adapter *adapter);
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,


@@ -1342,7 +1342,7 @@ static int be_get_module_info(struct net_device *netdev,
 		return -EOPNOTSUPP;
 
 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   page_data);
+						   0, PAGE_DATA_LEN, page_data);
 	if (!status) {
 		if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
 			modinfo->type = ETH_MODULE_SFF_8079;
@@ -1360,25 +1360,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	int status;
+	u32 begin, end;
 
 	if (!check_privilege(adapter, MAX_PRIVILEGES))
 		return -EOPNOTSUPP;
 
-	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
-						   data);
-	if (status)
-		goto err;
+	begin = eeprom->offset;
+	end = eeprom->offset + eeprom->len;
+
+	if (begin < PAGE_DATA_LEN) {
+		status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+							   min_t(u32, end, PAGE_DATA_LEN) - begin,
+							   data);
+		if (status)
+			goto err;
+
+		data += PAGE_DATA_LEN - begin;
+		begin = PAGE_DATA_LEN;
+	}
 
-	if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
-		status = be_cmd_read_port_transceiver_data(adapter,
-							   TR_PAGE_A2,
-							   data +
-							   PAGE_DATA_LEN);
+	if (end > PAGE_DATA_LEN) {
+		status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+							   begin - PAGE_DATA_LEN,
+							   end - begin, data);
 		if (status)
 			goto err;
 	}
-	if (eeprom->offset)
-		memcpy(data, data + eeprom->offset, eeprom->len);
 err:
 	return be_cmd_status(status);
 }
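The old code always read page A0 from offset 0 and then shifted the buffer with memcpy; the rewrite clamps the requested window [offset, offset + len) against the PAGE_DATA_LEN boundary and issues at most one read per page. The split is plain arithmetic and can be modeled standalone (hypothetical helper, not driver code):

	#include <stdint.h>

	#define PAGE_DATA_LEN 256

	/* How many bytes of [off, off + len) land on page A0; the remainder,
	 * if any, belongs to page A2 starting at (off - PAGE_DATA_LEN). */
	static uint32_t bytes_on_page_a0(uint32_t off, uint32_t len)
	{
		uint32_t end = off + len;

		if (off >= PAGE_DATA_LEN)
			return 0;
		return (end < PAGE_DATA_LEN ? end : PAGE_DATA_LEN) - off;
	}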


@@ -630,7 +630,6 @@ struct e1000_phy_info {
 	bool disable_polarity_correction;
 	bool is_mdix;
 	bool polarity_correction;
-	bool reset_disable;
 	bool speed_downgraded;
 	bool autoneg_wait_to_complete;
 };


@@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
 	bool blocked = false;
 	int i = 0;
 
-	/* Check the PHY (LCD) reset flag */
-	if (hw->phy.reset_disable)
-		return true;
-
 	while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
 	       (i++ < 30))
 		usleep_range(10000, 11000);


@@ -271,7 +271,6 @@
 #define I217_CGFREG_ENABLE_MTA_RESET	0x0002
 #define I217_MEMPWR			PHY_REG(772, 26)
 #define I217_MEMPWR_DISABLE_SMB_RELEASE	0x0010
-#define I217_MEMPWR_MOEM		0x1000
 
 /* Receive Address Initial CRC Calculation */
 #define E1000_PCH_RAICC(_n)	(0x05F50 + ((_n) * 4))


@@ -6499,6 +6499,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
 
 	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
 	    hw->mac.type >= e1000_pch_adp) {
+		/* Keep the GPT clock enabled for CSME */
+		mac_data = er32(FEXTNVM);
+		mac_data |= BIT(3);
+		ew32(FEXTNVM, mac_data);
 		/* Request ME unconfigure the device from S0ix */
 		mac_data = er32(H2ME);
 		mac_data &= ~E1000_H2ME_START_DPG;
@@ -6992,21 +6996,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct e1000_hw *hw = &adapter->hw;
-	u16 phy_data;
 	int rc;
 
-	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
-	    hw->mac.type >= e1000_pch_adp) {
-		/* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
-		e1e_rphy(hw, I217_MEMPWR, &phy_data);
-		phy_data |= I217_MEMPWR_MOEM;
-		e1e_wphy(hw, I217_MEMPWR, phy_data);
-
-		/* Disable LCD reset */
-		hw->phy.reset_disable = true;
-	}
-
 	e1000e_flush_lpic(pdev);
 
 	e1000e_pm_freeze(dev);
@@ -7028,8 +7019,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct e1000_hw *hw = &adapter->hw;
-	u16 phy_data;
 	int rc;
 
 	/* Introduce S0ix implementation */
@@ -7040,17 +7029,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
 	if (rc)
 		return rc;
 
-	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
-	    hw->mac.type >= e1000_pch_adp) {
-		/* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
-		e1e_rphy(hw, I217_MEMPWR, &phy_data);
-		phy_data &= ~I217_MEMPWR_MOEM;
-		e1e_wphy(hw, I217_MEMPWR, phy_data);
-
-		/* Enable LCD reset */
-		hw->phy.reset_disable = false;
-	}
-
 	return e1000e_pm_thaw(dev);
 }


@@ -10631,7 +10631,7 @@ static int i40e_reset(struct i40e_pf *pf)
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
-	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret;
@@ -10639,13 +10639,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 	int v;

 	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
-	    i40e_check_recovery_mode(pf)) {
+	    is_recovery_mode_reported)
 		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
-	}

 	if (test_bit(__I40E_DOWN, pf->state) &&
-	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
-	    !old_recovery_mode_bit)
+	    !test_bit(__I40E_RECOVERY_MODE, pf->state))
 		goto clear_recovery;

 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -10672,13 +10670,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 	 * accordingly with regard to resources initialization
 	 * and deinitialization
 	 */
-	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
-	    old_recovery_mode_bit) {
+	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
 		if (i40e_get_capabilities(pf,
 					  i40e_aqc_opc_list_func_capabilities))
 			goto end_unlock;

-		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+		if (is_recovery_mode_reported) {
 			/* we're staying in recovery mode so we'll reinitialize
 			 * misc vector here
 			 */
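The i40e change above replaces a flag that was re-read at several points with a single const bool snapshot taken on entry, so every branch of the rebuild decides against the same value even if the flag flips mid-rebuild. A compact sketch of that snapshot pattern; check_recovery_mode() here is an illustrative stand-in whose result can change between calls:

#include <stdbool.h>
#include <stdio.h>

static int reads;

/* Illustrative stand-in for i40e_check_recovery_mode(): the underlying
 * state may change between two calls. */
static bool check_recovery_mode(void)
{
	return reads++ == 0;	/* true on the first read only */
}

static void rebuild(void)
{
	const bool is_recovery_mode_reported = check_recovery_mode();

	/* Both decisions see the same snapshot; re-reading the flag here
	 * could take the recovery branch ... */
	if (is_recovery_mode_reported)
		puts("recovery path: reinitialize misc vector");

	/* ... and the normal branch within the same invocation. */
	if (!is_recovery_mode_reported)
		puts("normal path: full rebuild");
}

int main(void)
{
	rebuild();
	return 0;
}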


@@ -1250,11 +1250,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
 	struct iavf_rx_buffer *rx_buffer;

-	if (!size)
-		return NULL;
-
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	prefetchw(rx_buffer->page);
+	if (!size)
+		return rx_buffer;

 	/* we are reusing so sync this buffer for CPU use */
 	dma_sync_single_range_for_cpu(rx_ring->dev,


@@ -6159,6 +6159,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
 	u32 value = 0;

+	if (IGC_REMOVED(hw_addr))
+		return ~value;
+
 	value = readl(&hw_addr[reg]);

 	/* reads should not return all F's */


@@ -306,6 +306,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
 #define wr32(reg, val) \
 do { \
 	u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
+	if (!IGC_REMOVED(hw_addr)) \
 		writel((val), &hw_addr[(reg)]); \
 } while (0)
@@ -318,4 +319,6 @@ do { \

 #define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))

+#define IGC_REMOVED(h) unlikely(!(h))
+
 #endif
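IGC_REMOVED(hw_addr) simply tests for a NULL mapping: on surprise removal the driver clears hw->hw_addr, after which reads return all F's (what a removed PCIe device yields on the bus) and writes are dropped instead of faulting. A hedged userspace analogue of the guard:

#include <stdint.h>
#include <stdio.h>

/* Analogue of the driver's guard: a NULL mapping means the device is gone. */
#define REMOVED(h) (!(h))

static volatile uint32_t fake_bar[16];	/* stands in for the mapped BAR */

static uint32_t rd32(volatile uint32_t *hw_addr, unsigned int reg)
{
	if (REMOVED(hw_addr))
		return ~0u;	/* mimic a read from a removed device */
	return hw_addr[reg];
}

static void wr32(volatile uint32_t *hw_addr, unsigned int reg, uint32_t val)
{
	if (!REMOVED(hw_addr))
		hw_addr[reg] = val;	/* silently dropped once unmapped */
}

int main(void)
{
	volatile uint32_t *hw = fake_bar;

	wr32(hw, 3, 0x1234);
	printf("present: 0x%x\n", rd32(hw, 3));

	hw = NULL;		/* surprise removal: mapping torn down */
	wr32(hw, 3, 0x5678);	/* must not crash */
	printf("removed: 0x%x\n", rd32(hw, 3));
	return 0;
}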


@@ -770,6 +770,7 @@ struct ixgbe_adapter {
 #ifdef CONFIG_IXGBE_IPSEC
 	struct ixgbe_ipsec *ipsec;
 #endif /* CONFIG_IXGBE_IPSEC */
+	spinlock_t vfs_lock;
 };

 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)


@@ -6397,6 +6397,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	/* n-tuple support exists, always init our spinlock */
 	spin_lock_init(&adapter->fdir_perfect_lock);

+	/* init spinlock to avoid concurrency of VF resources */
+	spin_lock_init(&adapter->vfs_lock);
+
 #ifdef CONFIG_IXGBE_DCB
 	ixgbe_init_dcb(adapter);
 #endif


@@ -204,10 +204,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 {
 	unsigned int num_vfs = adapter->num_vfs, vf;
+	unsigned long flags;
 	int rss;

+	spin_lock_irqsave(&adapter->vfs_lock, flags);
 	/* set num VFs to 0 to prevent access to vfinfo */
 	adapter->num_vfs = 0;
+	spin_unlock_irqrestore(&adapter->vfs_lock, flags);

 	/* put the reference to all of the vf devices */
 	for (vf = 0; vf < num_vfs; ++vf) {
@@ -1305,8 +1308,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 void ixgbe_msg_task(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned long flags;
 	u32 vf;

+	spin_lock_irqsave(&adapter->vfs_lock, flags);
 	for (vf = 0; vf < adapter->num_vfs; vf++) {
 		/* process any reset requests */
 		if (!ixgbe_check_for_rst(hw, vf))
@@ -1320,6 +1325,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
 		if (!ixgbe_check_for_ack(hw, vf))
 			ixgbe_rcv_ack_from_vf(adapter, vf);
 	}
+	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
 }

 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
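The new vfs_lock closes a window in which ixgbe_disable_sriov() could free VF state while ixgbe_msg_task() was still iterating adapter->num_vfs from the mailbox path. A minimal pthread sketch of the same discipline; a mutex stands in for the kernel's spin_lock_irqsave, and all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_vfs;
static int *vfinfo;		/* freed on teardown, like adapter->vfinfo */

static void msg_task(void)
{
	/* The whole scan runs under the lock, so num_vfs and vfinfo stay
	 * consistent for its duration. */
	pthread_mutex_lock(&vfs_lock);
	for (unsigned int vf = 0; vf < num_vfs; vf++)
		vfinfo[vf]++;	/* would be mailbox processing */
	pthread_mutex_unlock(&vfs_lock);
}

static void disable_sriov(void)
{
	pthread_mutex_lock(&vfs_lock);
	num_vfs = 0;		/* no new users past this point */
	pthread_mutex_unlock(&vfs_lock);

	/* Safe to free: any concurrent msg_task() now sees num_vfs == 0. */
	free(vfinfo);
	vfinfo = NULL;
}

int main(void)
{
	num_vfs = 4;
	vfinfo = calloc(num_vfs, sizeof(*vfinfo));
	msg_task();
	disable_sriov();
	msg_task();		/* iterates zero times, touches nothing */
	puts("ok");
	return 0;
}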
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)


@@ -5196,7 +5196,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
 {
 	const struct fib_nh *nh = fib_info_nh(fi, 0);

-	return nh->fib_nh_scope == RT_SCOPE_LINK ||
+	return nh->fib_nh_gw_family ||
 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
 }
@@ -9588,7 +9588,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
 	unsigned long *fields = config->fields;
 	u32 hash_fields;

-	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
 	case 0:
 		mlxsw_sp_mp4_hash_outer_addr(config);
 		break;
@@ -9606,7 +9606,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
 		mlxsw_sp_mp_hash_inner_l3(config);
 		break;
 	case 3:
-		hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
 		/* Outer */
 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
 		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
@@ -9787,13 +9787,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct net *net = mlxsw_sp_net(mlxsw_sp);
-	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
 	u64 max_rifs;
+	bool usp;

 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
 		return -EIO;

 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);

 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
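The READ_ONCE() wrappers here, like the many sysctl data-race fixes in this series, force one untorn load of a value the sysctl handler may update locklessly, and then use that single snapshot. A userspace analogue using C11 relaxed atomics (READ_ONCE/WRITE_ONCE are kernel macros; the atomics below are the closest portable equivalent):

#include <stdatomic.h>
#include <stdio.h>

/* Shared tunable, updated by one thread, read by many. */
static _Atomic int ip_default_ttl = 64;

/* Writer side: what the sysctl handler would do. */
static void set_ttl(int v)
{
	atomic_store_explicit(&ip_default_ttl, v, memory_order_relaxed);
}

/* Reader side: load once, then use the snapshot consistently.
 * Reading the variable twice could observe two different values. */
static int build_packet_ttl(void)
{
	int ttl = atomic_load_explicit(&ip_default_ttl, memory_order_relaxed);
	return ttl;
}

int main(void)
{
	printf("ttl=%d\n", build_packet_ttl());
	set_ttl(128);
	printf("ttl=%d\n", build_packet_ttl());
	return 0;
}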


@@ -472,7 +472,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
 			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
 			ip_rt_put(rt);
 		} else {
-			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+			set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
 		}
 	}


@@ -219,6 +219,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
 	if (queue == 0 || queue == 4) {
 		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
 		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+	} else if (queue > 4) {
+		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
 	} else {
 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
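The fix above rebases the field index by 4 for queues above 4, because those queues live in a second map register whose fields start again at slot 0. A small sketch of the index math with invented field widths (the real MTL_RXQ_DMA_* layout differs in detail):

#include <stdint.h>
#include <stdio.h>

/* Invented layout: a 4-bit channel field every 8 bits, four queues per
 * 32-bit register. */
#define QXMDMACH_SHIFT(q)	(8 * (q))
#define QXMDMACH_MASK(q)	(0xfu << QXMDMACH_SHIFT(q))
#define QXMDMACH(ch, q)		((uint32_t)(ch) << QXMDMACH_SHIFT(q))

static uint32_t map_regs[2];	/* MAP0 for queues 0-3, MAP1 for 4-7 */

static void map_queue_to_chan(unsigned int queue, unsigned int chan)
{
	unsigned int reg = queue / 4;
	unsigned int field = queue % 4;	/* the "queue - 4" rebase, generalized */
	uint32_t val = map_regs[reg];

	val &= ~QXMDMACH_MASK(field);
	val |= QXMDMACH(chan, field);
	map_regs[reg] = val;
}

int main(void)
{
	/* Without the rebase, queue 5 would shift by five fields and clobber
	 * bits far outside its own slot in MAP1. */
	map_queue_to_chan(5, 3);
	printf("MAP1 = 0x%08x\n", map_regs[1]);	/* prints 0x00000300 */
	return 0;
}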


@@ -800,14 +800,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
 		netdev_warn(priv->dev,
 			    "Setting EEE tx-lpi is not supported\n");

-	if (priv->hw->xpcs) {
-		ret = xpcs_config_eee(priv->hw->xpcs,
-				      priv->plat->mult_fact_100ns,
-				      edata->eee_enabled);
-		if (ret)
-			return ret;
-	}
-
 	if (!edata->eee_enabled)
 		stmmac_disable_eee_mode(priv);


@@ -844,19 +844,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
 	struct timespec64 now;
 	u32 sec_inc = 0;
 	u64 temp = 0;
-	int ret;

 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 		return -EOPNOTSUPP;

-	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
-	if (ret < 0) {
-		netdev_warn(priv->dev,
-			    "failed to enable PTP reference clock: %pe\n",
-			    ERR_PTR(ret));
-		return ret;
-	}
-
 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
 	priv->systime_flags = systime_flags;

@@ -3325,6 +3316,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)

 	stmmac_mmc_setup(priv);

+	if (ptp_register) {
+		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+		if (ret < 0)
+			netdev_warn(priv->dev,
+				    "failed to enable PTP reference clock: %pe\n",
+				    ERR_PTR(ret));
+	}
+
 	ret = stmmac_init_ptp(priv);
 	if (ret == -EOPNOTSUPP)
 		netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -7279,8 +7278,6 @@ int stmmac_dvr_remove(struct device *dev)
 	netdev_info(priv->dev, "%s: removing driver", __func__);

 	pm_runtime_get_sync(dev);
-	pm_runtime_disable(dev);
-	pm_runtime_put_noidle(dev);

 	stmmac_stop_all_dma(priv);
 	stmmac_mac_set(priv, priv->ioaddr, false);
@@ -7307,6 +7304,9 @@ int stmmac_dvr_remove(struct device *dev)
 	mutex_destroy(&priv->lock);
 	bitmap_free(priv->af_xdp_zc_qps);

+	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);


@@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
 		if (ret)
 			return ret;

-		stmmac_init_tstamp_counter(priv, priv->systime_flags);
+		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+		if (ret < 0) {
+			netdev_warn(priv->dev,
+				    "failed to enable PTP reference clock: %pe\n",
+				    ERR_PTR(ret));
+			return ret;
+		}
 	}

 	return 0;


@@ -1097,7 +1097,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;

 	len = run_ebpf_filter(tun, skb, len);
-	if (len == 0 || pskb_trim(skb, len))
+	if (len == 0)
+		goto drop;
+
+	if (pskb_trim(skb, len))
 		goto drop;

 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))


@@ -1796,7 +1796,7 @@ static const struct driver_info ax88179_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1809,7 +1809,7 @@ static const struct driver_info ax88178a_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1822,7 +1822,7 @@ static const struct driver_info cypress_GX3_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1835,7 +1835,7 @@ static const struct driver_info dlink_dub1312_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1848,7 +1848,7 @@ static const struct driver_info sitecom_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1861,7 +1861,7 @@ static const struct driver_info samsung_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1874,7 +1874,7 @@ static const struct driver_info lenovo_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1887,7 +1887,7 @@ static const struct driver_info belkin_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1900,7 +1900,7 @@ static const struct driver_info toshiba_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };
@@ -1913,7 +1913,7 @@ static const struct driver_info mct_info = {
 	.link_reset = ax88179_link_reset,
 	.reset = ax88179_reset,
 	.stop = ax88179_stop,
-	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
 	.rx_fixup = ax88179_rx_fixup,
 	.tx_fixup = ax88179_tx_fixup,
 };


@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"12"

 /* Information for net */
-#define NET_VERSION		"12"
+#define NET_VERSION		"13"

 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -5915,7 +5915,8 @@ static void r8153_enter_oob(struct r8152 *tp)

 	wait_oob_link_list_ready(tp);

-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);

 	switch (tp->version) {
 	case RTL_VER_03:
@@ -5951,6 +5952,10 @@ static void r8153_enter_oob(struct r8152 *tp)
 	ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);

+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+	ocp_data |= MCU_BORW_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
 	rxdy_gated_en(tp, false);

 	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -6553,6 +6558,9 @@ static void rtl8156_down(struct r8152 *tp)
 	rtl_disable(tp);
 	rtl_reset_bmu(tp);

+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
+
 	/* Clear teredo wake event. bit[15:8] is the teredo wakeup
 	 * type. Set it to zero. bits[7:0] are the W1C bits about
 	 * the events. Set them to all 1 to clear them.
@@ -6563,6 +6571,10 @@ static void rtl8156_down(struct r8152 *tp)
 	ocp_data |= NOW_IS_OOB;
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);

+	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+	ocp_data |= MCU_BORW_EN;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
 	rtl_rx_vlan_en(tp, true);
 	rxdy_gated_en(tp, false);


@@ -2,7 +2,8 @@
 /*
  * Copyright(c) 2021 Intel Corporation
  */
-
+#ifndef __iwl_fw_uefi__
+#define __iwl_fw_uefi__

 #define IWL_UEFI_OEM_PNVM_NAME		L"UefiCnvWlanOemSignedPnvm"
 #define IWL_UEFI_REDUCED_POWER_NAME	L"UefiCnvWlanReducedPower"
@@ -40,3 +41,5 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
 	return ERR_PTR(-EOPNOTSUPP);
 }
 #endif /* CONFIG_EFI */
+
+#endif /* __iwl_fw_uefi__ */


@@ -1100,7 +1100,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
 			continue;

 		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
-		mtxq->wcid = wcid;
+		mtxq->wcid = wcid->idx;
 	}

 	ewma_signal_init(&wcid->rssi);


@@ -263,7 +263,7 @@ struct mt76_wcid {
 };

 struct mt76_txq {
-	struct mt76_wcid *wcid;
+	u16 wcid;

 	u16 agg_ssn;
 	bool send_bar;


@@ -74,7 +74,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);

 	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
-	mtxq->wcid = &mvif->sta.wcid;
+	mtxq->wcid = idx;
 	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);

 out:


@@ -235,7 +235,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
 	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
 	if (vif->txq) {
 		mtxq = (struct mt76_txq *)vif->txq->drv_priv;
-		mtxq->wcid = &mvif->sta.wcid;
+		mtxq->wcid = idx;
 	}

 	ret = mt7615_mcu_add_dev_info(phy, vif, true);


@@ -288,7 +288,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
 	mvif->group_wcid.idx = MT_VIF_WCID(idx);
 	mvif->group_wcid.hw_key_idx = -1;
 	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
-	mtxq->wcid = &mvif->group_wcid;
+	rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
+	mtxq->wcid = MT_VIF_WCID(idx);
 }

 int
@@ -341,6 +342,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
 	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

 	dev->mt76.vif_mask &= ~BIT(mvif->idx);
+	rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL);
 }
 EXPORT_SYMBOL_GPL(mt76x02_remove_interface);


@@ -243,7 +243,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
 	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
 	if (vif->txq) {
 		mtxq = (struct mt76_txq *)vif->txq->drv_priv;
-		mtxq->wcid = &mvif->sta.wcid;
+		mtxq->wcid = idx;
 	}

 	if (vif->type != NL80211_IFTYPE_AP &&


@@ -283,7 +283,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
 	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
 	if (vif->txq) {
 		mtxq = (struct mt76_txq *)vif->txq->drv_priv;
-		mtxq->wcid = &mvif->sta.wcid;
+		mtxq->wcid = idx;
 	}

 out:


@@ -1306,7 +1306,7 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
 	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
 }

-int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
 {
 	int i, err = 0;

@@ -1325,6 +1325,26 @@ int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 	return err;
 }

+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+	struct mt76_phy *mphy = &dev->mt76.phy;
+	struct mt76_connac_pm *pm = &dev->pm;
+	int err;
+
+	err = __mt7921e_mcu_drv_pmctrl(dev);
+	if (err < 0)
+		goto out;
+
+	mt7921_wpdma_reinit_cond(dev);
+	clear_bit(MT76_STATE_PM, &mphy->state);
+	pm->stats.last_wake_event = jiffies;
+	pm->stats.doze_time += pm->stats.last_wake_event -
+			       pm->stats.last_doze_event;
+out:
+	return err;
+}
+
 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 {
 	struct mt76_phy *mphy = &dev->mt76.phy;
@@ -1337,16 +1357,6 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 		goto out;

 	err = __mt7921_mcu_drv_pmctrl(dev);
-	if (err < 0)
-		goto out;
-
-	mt7921_wpdma_reinit_cond(dev);
-	clear_bit(MT76_STATE_PM, &mphy->state);
-	pm->stats.last_wake_event = jiffies;
-	pm->stats.doze_time += pm->stats.last_wake_event -
-			       pm->stats.last_doze_event;

 out:
 	mutex_unlock(&pm->mutex);


@@ -374,6 +374,7 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
 			 bool enable);
 void mt7921_scan_work(struct work_struct *work);
 int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);


@@ -95,6 +95,14 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
 		u32 mapped;
 		u32 size;
 	} fixed_map[] = {
+		{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+		{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+		{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+		{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+		{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+		{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+		{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+		{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
 		{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
 		{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
 		{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
@@ -109,22 +117,15 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
 		{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
 		{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
 		{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
-		{ 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+		{ 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
+		{ 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
 		{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
 		{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
-		{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
 		{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
 		{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
-		{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
-		{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
-		{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
-		{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
-		{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
 		{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
 		{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
-		{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
 		{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
-		{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
 		{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
 		{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
 		{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
@@ -191,7 +192,6 @@ static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
 	return dev->bus_ops->rmw(mdev, addr, mask, val);
 }

-
 static int mt7921_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *id)
 {
@@ -264,7 +264,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
 	bus_ops->rmw = mt7921_rmw;
 	dev->mt76.bus = bus_ops;

-	ret = __mt7921_mcu_drv_pmctrl(dev);
+	ret = __mt7921e_mcu_drv_pmctrl(dev);
 	if (ret)
 		goto err_free_dev;


@@ -14,7 +14,7 @@
 #define MT_MCU_INT_EVENT_SER_TRIGGER	BIT(2)
 #define MT_MCU_INT_EVENT_RESET_DONE	BIT(3)

-#define MT_PLE_BASE			0x8000
+#define MT_PLE_BASE			0x820c0000
 #define MT_PLE(ofs)			(MT_PLE_BASE + (ofs))

 #define MT_PLE_FL_Q0_CTRL		MT_PLE(0x3e0)
@@ -25,7 +25,7 @@
 #define MT_PLE_AC_QEMPTY(_n)		MT_PLE(0x500 + 0x40 * (_n))
 #define MT_PLE_AMSDU_PACK_MSDU_CNT(n)	MT_PLE(0x10e0 + ((n) << 2))

-#define MT_MDP_BASE			0xf000
+#define MT_MDP_BASE			0x820cd000
 #define MT_MDP(ofs)			(MT_MDP_BASE + (ofs))

 #define MT_MDP_DCR0			MT_MDP(0x000)
@@ -48,7 +48,7 @@
 #define MT_MDP_TO_WM			1

 /* TMAC: band 0(0x21000), band 1(0xa1000) */
-#define MT_WF_TMAC_BASE(_band)		((_band) ? 0xa1000 : 0x21000)
+#define MT_WF_TMAC_BASE(_band)		((_band) ? 0x820f4000 : 0x820e4000)
 #define MT_WF_TMAC(_band, ofs)		(MT_WF_TMAC_BASE(_band) + (ofs))

 #define MT_TMAC_TCR0(_band)		MT_WF_TMAC(_band, 0)
@@ -73,7 +73,7 @@
 #define MT_TMAC_TRCR0(_band)		MT_WF_TMAC(_band, 0x09c)
 #define MT_TMAC_TFCR0(_band)		MT_WF_TMAC(_band, 0x1e0)

-#define MT_WF_DMA_BASE(_band)		((_band) ? 0xa1e00 : 0x21e00)
+#define MT_WF_DMA_BASE(_band)		((_band) ? 0x820f7000 : 0x820e7000)
 #define MT_WF_DMA(_band, ofs)		(MT_WF_DMA_BASE(_band) + (ofs))

 #define MT_DMA_DCR0(_band)		MT_WF_DMA(_band, 0x000)
@@ -81,7 +81,7 @@
 #define MT_DMA_DCR0_RXD_G5_EN		BIT(23)

 /* LPON: band 0(0x24200), band 1(0xa4200) */
-#define MT_WF_LPON_BASE(_band)		((_band) ? 0xa4200 : 0x24200)
+#define MT_WF_LPON_BASE(_band)		((_band) ? 0x820fb000 : 0x820eb000)
 #define MT_WF_LPON(_band, ofs)		(MT_WF_LPON_BASE(_band) + (ofs))

 #define MT_LPON_UTTR0(_band)		MT_WF_LPON(_band, 0x080)
@@ -92,7 +92,7 @@
 #define MT_LPON_TCR_SW_WRITE		BIT(0)

 /* MIB: band 0(0x24800), band 1(0xa4800) */
-#define MT_WF_MIB_BASE(_band)		((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB_BASE(_band)		((_band) ? 0x820fd000 : 0x820ed000)
 #define MT_WF_MIB(_band, ofs)		(MT_WF_MIB_BASE(_band) + (ofs))

 #define MT_MIB_SCR1(_band)		MT_WF_MIB(_band, 0x004)
@@ -141,7 +141,7 @@
 #define MT_MIB_ARNG(_band, n)		MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
 #define MT_MIB_ARNCR_RANGE(val, n)	(((val) >> ((n) << 3)) & GENMASK(7, 0))

-#define MT_WTBLON_TOP_BASE		0x34000
+#define MT_WTBLON_TOP_BASE		0x820d4000
 #define MT_WTBLON_TOP(ofs)		(MT_WTBLON_TOP_BASE + (ofs))
 #define MT_WTBLON_TOP_WDUCR		MT_WTBLON_TOP(0x200)
 #define MT_WTBLON_TOP_WDUCR_GROUP	GENMASK(2, 0)
@@ -151,7 +151,7 @@
 #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR	BIT(12)
 #define MT_WTBL_UPDATE_BUSY		BIT(31)

-#define MT_WTBL_BASE			0x38000
+#define MT_WTBL_BASE			0x820d8000
 #define MT_WTBL_LMAC_ID			GENMASK(14, 8)
 #define MT_WTBL_LMAC_DW			GENMASK(7, 2)
 #define MT_WTBL_LMAC_OFFS(_id, _dw)	(MT_WTBL_BASE | \
@@ -159,7 +159,7 @@
 					 FIELD_PREP(MT_WTBL_LMAC_DW, _dw))

 /* AGG: band 0(0x20800), band 1(0xa0800) */
-#define MT_WF_AGG_BASE(_band)		((_band) ? 0xa0800 : 0x20800)
+#define MT_WF_AGG_BASE(_band)		((_band) ? 0x820f2000 : 0x820e2000)
 #define MT_WF_AGG(_band, ofs)		(MT_WF_AGG_BASE(_band) + (ofs))

 #define MT_AGG_AWSCR0(_band, _n)	MT_WF_AGG(_band, 0x05c + (_n) * 4)
@@ -190,7 +190,7 @@
 #define MT_AGG_ATCR3(_band)		MT_WF_AGG(_band, 0x0f4)

 /* ARB: band 0(0x20c00), band 1(0xa0c00) */
-#define MT_WF_ARB_BASE(_band)		((_band) ? 0xa0c00 : 0x20c00)
+#define MT_WF_ARB_BASE(_band)		((_band) ? 0x820f3000 : 0x820e3000)
 #define MT_WF_ARB(_band, ofs)		(MT_WF_ARB_BASE(_band) + (ofs))

 #define MT_ARB_SCR(_band)		MT_WF_ARB(_band, 0x080)
@@ -200,7 +200,7 @@
 #define MT_ARB_DRNGR0(_band, _n)	MT_WF_ARB(_band, 0x194 + (_n) * 4)

 /* RMAC: band 0(0x21400), band 1(0xa1400) */
-#define MT_WF_RMAC_BASE(_band)		((_band) ? 0xa1400 : 0x21400)
+#define MT_WF_RMAC_BASE(_band)		((_band) ? 0x820f5000 : 0x820e5000)
 #define MT_WF_RMAC(_band, ofs)		(MT_WF_RMAC_BASE(_band) + (ofs))

 #define MT_WF_RFCR(_band)		MT_WF_RMAC(_band, 0x000)


@@ -406,12 +406,11 @@ mt76_txq_stopped(struct mt76_queue *q)

 static int
 mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
-		    struct mt76_txq *mtxq)
+		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
 {
 	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
-	struct mt76_wcid *wcid = mtxq->wcid;
 	struct ieee80211_tx_info *info;
 	struct sk_buff *skb;
 	int n_frames = 1;
@@ -491,8 +490,8 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 			break;

 		mtxq = (struct mt76_txq *)txq->drv_priv;
-		wcid = mtxq->wcid;
-		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
+		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
 			continue;

 		spin_lock_bh(&q->lock);
@@ -511,7 +510,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 		}

 		if (!mt76_txq_stopped(q))
-			n_frames = mt76_txq_send_burst(phy, q, mtxq);
+			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

 		spin_unlock_bh(&q->lock);
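Storing a wcid index in mt76_txq and re-resolving it through the RCU-protected dev->wcid[] table on every scheduling pass means a freed station can no longer be reached through a stale cached pointer. A userspace analogue of the pointer-vs-index revalidation using a C11 atomic slot (kernel RCU additionally defers the free until readers finish; that part is omitted here):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wcid { int idx; int ps; };

#define N_WCID 8
static _Atomic(struct wcid *) wcid_tbl[N_WCID];

struct txq { unsigned int wcid; };	/* index, not a raw pointer */

static void schedule_txq(struct txq *q)
{
	/* Revalidate on every use: a removed station reads back as NULL
	 * instead of a dangling pointer. */
	struct wcid *w = atomic_load_explicit(&wcid_tbl[q->wcid],
					      memory_order_acquire);
	if (!w || w->ps)
		return;
	printf("sending burst for wcid %d\n", w->idx);
}

int main(void)
{
	struct wcid *w = malloc(sizeof(*w));
	struct txq q = { .wcid = 3 };

	w->idx = 3;
	w->ps = 0;
	atomic_store_explicit(&wcid_tbl[3], w, memory_order_release);
	schedule_txq(&q);

	/* Station removal: clear the slot, then free. */
	atomic_store_explicit(&wcid_tbl[3], NULL, memory_order_release);
	free(w);
	schedule_txq(&q);	/* safely skipped */
	return 0;
}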


@@ -3660,7 +3660,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
 }

 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
-		unsigned nsid, struct nvme_ns_ids *ids)
+		unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
 {
 	struct nvme_ns_head *head;
 	size_t size = sizeof(*head);
@@ -3684,15 +3684,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
 	head->subsys = ctrl->subsys;
 	head->ns_id = nsid;
 	head->ids = *ids;
+	head->shared = is_shared;
 	kref_init(&head->ref);

-	ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids);
-	if (ret) {
-		dev_err(ctrl->device,
-			"duplicate IDs for nsid %d\n", nsid);
-		goto out_cleanup_srcu;
-	}
-
 	if (head->ids.csi) {
 		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
 		if (ret)
@@ -3731,12 +3725,17 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 	mutex_lock(&ctrl->subsys->lock);
 	head = nvme_find_ns_head(ctrl, nsid);
 	if (!head) {
-		head = nvme_alloc_ns_head(ctrl, nsid, ids);
+		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+		if (ret) {
+			dev_err(ctrl->device,
+				"duplicate IDs for nsid %d\n", nsid);
+			goto out_unlock;
+		}
+
+		head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
 		if (IS_ERR(head)) {
 			ret = PTR_ERR(head);
 			goto out_unlock;
 		}
-		head->shared = is_shared;
 	} else {
 		ret = -EINVAL;
 		if (!is_shared || !head->shared) {
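Moving nvme_subsys_check_duplicate_ids() out of nvme_alloc_ns_head() and under subsys->lock beside the head lookup makes find-or-create plus the uniqueness check a single atomic step, so two controllers reporting the same IDs cannot both pass. A pthread sketch of that find-or-create-with-validation shape; all names here are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct head { int nsid; char id[16]; struct head *next; };

static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER;
static struct head *heads;

static struct head *find_head(int nsid)
{
	for (struct head *h = heads; h; h = h->next)
		if (h->nsid == nsid)
			return h;
	return NULL;
}

static int id_is_duplicate(const char *id)
{
	for (struct head *h = heads; h; h = h->next)
		if (!strcmp(h->id, id))
			return 1;
	return 0;
}

/* Lookup, uniqueness check and insertion all happen under one lock, so
 * no second caller can slip in between the check and the create. */
static struct head *init_head(int nsid, const char *id)
{
	struct head *h;

	pthread_mutex_lock(&subsys_lock);
	h = find_head(nsid);
	if (!h) {
		if (id_is_duplicate(id)) {
			fprintf(stderr, "duplicate IDs for nsid %d\n", nsid);
			goto out;
		}
		h = calloc(1, sizeof(*h));
		h->nsid = nsid;
		strncpy(h->id, id, sizeof(h->id) - 1);
		h->next = heads;
		heads = h;
	}
out:
	pthread_mutex_unlock(&subsys_lock);
	return h;
}

int main(void)
{
	init_head(1, "uuid-a");
	init_head(2, "uuid-a");	/* rejected: same ID, different nsid */
	init_head(1, "uuid-a");	/* fine: existing head is found */
	return 0;
}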


@@ -1142,6 +1142,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
 		u8 buffer[sizeof(struct pci_delete_interrupt)];
 	} ctxt;

+	if (!int_desc->vector_count) {
+		kfree(int_desc);
+		return;
+	}
 	memset(&ctxt, 0, sizeof(ctxt));
 	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
 	int_pkt->message_type.type =
@@ -1204,6 +1208,28 @@ static void hv_irq_mask(struct irq_data *data)
 	pci_msi_mask_irq(data);
 }

+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+	struct irq_cfg *cfg = irqd_cfg(data);
+
+	return cfg->vector;
+}
+
+static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
+			  int nvec, msi_alloc_info_t *info)
+{
+	int ret = pci_msi_prepare(domain, dev, nvec, info);
+
+	/*
+	 * By using the interrupt remapper in the hypervisor IOMMU, contiguous
+	 * CPU vectors is not needed for multi-MSI
+	 */
+	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
+		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+	return ret;
+}
+
 /**
  * hv_irq_unmask() - "Unmask" the IRQ by setting its current
  * affinity.
@@ -1219,6 +1245,7 @@ static void hv_irq_unmask(struct irq_data *data)
 	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
 	struct irq_cfg *cfg = irqd_cfg(data);
 	struct hv_retarget_device_interrupt *params;
+	struct tran_int_desc *int_desc;
 	struct hv_pcibus_device *hbus;
 	struct cpumask *dest;
 	cpumask_var_t tmp;
@@ -1233,6 +1260,7 @@ static void hv_irq_unmask(struct irq_data *data)
 	pdev = msi_desc_to_pci_dev(msi_desc);
 	pbus = pdev->bus;
 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+	int_desc = data->chip_data;

 	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);

@@ -1240,7 +1268,8 @@ static void hv_irq_unmask(struct irq_data *data)
 	memset(params, 0, sizeof(*params));
 	params->partition_id = HV_PARTITION_ID_SELF;
 	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
-	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
+	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
+	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
 			   (hbus->hdev->dev_instance.b[4] << 16) |
 			   (hbus->hdev->dev_instance.b[7] << 8) |
@@ -1341,12 +1370,12 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,

 static u32 hv_compose_msi_req_v1(
 	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
-	u32 slot, u8 vector)
+	u32 slot, u8 vector, u8 vector_count)
 {
 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
 	int_pkt->wslot.slot = slot;
 	int_pkt->int_desc.vector = vector;
-	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.vector_count = vector_count;
 	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;

 	/*
@@ -1369,14 +1398,14 @@ static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)

 static u32 hv_compose_msi_req_v2(
 	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
-	u32 slot, u8 vector)
+	u32 slot, u8 vector, u8 vector_count)
 {
 	int cpu;

 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
 	int_pkt->wslot.slot = slot;
 	int_pkt->int_desc.vector = vector;
-	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.vector_count = vector_count;
 	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
 	cpu = hv_compose_msi_req_get_cpu(affinity);
 	int_pkt->int_desc.processor_array[0] =
@@ -1388,7 +1417,7 @@ static u32 hv_compose_msi_req_v2(

 static u32 hv_compose_msi_req_v3(
 	struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
-	u32 slot, u32 vector)
+	u32 slot, u32 vector, u8 vector_count)
 {
 	int cpu;

@@ -1396,7 +1425,7 @@ static u32 hv_compose_msi_req_v3(
 	int_pkt->wslot.slot = slot;
 	int_pkt->int_desc.vector = vector;
 	int_pkt->int_desc.reserved = 0;
-	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.vector_count = vector_count;
 	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
 	cpu = hv_compose_msi_req_get_cpu(affinity);
 	int_pkt->int_desc.processor_array[0] =
@@ -1419,7 +1448,6 @@ static u32 hv_compose_msi_req_v3(
  */
 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	struct irq_cfg *cfg = irqd_cfg(data);
 	struct hv_pcibus_device *hbus;
 	struct vmbus_channel *channel;
 	struct hv_pci_dev *hpdev;
@@ -1428,6 +1456,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct cpumask *dest;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
+	struct msi_desc *msi_desc;
+	u8 vector, vector_count;
 	struct {
 		struct pci_packet pci_pkt;
 		union {
@@ -1440,7 +1470,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	u32 size;
 	int ret;

-	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+	/* Reuse the previous allocation */
+	if (data->chip_data) {
+		int_desc = data->chip_data;
+		msg->address_hi = int_desc->address >> 32;
+		msg->address_lo = int_desc->address & 0xffffffff;
+		msg->data = int_desc->data;
+		return;
+	}
+
+	msi_desc = irq_data_get_msi_desc(data);
+	pdev = msi_desc_to_pci_dev(msi_desc);
 	dest = irq_data_get_effective_affinity_mask(data);
 	pbus = pdev->bus;
 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1449,17 +1489,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	if (!hpdev)
 		goto return_null_message;

-	/* Free any previous message that might have already been composed. */
-	if (data->chip_data) {
-		int_desc = data->chip_data;
-		data->chip_data = NULL;
-		hv_int_desc_free(hpdev, int_desc);
-	}
-
 	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
 	if (!int_desc)
 		goto drop_reference;

+	if (!msi_desc->msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+		/*
+		 * If this is not the first MSI of Multi MSI, we already have
+		 * a mapping.  Can exit early.
+		 */
+		if (msi_desc->irq != data->irq) {
+			data->chip_data = int_desc;
+			int_desc->address = msi_desc->msg.address_lo |
+					    (u64)msi_desc->msg.address_hi << 32;
+			int_desc->data = msi_desc->msg.data +
+					 (data->irq - msi_desc->irq);
+			msg->address_hi = msi_desc->msg.address_hi;
+			msg->address_lo = msi_desc->msg.address_lo;
+			msg->data = int_desc->data;
+			put_pcichild(hpdev);
+			return;
+		}
+		/*
+		 * The vector we select here is a dummy value.  The correct
+		 * value gets sent to the hypervisor in unmask().  This needs
+		 * to be aligned with the count, and also not zero.  Multi-msi
+		 * is powers of 2 up to 32, so 32 will always work here.
+		 */
+		vector = 32;
+		vector_count = msi_desc->nvec_used;
+	} else {
+		vector = hv_msi_get_int_vector(data);
+		vector_count = 1;
+	}
+
 	memset(&ctxt, 0, sizeof(ctxt));
 	init_completion(&comp.comp_pkt.host_event);
 	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
@@ -1470,7 +1533,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
 					dest,
 					hpdev->desc.win_slot.slot,
-					cfg->vector);
+					vector,
+					vector_count);
 		break;

 	case PCI_PROTOCOL_VERSION_1_2:
@@ -1478,14 +1542,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
 					dest,
 					hpdev->desc.win_slot.slot,
-					cfg->vector);
+					vector,
+					vector_count);
 		break;

 	case PCI_PROTOCOL_VERSION_1_4:
 		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
 					dest,
 					hpdev->desc.win_slot.slot,
-					cfg->vector);
+					vector,
+					vector_count);
 		break;

 	default:
@@ -1601,7 +1667,7 @@ static struct irq_chip hv_msi_irq_chip = {
 };

 static struct msi_domain_ops hv_msi_ops = {
-	.msi_prepare	= pci_msi_prepare,
+	.msi_prepare	= hv_msi_prepare,
 	.msi_free	= hv_msi_free,
 };


@@ -341,12 +341,12 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
struct armada_37xx_pin_group *grp) struct armada_37xx_pin_group *grp)
{ {
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct device *dev = info->dev;
unsigned int reg = SELECTION; unsigned int reg = SELECTION;
unsigned int mask = grp->reg_mask; unsigned int mask = grp->reg_mask;
int func, val; int func, val;
dev_dbg(info->dev, "enable function %s group %s\n", dev_dbg(dev, "enable function %s group %s\n", name, grp->name);
name, grp->name);
func = match_string(grp->funcs, NB_FUNCS, name); func = match_string(grp->funcs, NB_FUNCS, name);
if (func < 0) if (func < 0)
@@ -722,25 +722,22 @@ static unsigned int armada_37xx_irq_startup(struct irq_data *d)
static int armada_37xx_irqchip_register(struct platform_device *pdev, static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info) struct armada_37xx_pinctrl *info)
{ {
struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip; struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip; struct irq_chip *irqchip = &info->irq_chip;
struct gpio_irq_chip *girq = &gc->irq; struct gpio_irq_chip *girq = &gc->irq;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource res; struct device_node *np;
int ret = -ENODEV, i, nr_irq_parent; int ret = -ENODEV, i, nr_irq_parent;
/* Check if we have at least one gpio-controller child node */ /* Check if we have at least one gpio-controller child node */
for_each_child_of_node(info->dev->of_node, np) { for_each_child_of_node(dev->of_node, np) {
if (of_property_read_bool(np, "gpio-controller")) { if (of_property_read_bool(np, "gpio-controller")) {
ret = 0; ret = 0;
break; break;
} }
} }
if (ret) { if (ret)
dev_err(dev, "no gpio-controller child node\n"); return dev_err_probe(dev, ret, "no gpio-controller child node\n");
return ret;
}
nr_irq_parent = of_irq_count(np); nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock); spin_lock_init(&info->irq_lock);
@@ -750,12 +747,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
return 0; return 0;
} }
if (of_address_to_resource(info->dev->of_node, 1, &res)) { info->base = devm_platform_ioremap_resource(pdev, 1);
dev_err(dev, "cannot find IO resource\n");
return -ENOENT;
}
info->base = devm_ioremap_resource(info->dev, &res);
if (IS_ERR(info->base)) if (IS_ERR(info->base))
return PTR_ERR(info->base); return PTR_ERR(info->base);
@@ -774,8 +766,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
* the chained irq with all of them. * the chained irq with all of them.
*/ */
girq->num_parents = nr_irq_parent; girq->num_parents = nr_irq_parent;
girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent, girq->parents = devm_kcalloc(dev, nr_irq_parent, sizeof(*girq->parents), GFP_KERNEL);
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents) if (!girq->parents)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) { for (i = 0; i < nr_irq_parent; i++) {
@@ -794,11 +785,12 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
static int armada_37xx_gpiochip_register(struct platform_device *pdev, static int armada_37xx_gpiochip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info) struct armada_37xx_pinctrl *info)
{ {
struct device *dev = &pdev->dev;
struct device_node *np; struct device_node *np;
struct gpio_chip *gc; struct gpio_chip *gc;
int ret = -ENODEV; int ret = -ENODEV;
for_each_child_of_node(info->dev->of_node, np) { for_each_child_of_node(dev->of_node, np) {
if (of_find_property(np, "gpio-controller", NULL)) { if (of_find_property(np, "gpio-controller", NULL)) {
ret = 0; ret = 0;
break; break;
@@ -811,19 +803,16 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc = &info->gpio_chip; gc = &info->gpio_chip;
gc->ngpio = info->data->nr_pins; gc->ngpio = info->data->nr_pins;
gc->parent = &pdev->dev; gc->parent = dev;
gc->base = -1; gc->base = -1;
gc->of_node = np; gc->of_node = np;
gc->label = info->data->name; gc->label = info->data->name;
ret = armada_37xx_irqchip_register(pdev, info); ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret) if (ret)
return ret; return ret;
return 0; return devm_gpiochip_add_data(dev, gc, info);
} }
/** /**
@@ -874,13 +863,13 @@ static int armada_37xx_add_function(struct armada_37xx_pmx_func *funcs,
 static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
 {
 	int n, num = 0, funcsize = info->data->nr_pins;
+	struct device *dev = info->dev;
 	for (n = 0; n < info->ngroups; n++) {
 		struct armada_37xx_pin_group *grp = &info->groups[n];
 		int i, j, f;
-		grp->pins = devm_kcalloc(info->dev,
-					 grp->npins + grp->extra_npins,
+		grp->pins = devm_kcalloc(dev, grp->npins + grp->extra_npins,
 					 sizeof(*grp->pins),
 					 GFP_KERNEL);
 		if (!grp->pins)
@@ -898,8 +887,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
 			ret = armada_37xx_add_function(info->funcs, &funcsize,
 						       grp->funcs[f]);
 			if (ret == -EOVERFLOW)
-				dev_err(info->dev,
-					"More functions than pins(%d)\n",
+				dev_err(dev, "More functions than pins(%d)\n",
 					info->data->nr_pins);
 			if (ret < 0)
 				continue;
@@ -925,6 +913,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
 static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
 {
 	struct armada_37xx_pmx_func *funcs = info->funcs;
+	struct device *dev = info->dev;
 	int n;
 	for (n = 0; n < info->nfuncs; n++) {
@@ -932,8 +921,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
 		const char **groups;
 		int g;
-		funcs[n].groups = devm_kcalloc(info->dev,
-					       funcs[n].ngroups,
+		funcs[n].groups = devm_kcalloc(dev, funcs[n].ngroups,
 					       sizeof(*(funcs[n].groups)),
 					       GFP_KERNEL);
 		if (!funcs[n].groups)
@@ -962,6 +950,7 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
 	const struct armada_37xx_pin_data *pin_data = info->data;
 	struct pinctrl_desc *ctrldesc = &info->pctl;
 	struct pinctrl_pin_desc *pindesc, *pdesc;
+	struct device *dev = &pdev->dev;
 	int pin, ret;
 	info->groups = pin_data->groups;
@@ -973,9 +962,7 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
 	ctrldesc->pmxops = &armada_37xx_pmx_ops;
 	ctrldesc->confops = &armada_37xx_pinconf_ops;
-	pindesc = devm_kcalloc(&pdev->dev,
-			       pin_data->nr_pins, sizeof(*pindesc),
-			       GFP_KERNEL);
+	pindesc = devm_kcalloc(dev, pin_data->nr_pins, sizeof(*pindesc), GFP_KERNEL);
 	if (!pindesc)
 		return -ENOMEM;
@@ -994,14 +981,10 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
 	 * we allocate functions for number of pins and hope there are
 	 * fewer unique functions than pins available
 	 */
-	info->funcs = devm_kcalloc(&pdev->dev,
-				   pin_data->nr_pins,
-				   sizeof(struct armada_37xx_pmx_func),
-				   GFP_KERNEL);
+	info->funcs = devm_kcalloc(dev, pin_data->nr_pins, sizeof(*info->funcs), GFP_KERNEL);
 	if (!info->funcs)
 		return -ENOMEM;
 	ret = armada_37xx_fill_group(info);
 	if (ret)
 		return ret;
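The allocation above also switches from sizeof(struct armada_37xx_pmx_func) to sizeof(*info->funcs), which stays correct if the pointee type is ever renamed; devm_kcalloc() additionally overflow-checks count * size and zeroes the buffer. A sketch with a hypothetical foo_func element type:

	#include <linux/device.h>
	#include <linux/slab.h>

	/* Hypothetical element type. */
	struct foo_func {
		const char *name;
	};

	static struct foo_func *foo_alloc_funcs(struct device *dev, size_t n)
	{
		struct foo_func *funcs;

		/* sizeof(*funcs) follows the pointee type automatically;
		 * devm_kcalloc() overflow-checks n * sizeof(*funcs), zeroes
		 * the memory, and frees it when the device is unbound. */
		funcs = devm_kcalloc(dev, n, sizeof(*funcs), GFP_KERNEL);
		return funcs;
	}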
@@ -1010,11 +993,9 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
 	if (ret)
 		return ret;
-	info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
-	if (IS_ERR(info->pctl_dev)) {
-		dev_err(&pdev->dev, "could not register pinctrl driver\n");
-		return PTR_ERR(info->pctl_dev);
-	}
+	info->pctl_dev = devm_pinctrl_register(dev, ctrldesc, info);
+	if (IS_ERR(info->pctl_dev))
+		return dev_err_probe(dev, PTR_ERR(info->pctl_dev), "could not register pinctrl driver\n");
 	return 0;
 }
@@ -1135,28 +1116,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
 	{ },
 };
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.use_raw_spinlock = true,
+};
 static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
 {
 	struct armada_37xx_pinctrl *info;
 	struct device *dev = &pdev->dev;
-	struct device_node *np = dev->of_node;
 	struct regmap *regmap;
+	void __iomem *base;
 	int ret;
-	info = devm_kzalloc(dev, sizeof(struct armada_37xx_pinctrl),
-			    GFP_KERNEL);
+	base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+	if (IS_ERR(base)) {
+		dev_err(dev, "failed to ioremap base address: %pe\n", base);
+		return PTR_ERR(base);
+	}
+	regmap = devm_regmap_init_mmio(dev, base,
+				       &armada_37xx_pinctrl_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "failed to create regmap: %pe\n", regmap);
+		return PTR_ERR(regmap);
+	}
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 	info->dev = dev;
-	regmap = syscon_node_to_regmap(np);
-	if (IS_ERR(regmap)) {
-		dev_err(&pdev->dev, "cannot get regmap\n");
-		return PTR_ERR(regmap);
-	}
 	info->regmap = regmap;
 	info->data = of_device_get_match_data(dev);
 	ret = armada_37xx_pinctrl_register(pdev, info);
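The probe conversion is the substantive change in this file: rather than borrowing a syscon regmap via syscon_node_to_regmap(), the driver now maps its own register window and builds an MMIO regmap over it. Setting use_raw_spinlock makes the regmap's internal lock a raw spinlock, which is required when the regmap is accessed from contexts that already hold raw spinlocks (e.g. irqchip callbacks, notably under PREEMPT_RT). A condensed sketch of the same sequence with hypothetical foo_* names:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static const struct regmap_config foo_regmap_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		/* Raw spinlock so the regmap is safe to touch from contexts
		 * that already hold raw spinlocks. */
		.use_raw_spinlock = true,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		void __iomem *base;
		struct regmap *regmap;

		/* Map resource 0; %pe would print the errno embedded in the
		 * error pointer if logging were added here. */
		base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
		if (IS_ERR(base))
			return dev_err_probe(dev, PTR_ERR(base),
					     "failed to ioremap base address\n");

		regmap = devm_regmap_init_mmio(dev, base, &foo_regmap_config);
		if (IS_ERR(regmap))
			return dev_err_probe(dev, PTR_ERR(regmap),
					     "failed to create regmap\n");

		return 0;
	}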
