Merge 5.15.87 into android14-5.15

Changes in 5.15.87
	usb: dwc3: qcom: Fix memory leak in dwc3_qcom_interconnect_init
	cifs: fix oops during encryption
	Revert "selftests/bpf: Add test for unstable CT lookup API"
	nvme-pci: fix doorbell buffer value endianness
	nvme-pci: fix mempool alloc size
	nvme-pci: fix page size checks
	ACPI: resource: Skip IRQ override on Asus Vivobook K3402ZA/K3502ZA
	ACPI: resource: do IRQ override on LENOVO IdeaPad
	ACPI: resource: do IRQ override on XMG Core 15
	ACPI: resource: do IRQ override on Lenovo 14ALC7
	block, bfq: fix uaf for bfqq in bfq_exit_icq_bfqq
	ata: ahci: Fix PCS quirk application for suspend
	nvme: fix the NVME_CMD_EFFECTS_CSE_MASK definition
	nvmet: don't defer passthrough commands with trivial effects to the workqueue
	fs/ntfs3: Validate BOOT record_size
	fs/ntfs3: Add overflow check for attribute size
	fs/ntfs3: Validate data run offset
	fs/ntfs3: Add null pointer check to attr_load_runs_vcn
	fs/ntfs3: Fix memory leak on ntfs_fill_super() error path
	fs/ntfs3: Add null pointer check for inode operations
	fs/ntfs3: Validate attribute name offset
	fs/ntfs3: Validate buffer length while parsing index
	fs/ntfs3: Validate resident attribute name
	fs/ntfs3: Fix slab-out-of-bounds read in run_unpack
	soundwire: dmi-quirks: add quirk variant for LAPBC710 NUC15
	fs/ntfs3: Validate index root when initialize NTFS security
	fs/ntfs3: Use __GFP_NOWARN allocation at wnd_init()
	fs/ntfs3: Use __GFP_NOWARN allocation at ntfs_fill_super()
	fs/ntfs3: Delete duplicate condition in ntfs_read_mft()
	fs/ntfs3: Fix slab-out-of-bounds in r_page
	objtool: Fix SEGFAULT
	powerpc/rtas: avoid device tree lookups in rtas_os_term()
	powerpc/rtas: avoid scheduling in rtas_os_term()
	HID: multitouch: fix Asus ExpertBook P2 P2451FA trackpoint
	HID: plantronics: Additional PIDs for double volume key presses quirk
	pstore: Properly assign mem_type property
	pstore/zone: Use GFP_ATOMIC to allocate zone buffer
	hfsplus: fix bug causing custom uid and gid being unable to be assigned with mount
	binfmt: Fix error return code in load_elf_fdpic_binary()
	ovl: Use ovl mounter's fsuid and fsgid in ovl_link()
	ALSA: line6: correct midi status byte when receiving data from podxt
	ALSA: line6: fix stack overflow in line6_midi_transmit
	pnode: terminate at peers of source
	mfd: mt6360: Add bounds checking in Regmap read/write call-backs
	md: fix a crash in mempool_free
	mm, compaction: fix fast_isolate_around() to stay within boundaries
	f2fs: should put a page when checking the summary info
	f2fs: allow to read node block after shutdown
	mmc: vub300: fix warning - do not call blocking ops when !TASK_RUNNING
	tpm: acpi: Call acpi_put_table() to fix memory leak
	tpm: tpm_crb: Add the missed acpi_put_table() to fix memory leak
	tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak
	SUNRPC: Don't leak netobj memory when gss_read_proxy_verf() fails
	kcsan: Instrument memcpy/memset/memmove with newer Clang
	ASoC: Intel/SOF: use set_stream() instead of set_tdm_slots() for HDAudio
	ASoC/SoundWire: dai: expand 'stream' concept beyond SoundWire
	rcu-tasks: Simplify trc_read_check_handler() atomic operations
	net/af_packet: add VLAN support for AF_PACKET SOCK_RAW GSO
	net/af_packet: make sure to pull mac header
	media: stv0288: use explicitly signed char
	soc: qcom: Select REMAP_MMIO for LLCC driver
	kest.pl: Fix grub2 menu handling for rebooting
	ktest.pl minconfig: Unset configs instead of just removing them
	jbd2: use the correct print format
	perf/x86/intel/uncore: Disable I/O stacks to PMU mapping on ICX-D
	perf/x86/intel/uncore: Clear attr_update properly
	arm64: dts: qcom: sdm845-db845c: correct SPI2 pins drive strength
	mmc: sdhci-sprd: Disable CLK_AUTO when the clock is less than 400K
	btrfs: fix resolving backrefs for inline extent followed by prealloc
	ARM: ux500: do not directly dereference __iomem
	arm64: dts: qcom: sdm850-lenovo-yoga-c630: correct I2C12 pins drive strength
	selftests: Use optional USERCFLAGS and USERLDFLAGS
	PM/devfreq: governor: Add a private governor_data for governor
	cpufreq: Init completion before kobject_init_and_add()
	ALSA: patch_realtek: Fix Dell Inspiron Plus 16
	ALSA: hda/realtek: Apply dual codec fixup for Dell Latitude laptops
	fs: dlm: fix sock release if listen fails
	fs: dlm: retry accept() until -EAGAIN or error returns
	mptcp: mark ops structures as ro_after_init
	mptcp: remove MPTCP 'ifdef' in TCP SYN cookies
	dm cache: Fix ABBA deadlock between shrink_slab and dm_cache_metadata_abort
	dm thin: Fix ABBA deadlock between shrink_slab and dm_pool_abort_metadata
	dm thin: Use last transaction's pmd->root when commit failed
	dm thin: resume even if in FAIL mode
	dm thin: Fix UAF in run_timer_softirq()
	dm integrity: Fix UAF in dm_integrity_dtr()
	dm clone: Fix UAF in clone_dtr()
	dm cache: Fix UAF in destroy()
	dm cache: set needs_check flag after aborting metadata
	tracing/hist: Fix out-of-bound write on 'action_data.var_ref_idx'
	perf/core: Call LSM hook after copying perf_event_attr
	of/kexec: Fix reading 32-bit "linux,initrd-{start,end}" values
	KVM: VMX: Resume guest immediately when injecting #GP on ECREATE
	KVM: nVMX: Inject #GP, not #UD, if "generic" VMXON CR0/CR4 check fails
	KVM: nVMX: Properly expose ENABLE_USR_WAIT_PAUSE control to L1
	x86/microcode/intel: Do not retry microcode reloading on the APs
	ftrace/x86: Add back ftrace_expected for ftrace bug reports
	x86/kprobes: Fix kprobes instruction boudary check with CONFIG_RETHUNK
	x86/kprobes: Fix optprobe optimization check with CONFIG_RETHUNK
	tracing: Fix race where eprobes can be called before the event
	tracing: Fix complicated dependency of CONFIG_TRACER_MAX_TRACE
	tracing/hist: Fix wrong return value in parse_action_params()
	tracing/probes: Handle system names with hyphens
	tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
	staging: media: tegra-video: fix chan->mipi value on error
	staging: media: tegra-video: fix device_node use after free
	ARM: 9256/1: NWFPE: avoid compiler-generated __aeabi_uldivmod
	media: dvb-core: Fix double free in dvb_register_device()
	media: dvb-core: Fix UAF due to refcount races at releasing
	cifs: fix confusing debug message
	cifs: fix missing display of three mount options
	rtc: ds1347: fix value written to century register
	block: mq-deadline: Do not break sequential write streams to zoned HDDs
	md/bitmap: Fix bitmap chunk size overflow issues
	efi: Add iMac Pro 2017 to uefi skip cert quirk
	wifi: wilc1000: sdio: fix module autoloading
	ASoC: jz4740-i2s: Handle independent FIFO flush bits
	ipu3-imgu: Fix NULL pointer dereference in imgu_subdev_set_selection()
	ipmi: fix long wait in unload when IPMI disconnect
	mtd: spi-nor: Check for zero erase size in spi_nor_find_best_erase_type()
	ima: Fix a potential NULL pointer access in ima_restore_measurement_list
	ipmi: fix use after free in _ipmi_destroy_user()
	PCI: Fix pci_device_is_present() for VFs by checking PF
	PCI/sysfs: Fix double free in error path
	riscv: stacktrace: Fixup ftrace_graph_ret_addr retp argument
	riscv: mm: notify remote harts about mmu cache updates
	crypto: n2 - add missing hash statesize
	crypto: ccp - Add support for TEE for PCI ID 0x14CA
	driver core: Fix bus_type.match() error handling in __driver_attach()
	phy: qcom-qmp-combo: fix sc8180x reset
	iommu/amd: Fix ivrs_acpihid cmdline parsing code
	remoteproc: core: Do pm_relax when in RPROC_OFFLINE state
	parisc: led: Fix potential null-ptr-deref in start_task()
	device_cgroup: Roll back to original exceptions after copy failure
	drm/connector: send hotplug uevent on connector cleanup
	drm/vmwgfx: Validate the box size for the snooped cursor
	drm/i915/dsi: fix VBT send packet port selection for dual link DSI
	drm/ingenic: Fix missing platform_driver_unregister() call in ingenic_drm_init()
	ext4: silence the warning when evicting inode with dioread_nolock
	ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
	ext4: remove trailing newline from ext4_msg() message
	fs: ext4: initialize fsdata in pagecache_write()
	ext4: fix use-after-free in ext4_orphan_cleanup
	ext4: fix undefined behavior in bit shift for ext4_check_flag_values
	ext4: add EXT4_IGET_BAD flag to prevent unexpected bad inode
	ext4: add helper to check quota inums
	ext4: fix bug_on in __es_tree_search caused by bad quota inode
	ext4: fix reserved cluster accounting in __es_remove_extent()
	ext4: check and assert if marking an no_delete evicting inode dirty
	ext4: fix bug_on in __es_tree_search caused by bad boot loader inode
	ext4: fix leaking uninitialized memory in fast-commit journal
	ext4: fix uninititialized value in 'ext4_evict_inode'
	ext4: init quota for 'old.inode' in 'ext4_rename'
	ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline
	ext4: fix corruption when online resizing a 1K bigalloc fs
	ext4: fix error code return to user-space in ext4_get_branch()
	ext4: avoid BUG_ON when creating xattrs
	ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
	ext4: fix inode leak in ext4_xattr_inode_create() on an error path
	ext4: initialize quota before expanding inode in setproject ioctl
	ext4: avoid unaccounted block allocation when expanding inode
	ext4: allocate extended attribute value in vmalloc area
	drm/amdgpu: handle polaris10/11 overlap asics (v2)
	drm/amdgpu: make display pinning more flexible (v2)
	block: mq-deadline: Fix dd_finish_request() for zoned devices
	tracing: Fix issue of missing one synthetic field
	ext4: remove unused enum EXT4_FC_COMMIT_FAILED
	ext4: use ext4_debug() instead of jbd_debug()
	ext4: introduce EXT4_FC_TAG_BASE_LEN helper
	ext4: factor out ext4_fc_get_tl()
	ext4: fix potential out of bound read in ext4_fc_replay_scan()
	ext4: disable fast-commit of encrypted dir operations
	ext4: don't set up encryption key during jbd2 transaction
	ext4: add missing validation of fast-commit record lengths
	ext4: fix unaligned memory access in ext4_fc_reserve_space()
	ext4: fix off-by-one errors in fast-commit block filling
	ARM: renumber bits related to _TIF_WORK_MASK
	phy: qcom-qmp-combo: fix out-of-bounds clock access
	btrfs: replace strncpy() with strscpy()
	btrfs: move missing device handling in a dedicate function
	btrfs: fix extent map use-after-free when handling missing device in read_one_chunk
	x86/mce: Get rid of msr_ops
	x86/MCE/AMD: Clear DFR errors found in THR handler
	media: s5p-mfc: Fix to handle reference queue during finishing
	media: s5p-mfc: Clear workbit to handle error condition
	media: s5p-mfc: Fix in register read and write for H264
	perf probe: Use dwarf_attr_integrate as generic DWARF attr accessor
	perf probe: Fix to get the DW_AT_decl_file and DW_AT_call_file as unsinged data
	ravb: Fix "failed to switch device to config mode" message during unbind
	ext4: goto right label 'failed_mount3a'
	ext4: correct inconsistent error msg in nojournal mode
	mbcache: automatically delete entries from cache on freeing
	ext4: fix deadlock due to mbcache entry corruption
	drm/i915/migrate: don't check the scratch page
	drm/i915/migrate: fix offset calculation
	drm/i915/migrate: fix length calculation
	SUNRPC: ensure the matching upcall is in-flight upon downcall
	btrfs: fix an error handling path in btrfs_defrag_leaves()
	bpf: pull before calling skb_postpull_rcsum()
	drm/panfrost: Fix GEM handle creation ref-counting
	netfilter: nf_tables: consolidate set description
	netfilter: nf_tables: add function to create set stateful expressions
	netfilter: nf_tables: perform type checking for existing sets
	vmxnet3: correctly report csum_level for encapsulated packet
	netfilter: nf_tables: honor set timeout and garbage collection updates
	veth: Fix race with AF_XDP exposing old or uninitialized descriptors
	nfsd: shut down the NFSv4 state objects before the filecache
	net: hns3: add interrupts re-initialization while doing VF FLR
	net: hns3: refactor hns3_nic_reuse_page()
	net: hns3: extract macro to simplify ring stats update code
	net: hns3: fix miss L3E checking for rx packet
	net: hns3: fix VF promisc mode not update when mac table full
	net: sched: fix memory leak in tcindex_set_parms
	qlcnic: prevent ->dcb use-after-free on qlcnic_dcb_enable() failure
	net: dsa: mv88e6xxx: depend on PTP conditionally
	nfc: Fix potential resource leaks
	vdpa_sim: fix possible memory leak in vdpasim_net_init() and vdpasim_blk_init()
	vhost/vsock: Fix error handling in vhost_vsock_init()
	vringh: fix range used in iotlb_translate()
	vhost: fix range used in translate_desc()
	vdpa_sim: fix vringh initialization in vdpasim_queue_ready()
	net/mlx5: E-Switch, properly handle ingress tagged packets on VST
	net/mlx5: Add forgotten cleanup calls into mlx5_init_once() error path
	net/mlx5: Avoid recovery in probe flows
	net/mlx5e: IPoIB, Don't allow CQE compression to be turned on by default
	net/mlx5e: TC, Refactor mlx5e_tc_add_flow_mod_hdr() to get flow attr
	net/mlx5e: Always clear dest encap in neigh-update-del
	net/mlx5e: Fix hw mtu initializing at XDP SQ allocation
	net: amd-xgbe: add missed tasklet_kill
	net: ena: Fix toeplitz initial hash value
	net: ena: Don't register memory info on XDP exchange
	net: ena: Account for the number of processed bytes in XDP
	net: ena: Use bitmask to indicate packet redirection
	net: ena: Fix rx_copybreak value update
	net: ena: Set default value for RX interrupt moderation
	net: ena: Update NUMA TPH hint register upon NUMA node update
	net: phy: xgmiitorgmii: Fix refcount leak in xgmiitorgmii_probe
	RDMA/mlx5: Fix mlx5_ib_get_hw_stats when used for device
	RDMA/mlx5: Fix validation of max_rd_atomic caps for DC
	drm/meson: Reduce the FIFO lines held when AFBC is not used
	filelock: new helper: vfs_inode_has_locks
	ceph: switch to vfs_inode_has_locks() to fix file lock bug
	gpio: sifive: Fix refcount leak in sifive_gpio_probe
	net: sched: atm: dont intepret cls results when asked to drop
	net: sched: cbq: dont intepret cls results when asked to drop
	net: sparx5: Fix reading of the MAC address
	netfilter: ipset: fix hash:net,port,net hang with /0 subnet
	netfilter: ipset: Rework long task execution when adding/deleting entries
	perf tools: Fix resources leak in perf_data__open_dir()
	drm/imx: ipuv3-plane: Fix overlay plane width
	fs/ntfs3: don't hold ni_lock when calling truncate_setsize()
	drivers/net/bonding/bond_3ad: return when there's no aggregator
	octeontx2-pf: Fix lmtst ID used in aura free
	usb: rndis_host: Secure rndis_query check against int overflow
	perf stat: Fix handling of --for-each-cgroup with --bpf-counters to match non BPF mode
	drm/i915: unpin on error in intel_vgpu_shadow_mm_pin()
	caif: fix memory leak in cfctrl_linkup_request()
	udf: Fix extension of the last extent in the file
	ASoC: Intel: bytcr_rt5640: Add quirk for the Advantech MICA-071 tablet
	nvme: fix multipath crash caused by flush request when blktrace is enabled
	io_uring: check for valid register opcode earlier
	nvmet: use NVME_CMD_EFFECTS_CSUPP instead of open coding it
	nvme: also return I/O command effects from nvme_command_effects
	btrfs: check superblock to ensure the fs was not modified at thaw time
	x86/kexec: Fix double-free of elf header buffer
	x86/bugs: Flush IBP in ib_prctl_set()
	nfsd: fix handling of readdir in v4root vs. mount upcall timeout
	fbdev: matroxfb: G200eW: Increase max memory from 1 MB to 16 MB
	block: don't allow splitting of a REQ_NOWAIT bio
	io_uring: fix CQ waiting timeout handling
	thermal: int340x: Add missing attribute for data rate base
	riscv: uaccess: fix type of 0 variable on error in get_user()
	riscv, kprobes: Stricter c.jr/c.jalr decoding
	drm/i915/gvt: fix gvt debugfs destroy
	drm/i915/gvt: fix vgpu debugfs clean in remove
	hfs/hfsplus: use WARN_ON for sanity check
	hfs/hfsplus: avoid WARN_ON() for sanity check, use proper error handling
	ksmbd: fix infinite loop in ksmbd_conn_handler_loop()
	ksmbd: check nt_len to be at least CIFS_ENCPWD_SIZE in ksmbd_decode_ntlmssp_auth_blob
	Revert "ACPI: PM: Add support for upcoming AMD uPEP HID AMDI007"
	mptcp: dedicated request sock for subflow in v6
	mptcp: use proper req destructor for IPv6
	ext4: don't allow journal inode to have encrypt flag
	selftests: set the BUILD variable to absolute path
	btrfs: make thaw time super block check to also verify checksum
	net: hns3: fix return value check bug of rx copybreak
	mbcache: Avoid nesting of cache->c_list_lock under bit locks
	efi: random: combine bootloader provided RNG seed with RNG protocol output
	io_uring: Fix unsigned 'res' comparison with zero in io_fixup_rw_res()
	drm/mgag200: Fix PLL setup for G200_SE_A rev >=4
	Linux 5.15.87

Change-Id: I06fb376627506652ed60c04d56074956e6e075a0
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2023-01-18 13:13:15 +00:00
302 changed files with 3238 additions and 1873 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 86
+SUBLEVEL = 87
 EXTRAVERSION =
 NAME = Trick or Treat

View File

@@ -129,15 +129,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
 #define TIF_NEED_RESCHED	1	/* rescheduling necessary */
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_UPROBE		3	/* breakpointed or singlestepping */
-#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
-#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
-#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
-#define TIF_NOTIFY_SIGNAL	8	/* signal notifications exist */
+#define TIF_NOTIFY_SIGNAL	4	/* signal notifications exist */

 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK	20
+#define TIF_RESTORE_SIGMASK	19
+#define TIF_SYSCALL_TRACE	20	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	21	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	22	/* syscall tracepoint instrumentation */
+#define TIF_SECCOMP		23	/* seccomp syscall filtering active */

 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)

View File

@@ -1047,7 +1047,10 @@
 /* PINCTRL - additions to nodes defined in sdm845.dtsi */
 &qup_spi2_default {
-	drive-strength = <16>;
+	pinconf {
+		pins = "gpio27", "gpio28", "gpio29", "gpio30";
+		drive-strength = <16>;
+	};
 };

 &qup_uart3_default{

View File

@@ -475,8 +475,10 @@
 };

 &qup_i2c12_default {
-	drive-strength = <2>;
-	bias-disable;
+	pinmux {
+		drive-strength = <2>;
+		bias-disable;
+	};
 };

 &qup_uart6_default {

View File

@@ -788,6 +788,7 @@ void __noreturn rtas_halt(void)
 /* Must be in the RMO region, so we place it here */
 static char rtas_os_term_buf[2048];
+static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;

 void rtas_os_term(char *str)
 {
@@ -799,16 +800,20 @@ void rtas_os_term(char *str)
 	 * this property may terminate the partition which we want to avoid
 	 * since it interferes with panic_timeout.
 	 */
-	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
-	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
+	if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
 		return;

 	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

+	/*
+	 * Keep calling as long as RTAS returns a "try again" status,
+	 * but don't use rtas_busy_delay(), which potentially
+	 * schedules.
+	 */
 	do {
-		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
+		status = rtas_call(ibm_os_term_token, 1, 1, NULL,
 				   __pa(rtas_os_term_buf));
-	} while (rtas_busy_delay(status));
+	} while (rtas_busy_delay_time(status));

 	if (status != 0)
 		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
@@ -1167,6 +1172,13 @@ void __init rtas_initialize(void)
 	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
 	rtas.entry = no_entry ? rtas.base : entry;

+	/*
+	 * Discover these now to avoid device tree lookups in the
+	 * panic path.
+	 */
+	if (of_property_read_bool(rtas.dev, "ibm,extended-os-term"))
+		ibm_os_term_token = rtas_token("ibm,os-term");
+
 	/* If RTAS was found, allocate the RMO buffer for it and look for
 	 * the stop-self token if any
 	 */

View File

@@ -19,6 +19,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
+	/* A local tlb flush is needed before user execution can resume. */
+	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;

View File

@@ -386,7 +386,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
 	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
 	 */
-	local_flush_tlb_page(address);
+	flush_tlb_page(vma, address);
 }

 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,

View File

@@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }

+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
+
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)

View File

@@ -216,7 +216,7 @@ do { \
 	might_fault();						\
 	access_ok(__p, sizeof(*__p)) ?				\
 		__get_user((x), __p) :				\
-		((x) = 0, -EFAULT);				\
+		((x) = (__force __typeof__(x))0, -EFAULT);	\
 })

 #define __put_user_asm(insn, x, ptr, err)			\

View File

@@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f);
 } while (0)

 __RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001);
-__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002);
+__RISCV_INSN_FUNCS(c_jr, 0xf07f, 0x8002);
 __RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001);
-__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002);
+__RISCV_INSN_FUNCS(c_jalr, 0xf07f, 0x9002);
 __RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001);
 __RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001);
 __RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002);

View File

@@ -60,7 +60,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 		} else {
 			fp = frame->fp;
 			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
-						   (unsigned long *)(fp - 8));
+						   &frame->ra);
 		}
 	}

View File

@@ -196,6 +196,16 @@ switch_mm_fast:
 	if (need_flush_tlb)
 		local_flush_tlb_all();
+#ifdef CONFIG_SMP
+	else {
+		cpumask_t *mask = &mm->context.tlb_stale_mask;
+
+		if (cpumask_test_cpu(cpu, mask)) {
+			cpumask_clear_cpu(cpu, mask);
+			local_flush_tlb_all_asid(cntx & asid_mask);
+		}
+	}
+#endif
 }

 static void set_mm_noasid(struct mm_struct *mm)

View File

@@ -5,23 +5,7 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
+#include <asm/tlbflush.h>

 void flush_tlb_all(void)
 {
@@ -31,6 +15,7 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
+	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	struct cpumask hmask;
 	unsigned int cpuid;
@@ -45,6 +30,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);

+		/*
+		 * TLB will be immediately flushed on harts concurrently
+		 * executing this MM context. TLB flush on other harts
+		 * is deferred until this MM context migrates there.
+		 */
+		cpumask_setall(pmask);
+		cpumask_clear_cpu(cpuid, pmask);
+		cpumask_andnot(pmask, pmask, cmask);
+
 		if (broadcast) {
 			riscv_cpuid_to_hartid_mask(cmask, &hmask);
 			sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),

View File

@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <asm/apicdef.h>
+#include <asm/intel-family.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/perf_event.h>

View File

@@ -3804,6 +3804,21 @@ static const struct attribute_group *skx_iio_attr_update[] = {
 	NULL,
 };

+static void pmu_clear_mapping_attr(const struct attribute_group **groups,
+				   struct attribute_group *ag)
+{
+	int i;
+
+	for (i = 0; groups[i]; i++) {
+		if (groups[i] == ag) {
+			for (i++; groups[i]; i++)
+				groups[i - 1] = groups[i];
+
+			groups[i - 1] = NULL;
+			break;
+		}
+	}
+}
+
 static int
 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
 {
@@ -3852,7 +3867,7 @@ clear_attrs:
 clear_topology:
 	kfree(type->topology);
 clear_attr_update:
-	type->attr_update = NULL;
+	pmu_clear_mapping_attr(type->attr_update, ag);
 	return ret;
 }
@@ -5144,6 +5159,11 @@ static int icx_iio_get_topology(struct intel_uncore_type *type)
 static int icx_iio_set_mapping(struct intel_uncore_type *type)
 {
+	/* Detect ICX-D system. This case is not supported */
+	if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
+		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
+		return -EPERM;
+	}
+
 	return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
 }

View File

@@ -1951,6 +1951,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 		if (ctrl == PR_SPEC_FORCE_DISABLE)
 			task_set_spec_ib_force_disable(task);
 		task_update_spec_tif(task);
+		if (task == current)
+			indirect_branch_prediction_barrier();
 		break;
 	default:
 		return -ERANGE;

View File

@@ -526,7 +526,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high,
 	/* Fall back to method we used for older processors: */
 	switch (block) {
 	case 0:
-		addr = msr_ops.misc(bank);
+		addr = mca_msr_reg(bank, MCA_MISC);
 		break;
 	case 1:
 		offset = ((low & MASK_BLKPTR_LO) >> 21);
@@ -965,6 +965,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
 	return status & MCI_STATUS_DEFERRED;
 }

+static bool _log_error_deferred(unsigned int bank, u32 misc)
+{
+	if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
+			     mca_msr_reg(bank, MCA_ADDR), misc))
+		return false;
+
+	/*
+	 * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers.
+	 * Return true here to avoid accessing these registers.
+	 */
+	if (!mce_flags.smca)
+		return true;
+
+	/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
+	wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
+
+	return true;
+}
+
 /*
  * We have three scenarios for checking for Deferred errors:
  *
@@ -976,20 +994,9 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
  */
 static void log_error_deferred(unsigned int bank)
 {
-	bool defrd;
-
-	defrd = _log_error_bank(bank, msr_ops.status(bank),
-				msr_ops.addr(bank), 0);
-
-	if (!mce_flags.smca)
+	if (_log_error_deferred(bank, 0))
 		return;

-	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
-	if (defrd) {
-		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
-		return;
-	}
-
 	/*
 	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
 	 * for a valid error.
@@ -1009,7 +1016,7 @@ static void amd_deferred_error_interrupt(void)
 static void log_error_thresholding(unsigned int bank, u64 misc)
 {
-	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
+	_log_error_deferred(bank, misc);
 }

 static void log_and_reset_block(struct threshold_block *block)
@@ -1397,7 +1404,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
 		}
 	}

-	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+	err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC));
 	if (err)
 		goto out_kobj;

View File

@@ -176,53 +176,27 @@ void mce_unregister_decode_chain(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

-static inline u32 ctl_reg(int bank)
-{
-	return MSR_IA32_MCx_CTL(bank);
-}
-
-static inline u32 status_reg(int bank)
-{
-	return MSR_IA32_MCx_STATUS(bank);
-}
-
-static inline u32 addr_reg(int bank)
-{
-	return MSR_IA32_MCx_ADDR(bank);
-}
-
-static inline u32 misc_reg(int bank)
-{
-	return MSR_IA32_MCx_MISC(bank);
-}
-
-static inline u32 smca_ctl_reg(int bank)
-{
-	return MSR_AMD64_SMCA_MCx_CTL(bank);
-}
-
-static inline u32 smca_status_reg(int bank)
-{
-	return MSR_AMD64_SMCA_MCx_STATUS(bank);
-}
-
-static inline u32 smca_addr_reg(int bank)
-{
-	return MSR_AMD64_SMCA_MCx_ADDR(bank);
-}
-
-static inline u32 smca_misc_reg(int bank)
-{
-	return MSR_AMD64_SMCA_MCx_MISC(bank);
-}
-
-struct mca_msr_regs msr_ops = {
-	.ctl = ctl_reg,
-	.status = status_reg,
-	.addr = addr_reg,
-	.misc = misc_reg
-};
+u32 mca_msr_reg(int bank, enum mca_msr reg)
+{
+	if (mce_flags.smca) {
+		switch (reg) {
+		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
+		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
+		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
+		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
+		}
+	}
+
+	switch (reg) {
+	case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
+	case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
+	case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
+	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
+	}
+
+	return 0;
+}

 static void __print_mce(struct mce *m)
 {
 	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
@@ -371,11 +345,11 @@ static int msr_to_offset(u32 msr)
 	if (msr == mca_cfg.rip_msr)
 		return offsetof(struct mce, ip);
-	if (msr == msr_ops.status(bank))
+	if (msr == mca_msr_reg(bank, MCA_STATUS))
 		return offsetof(struct mce, status);
-	if (msr == msr_ops.addr(bank))
+	if (msr == mca_msr_reg(bank, MCA_ADDR))
 		return offsetof(struct mce, addr);
-	if (msr == msr_ops.misc(bank))
+	if (msr == mca_msr_reg(bank, MCA_MISC))
 		return offsetof(struct mce, misc);
 	if (msr == MSR_IA32_MCG_STATUS)
 		return offsetof(struct mce, mcgstatus);
@@ -676,10 +650,10 @@ static struct notifier_block mce_default_nb = {
 static noinstr void mce_read_aux(struct mce *m, int i)
 {
 	if (m->status & MCI_STATUS_MISCV)
-		m->misc = mce_rdmsrl(msr_ops.misc(i));
+		m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));

 	if (m->status & MCI_STATUS_ADDRV) {
-		m->addr = mce_rdmsrl(msr_ops.addr(i));
+		m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));

 		/*
 		 * Mask the reported address by the reported granularity.
@@ -749,7 +723,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		m.bank = i;
 		barrier();
-		m.status = mce_rdmsrl(msr_ops.status(i));
+		m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));

 		/* If this entry is not valid, ignore it */
 		if (!(m.status & MCI_STATUS_VAL))
@@ -817,7 +791,7 @@ clear_it:
 		/*
 		 * Clear state for this bank.
 		 */
-		mce_wrmsrl(msr_ops.status(i), 0);
+		mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
 	}

 	/*
@@ -842,7 +816,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 	int i;

 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
-		m->status = mce_rdmsrl(msr_ops.status(i));
+		m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
 		if (!(m->status & MCI_STATUS_VAL))
 			continue;
@@ -1143,7 +1117,7 @@ static void mce_clear_state(unsigned long *toclear)
 	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
 		if (test_bit(i, toclear))
-			mce_wrmsrl(msr_ops.status(i), 0);
+			mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
 	}
 }
@@ -1202,7 +1176,7 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 		m->addr = 0;
 		m->bank = i;
-		m->status = mce_rdmsrl(msr_ops.status(i));
+		m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
 		if (!(m->status & MCI_STATUS_VAL))
 			continue;
@@ -1699,8 +1673,8 @@ static void __mcheck_cpu_init_clear_banks(void)
 		if (!b->init)
 			continue;
-		wrmsrl(msr_ops.ctl(i), b->ctl);
-		wrmsrl(msr_ops.status(i), 0);
+		wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
+		wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
 	}
 }
@@ -1726,7 +1700,7 @@ static void __mcheck_cpu_check_banks(void)
 		if (!b->init)
 			continue;
-		rdmsrl(msr_ops.ctl(i), msrval);
+		rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
 		b->init = !!msrval;
 	}
 }
@@ -1883,13 +1857,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 		mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
 		mce_flags.amd_threshold = 1;
-
-		if (mce_flags.smca) {
-			msr_ops.ctl = smca_ctl_reg;
-			msr_ops.status = smca_status_reg;
-			msr_ops.addr = smca_addr_reg;
-			msr_ops.misc = smca_misc_reg;
-		}
 	}
 }
@@ -2265,7 +2232,7 @@ static void mce_disable_error_reporting(void)
 		struct mce_bank *b = &mce_banks[i];

 		if (b->init)
-			wrmsrl(msr_ops.ctl(i), 0);
+			wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
 	}
 	return;
 }
@@ -2617,7 +2584,7 @@ static void mce_reenable_cpu(void)
 		struct mce_bank *b = &mce_banks[i];

 		if (b->init)
-			wrmsrl(msr_ops.ctl(i), b->ctl);
+			wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
 	}
 }

View File

@@ -168,14 +168,14 @@ struct mce_vendor_flags {
 extern struct mce_vendor_flags mce_flags;

-struct mca_msr_regs {
-	u32 (*ctl) (int bank);
-	u32 (*status) (int bank);
-	u32 (*addr) (int bank);
-	u32 (*misc) (int bank);
+enum mca_msr {
+	MCA_CTL,
+	MCA_STATUS,
+	MCA_ADDR,
+	MCA_MISC,
 };

-extern struct mca_msr_regs msr_ops;
+u32 mca_msr_reg(int bank, enum mca_msr reg);

 /* Decide whether to add MCE record to MCE event pool or filter it out. */
 extern bool filter_mce(struct mce *m);

View File

@@ -659,7 +659,6 @@ void load_ucode_intel_ap(void)
 	else
 		iup = &intel_ucode_patch;

-reget:
 	if (!*iup) {
 		patch = __load_ucode_intel(&uci);
 		if (!patch)
@@ -670,12 +669,7 @@ reget:

 	uci.mc = *iup;

-	if (apply_microcode_early(&uci, true)) {
-		/* Mixed-silicon system? Try to refetch the proper patch: */
-		*iup = NULL;
-
-		goto reget;
-	}
+	apply_microcode_early(&uci, true);
 }

 static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)

View File

@@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image)
 	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
-	if (ret) {
-		vfree((void *)image->elf_headers);
+	if (ret)
 		return ret;
-	}

 	image->elf_load_addr = kbuf.mem;
 	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
 		 image->elf_load_addr, kbuf.bufsz, kbuf.bufsz);

View File

@@ -219,7 +219,9 @@ void ftrace_replace_code(int enable)
 		ret = ftrace_verify_code(rec->ip, old);
 		if (ret) {
+			ftrace_expected = old;
 			ftrace_bug(ret, rec);
+			ftrace_expected = NULL;
 			return;
 		}
 	}

View File

@@ -37,6 +37,7 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/kasan.h>
 #include <linux/moduleloader.h>
@@ -289,12 +290,15 @@ static int can_probe(unsigned long paddr)
 		if (ret < 0)
 			return 0;

+#ifdef CONFIG_KGDB
 		/*
-		 * Another debugging subsystem might insert this breakpoint.
-		 * In that case, we can't recover it.
+		 * If there is a dynamically installed kgdb sw breakpoint,
+		 * this function should not be probed.
 		 */
-		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+		    kgdb_has_hit_break(addr))
 			return 0;
+#endif
 		addr += insn.length;
 	}

View File

@@ -15,6 +15,7 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/objtool.h>
 #include <linux/pgtable.h>
@@ -272,19 +273,6 @@ static int insn_is_indirect_jump(struct insn *insn)
 	return ret;
 }

-static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
-{
-	unsigned char ops;
-
-	for (; addr < eaddr; addr++) {
-		if (get_kernel_nofault(ops, (void *)addr) < 0 ||
-		    ops != INT3_INSN_OPCODE)
-			return false;
-	}
-
-	return true;
-}
-
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
@@ -327,15 +315,15 @@ static int can_optimize(unsigned long paddr)
 		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
 		if (ret < 0)
 			return 0;
+#ifdef CONFIG_KGDB
 		/*
-		 * In the case of detecting unknown breakpoint, this could be
-		 * a padding INT3 between functions. Let's check that all the
-		 * rest of the bytes are also INT3.
+		 * If there is a dynamically installed kgdb sw breakpoint,
+		 * this function should not be probed.
 		 */
-		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
-			return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
+		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+		    kgdb_has_hit_break(addr))
+			return 0;
+#endif
 		/* Recover address */
 		insn.kaddr = (void *)addr;
 		insn.next_byte = (void *)(addr + insn.length);

View File

@@ -4970,24 +4970,35 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		| FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;

 	/*
-	 * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks
-	 * that have higher priority than VM-Exit (see Intel SDM's pseudocode
-	 * for VMXON), as KVM must load valid CR0/CR4 values into hardware while
-	 * running the guest, i.e. KVM needs to check the _guest_ values.
+	 * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter
+	 * the guest and so cannot rely on hardware to perform the check,
+	 * which has higher priority than VM-Exit (see Intel SDM's pseudocode
+	 * for VMXON).
 	 *
-	 * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and
-	 * !COMPATIBILITY modes.  KVM may run the guest in VM86 to emulate Real
-	 * Mode, but KVM will never take the guest out of those modes.
+	 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
+	 * and !COMPATIBILITY modes.  For an unrestricted guest, KVM doesn't
+	 * force any of the relevant guest state.  For a restricted guest, KVM
+	 * does force CR0.PE=1, but only to also force VM86 in order to emulate
+	 * Real Mode, and so there's no need to check CR0.PE manually.
 	 */
-	if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
-	    !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
+	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}

 	/*
-	 * CPL=0 and all other checks that are lower priority than VM-Exit must
-	 * be checked manually.
+	 * The CPL is checked for "not in VMX operation" and for "in VMX root",
+	 * and has higher priority than the VM-Fail due to being post-VMXON,
+	 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0.  In VMX non-root,
+	 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
+	 * from L2 to L1, i.e. there's no need to check for the vCPU being in
+	 * VMX non-root.
+	 *
+	 * Forwarding the VM-Exit unconditionally, i.e. without performing the
+	 * #UD checks (see above), is functionally ok because KVM doesn't allow
+	 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's
+	 * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are
+	 * missed by hardware due to shadowing CR0 and/or CR4.
 	 */
 	if (vmx_get_cpl(vcpu)) {
 		kvm_inject_gp(vcpu, 0);
@@ -4997,6 +5008,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	if (vmx->nested.vmxon)
 		return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);

+	/*
+	 * Invalid CR0/CR4 generates #GP.  These checks are performed if and
+	 * only if the vCPU isn't already in VMX operation, i.e. effectively
+	 * have lower priority than the VM-Fail above.
+	 */
+	if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
+	    !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
+		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}
+
 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
 	    != VMXON_NEEDED_FEATURES) {
 		kvm_inject_gp(vcpu, 0);
@@ -6644,7 +6666,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
 		SECONDARY_EXEC_ENABLE_INVPCID |
 		SECONDARY_EXEC_RDSEED_EXITING |
 		SECONDARY_EXEC_XSAVES |
-		SECONDARY_EXEC_TSC_SCALING;
+		SECONDARY_EXEC_TSC_SCALING |
+		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;

 	/*
 	 * We can emulate "VMCS shadowing," even if the hardware

View File

@@ -188,8 +188,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
 	/* Enforce CPUID restriction on max enclave size. */
 	max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 :
 							    sgx_12_0->edx;
-	if (size >= BIT_ULL(max_size_log2))
+	if (size >= BIT_ULL(max_size_log2)) {
 		kvm_inject_gp(vcpu, 0);
+		return 1;
+	}

 	/*
 	 * sgx_virt_ecreate() returns:

View File

@@ -5252,8 +5252,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
 		unsigned long flags;

 		spin_lock_irqsave(&bfqd->lock, flags);
-		bfq_exit_bfqq(bfqd, bfqq);
 		bic_set_bfqq(bic, NULL, is_sync);
+		bfq_exit_bfqq(bfqd, bfqq);
 		spin_unlock_irqrestore(&bfqd->lock, flags);
 	}
 }

View File

@@ -279,6 +279,16 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	*segs = nsegs;
 	return NULL;
 split:
+	/*
+	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
+	 * with EAGAIN if splitting is required and return an error pointer.
+	 */
+	if (bio->bi_opf & REQ_NOWAIT) {
+		bio->bi_status = BLK_STS_AGAIN;
+		bio_endio(bio);
+		return ERR_PTR(-EAGAIN);
+	}
+
 	*segs = nsegs;

 	/*

View File

@@ -153,6 +153,20 @@ static u8 dd_rq_ioclass(struct request *rq)
 	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
 }

+/*
+ * get the request before `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_earlier_request(struct request *rq)
+{
+	struct rb_node *node = rb_prev(&rq->rb_node);
+
+	if (node)
+		return rb_entry_rq(node);
+
+	return NULL;
+}
+
 /*
  * get the request after `rq' in sector-sorted order
  */
@@ -288,6 +302,39 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
 	return 0;
 }

+/*
+ * Check if rq has a sequential request preceding it.
+ */
+static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq)
+{
+	struct request *prev = deadline_earlier_request(rq);
+
+	if (!prev)
+		return false;
+
+	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
+}
+
+/*
+ * Skip all write requests that are sequential from @rq, even if we cross
+ * a zone boundary.
+ */
+static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
+						struct request *rq)
+{
+	sector_t pos = blk_rq_pos(rq);
+	sector_t skipped_sectors = 0;
+
+	while (rq) {
+		if (blk_rq_pos(rq) != pos + skipped_sectors)
+			break;
+		skipped_sectors += blk_rq_sectors(rq);
+		rq = deadline_latter_request(rq);
+	}
+
+	return rq;
+}
+
 /*
  * For the specified data direction, return the next request to
  * dispatch using arrival ordered lists.
@@ -309,11 +356,16 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	/*
 	 * Look for a write request that can be dispatched, that is one with
-	 * an unlocked target zone.
+	 * an unlocked target zone. For some HDDs, breaking a sequential
+	 * write stream can lead to lower throughput, so make sure to preserve
+	 * sequential write streams, even if that stream crosses into the next
+	 * zones and these zones are unlocked.
 	 */
 	spin_lock_irqsave(&dd->zone_lock, flags);
 	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
-		if (blk_req_can_dispatch_to_zone(rq))
+		if (blk_req_can_dispatch_to_zone(rq) &&
+		    (blk_queue_nonrot(rq->q) ||
+		     !deadline_is_seq_writes(dd, rq)))
 			goto out;
 	}
 	rq = NULL;
@@ -344,13 +396,19 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	/*
 	 * Look for a write request that can be dispatched, that is one with
-	 * an unlocked target zone.
+	 * an unlocked target zone. For some HDDs, breaking a sequential
+	 * write stream can lead to lower throughput, so make sure to preserve
+	 * sequential write streams, even if that stream crosses into the next
+	 * zones and these zones are unlocked.
 	 */
 	spin_lock_irqsave(&dd->zone_lock, flags);
 	while (rq) {
 		if (blk_req_can_dispatch_to_zone(rq))
 			break;
-		rq = deadline_latter_request(rq);
+		if (blk_queue_nonrot(rq->q))
+			rq = deadline_latter_request(rq);
+		else
+			rq = deadline_skip_seq_writes(dd, rq);
 	}
 	spin_unlock_irqrestore(&dd->zone_lock, flags);
@@ -736,6 +794,18 @@ static void dd_prepare_request(struct request *rq)
 	rq->elv.priv[0] = NULL;
 }

+static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
+{
+	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+	enum dd_prio p;
+
+	for (p = 0; p <= DD_PRIO_MAX; p++)
+		if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
+			return true;
+
+	return false;
+}
+
 /*
  * Callback from inside blk_mq_free_request().
  *
@@ -758,7 +828,6 @@ static void dd_finish_request(struct request *rq)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	const u8 ioprio_class = dd_rq_ioclass(rq);
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-	struct dd_per_prio *per_prio = &dd->per_prio[prio];

 	/*
 	 * The block layer core may call dd_finish_request() without having
@@ -775,9 +844,10 @@ static void dd_finish_request(struct request *rq)
 		spin_lock_irqsave(&dd->zone_lock, flags);
 		blk_req_zone_write_unlock(rq);
-		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
-			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
 		spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+		if (dd_has_write_work(rq->mq_hctx))
+			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
 	}
 }

View File

@@ -399,16 +399,68 @@ static const struct dmi_system_id medion_laptop[] = {
 	{ }
 };

+static const struct dmi_system_id asus_laptop[] = {
+	{
+		.ident = "Asus Vivobook K3402ZA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
+		},
+	},
+	{
+		.ident = "Asus Vivobook K3502ZA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
+		},
+	},
+	{ }
+};
+
+static const struct dmi_system_id lenovo_laptop[] = {
+	{
+		.ident = "LENOVO IdeaPad Flex 5 14ALC7",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
+		},
+	},
+	{
+		.ident = "LENOVO IdeaPad Flex 5 16ALC7",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
+		},
+	},
+	{ }
+};
+
+static const struct dmi_system_id schenker_gm_rg[] = {
+	{
+		.ident = "XMG CORE 15 (M22)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+		},
+	},
+	{ }
+};
+
 struct irq_override_cmp {
 	const struct dmi_system_id *system;
 	unsigned char irq;
 	unsigned char triggering;
 	unsigned char polarity;
 	unsigned char shareable;
+	bool override;
 };

-static const struct irq_override_cmp skip_override_table[] = {
-	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+static const struct irq_override_cmp override_table[] = {
+	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+	{ schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
 };

 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@@ -416,6 +468,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
 {
 	int i;

+	for (i = 0; i < ARRAY_SIZE(override_table); i++) {
+		const struct irq_override_cmp *entry = &override_table[i];
+
+		if (dmi_check_system(entry->system) &&
+		    entry->irq == gsi &&
+		    entry->triggering == triggering &&
+		    entry->polarity == polarity &&
+		    entry->shareable == shareable)
+			return entry->override;
+	}
+
 #ifdef CONFIG_X86
 	/*
 	 * IRQ override isn't needed on modern AMD Zen systems and
@@ -426,17 +489,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
 		return false;
 #endif

-	for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
-		const struct irq_override_cmp *entry = &skip_override_table[i];
-
-		if (dmi_check_system(entry->system) &&
-		    entry->irq == gsi &&
-		    entry->triggering == triggering &&
-		    entry->polarity == polarity &&
-		    entry->shareable == shareable)
-			return false;
-	}
-
 	return true;
 }

View File

@@ -378,16 +378,13 @@ static int lps0_device_attach(struct acpi_device *adev,
* AMDI0006: * AMDI0006:
* - should use rev_id 0x0 * - should use rev_id 0x0
* - function mask = 0x3: Should use Microsoft method * - function mask = 0x3: Should use Microsoft method
* AMDI0007:
* - Should use rev_id 0x2
* - Should only use AMD method
*/ */
const char *hid = acpi_device_hid(adev); const char *hid = acpi_device_hid(adev);
rev_id = strcmp(hid, "AMDI0007") ? 0 : 2; rev_id = 0;
lps0_dsm_func_mask = validate_dsm(adev->handle, lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_MICROSOFT, 0, ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
&lps0_dsm_guid_microsoft); &lps0_dsm_guid_microsoft);
if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") || if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
!strcmp(hid, "AMD0005") || !strcmp(hid, "AMD0005") ||
@@ -395,9 +392,6 @@ static int lps0_device_attach(struct acpi_device *adev,
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
} else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
} else {
rev_id = 1;


@@ -83,6 +83,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -668,6 +669,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_reset_controller(host);
if (rc)
return rc;
/*
* If platform firmware failed to enable ports, try to enable
* them here.
*/
ahci_intel_pcs_quirk(pdev, hpriv);
return 0;
}
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@@ -869,7 +889,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ahci_reset_controller(host);
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@@ -904,7 +924,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(host);
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
@@ -1789,12 +1809,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
/*
* If platform firmware failed to enable ports, try to enable
* them here.
*/
ahci_intel_pcs_quirk(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@@ -1904,7 +1918,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
rc = ahci_reset_controller(host);
rc = ahci_pci_reset_controller(host);
if (rc)
return rc;


@@ -1127,7 +1127,11 @@ static int __driver_attach(struct device *dev, void *data)
return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
/*
* Driver could not match with device, but may match with
* another device on the bus.
*/
return 0;
} /* ret > 0 means positive match */
if (driver_allows_async_probing(drv)) {


@@ -1273,6 +1273,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
struct module *owner;
if (!acquire_ipmi_user(user, &i)) {
/*
@@ -1334,8 +1335,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
kfree(rcvr);
}
owner = intf->owner;
kref_put(&intf->refcount, intf_free);
module_put(intf->owner);
module_put(owner);
}
int ipmi_destroy_user(struct ipmi_user *user)


@@ -2152,6 +2152,20 @@ skip_fallback_noirq:
}
module_init(init_ipmi_si);
static void wait_msg_processed(struct smi_info *smi_info)
{
unsigned long jiffies_now;
long time_diff;
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
smi_event_handler(smi_info, time_diff);
schedule_timeout_uninterruptible(1);
}
}
static void shutdown_smi(void *send_info)
{
struct smi_info *smi_info = send_info;
@@ -2186,16 +2200,13 @@ static void shutdown_smi(void *send_info)
* in the BMC. Note that timers and CPU interrupts are off,
* so no need for locks.
*/
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
wait_msg_processed(smi_info);
poll(smi_info);
schedule_timeout_uninterruptible(1);
}
if (smi_info->handlers)
disable_si_irq(smi_info);
while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
poll(smi_info);
wait_msg_processed(smi_info);
schedule_timeout_uninterruptible(1);
}
if (smi_info->handlers)
smi_info->handlers->cleanup(smi_info->si_sm);


@@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
return -ENODEV;
if (tbl->header.length <
sizeof(*tbl) + sizeof(struct acpi_tpm2_phy))
sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) {
acpi_put_table((struct acpi_table_header *)tbl);
return -ENODEV;
}
tpm2_phy = (void *)tbl + sizeof(*tbl);
len = tpm2_phy->log_area_minimum_length;
start = tpm2_phy->log_area_start_address;
if (!start || !len)
if (!start || !len) {
acpi_put_table((struct acpi_table_header *)tbl);
return -ENODEV;
}
acpi_put_table((struct acpi_table_header *)tbl);
format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
} else {
/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
@@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
break;
}
acpi_put_table((struct acpi_table_header *)buff);
format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
if (!len) {
dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
return -EIO;
@@ -156,5 +163,4 @@ err:
kfree(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
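All three TPM fixes in this batch follow the same rule: a successful acpi_get_table() must be balanced by acpi_put_table() on every path out of the function, including the early error returns. Below is a minimal standalone sketch of that acquire/check/release shape, using malloc/free as stand-ins for the ACPI table API; it illustrates the idiom and is not kernel code.

#include <stdlib.h>
#include <string.h>

/* Toy stand-in for an acquired ACPI table mapping. */
struct table { size_t len; char data[64]; };

static struct table *get_table(void)          /* acpi_get_table() analogue */
{
	struct table *t = malloc(sizeof(*t));
	if (t) { t->len = sizeof(t->data); memset(t->data, 0, t->len); }
	return t;
}

static void put_table(struct table *t)        /* acpi_put_table() analogue */
{
	free(t);
}

static int read_log(size_t need)
{
	struct table *t = get_table();
	int ret = 0;

	if (!t)
		return -1;
	if (t->len < need) {
		ret = -1;            /* error path still releases the table */
		goto out;
	}
	/* ... consume t->data ... */
out:
	put_table(t);                /* single release point for every path */
	return ret;
}

int main(void) { return read_log(16) == 0 ? 0 : 1; }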


@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device)
/* Should the FIFO driver handle this? */
sm = buf->start_method;
if (sm == ACPI_TPM2_MEMORY_MAPPED)
if (sm == ACPI_TPM2_MEMORY_MAPPED) {
return -ENODEV;
rc = -ENODEV;
goto out;
}
priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
if (!priv)
if (!priv) {
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
return -EINVAL;
rc = -EINVAL;
goto out;
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
@@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device)
rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
goto out;
chip = tpmm_chip_alloc(dev, &tpm_crb);
if (IS_ERR(chip))
if (IS_ERR(chip)) {
return PTR_ERR(chip);
rc = PTR_ERR(chip);
goto out;
}
dev_set_drvdata(&chip->dev, priv);
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
return tpm_chip_register(chip);
rc = tpm_chip_register(chip);
out:
acpi_put_table((struct acpi_table_header *)buf);
return rc;
}
static int crb_acpi_remove(struct acpi_device *device)


@@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev)
const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
struct acpi_table_tpm2 *tbl;
acpi_status st;
int ret = 0;
if (!aid || aid->driver_data != DEVICE_IS_TPM2)
return 0;
@@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev)
/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
* table is mandatory
*/
st =
st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
return -EINVAL;
@@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev)
/* The tpm2_crb driver handles this device */
if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
return -ENODEV;
ret = -ENODEV;
return 0;
acpi_put_table((struct acpi_table_header *)tbl);
return ret;
}
#else
static int check_acpi_tpm2(struct device *dev)


@@ -1226,6 +1226,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
init_completion(&policy->kobj_unregister);
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
@@ -1264,7 +1265,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu;


@@ -320,6 +320,15 @@ static const struct psp_vdata pspv3 = {
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
.feature_reg = 0x109fc,
.inten_reg = 0x10690,
.intsts_reg = 0x10694,
};
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -365,7 +374,7 @@ static const struct sp_dev_vdata dev_vdata[] = {
{ /* 5 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
.psp_vdata = &pspv2,
.psp_vdata = &pspv4,
#endif
},
};


@@ -1229,6 +1229,7 @@ struct n2_hash_tmpl {
const u8 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
u8 statesize;
u8 block_size;
u8 auth_type;
u8 hmac_type;
@@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_MD5,
.hw_op_hashsz = MD5_DIGEST_SIZE,
.digest_size = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero_message_hash,
@@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA1,
.hw_op_hashsz = SHA1_DIGEST_SIZE,
.digest_size = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero_message_hash,
@@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_HMAC_SHA256,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero_message_hash,
@@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.hmac_type = AUTH_TYPE_RESERVED,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
.digest_size = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.block_size = SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
@@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
halg->statesize = tmpl->statesize;
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);


@@ -775,8 +775,7 @@ static void remove_sysfs_files(struct devfreq *devfreq,
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
* @governor_name: name of the policy to choose frequency.
* @data: private data for the governor. The devfreq framework does not
* @data: devfreq driver pass to governors, governor should not change it.
* touch this value.
*/
struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
@@ -1003,8 +1002,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res)
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
* @governor_name: name of the policy to choose frequency.
* @data: private data for the governor. The devfreq framework does not
* @data: devfreq driver pass to governors, governor should not change it.
* touch this value.
*
* This function manages automatically the memory of devfreq device using device
* resource management and simplify the free operation for memory of devfreq


@@ -21,7 +21,7 @@ struct userspace_data {
static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
{
struct userspace_data *data = df->data;
struct userspace_data *data = df->governor_data;
if (data->valid)
*freq = data->user_frequency;
@@ -40,7 +40,7 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
int err = 0;
mutex_lock(&devfreq->lock);
data = devfreq->data;
data = devfreq->governor_data;
sscanf(buf, "%lu", &wanted);
data->user_frequency = wanted;
@@ -60,7 +60,7 @@ static ssize_t set_freq_show(struct device *dev,
int err = 0;
mutex_lock(&devfreq->lock);
data = devfreq->data;
data = devfreq->governor_data;
if (data->valid)
err = sprintf(buf, "%lu\n", data->user_frequency);
@@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq)
goto out;
}
data->valid = false;
devfreq->data = data;
devfreq->governor_data = data;
err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
out:
@@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq)
if (devfreq->dev.kobj.sd)
sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
kfree(devfreq->data);
kfree(devfreq->governor_data);
devfreq->data = NULL;
devfreq->governor_data = NULL;
}
static int devfreq_userspace_handler(struct devfreq *devfreq,


@@ -590,7 +590,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
seed = early_memremap(efi_rng_seed, sizeof(*seed));
if (seed != NULL) {
size = min(seed->size, EFI_RANDOM_SEED_SIZE);
size = min_t(u32, seed->size, SZ_1K); // sanity check
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");
@@ -599,8 +599,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
seed = early_memremap(efi_rng_seed,
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
add_bootloader_randomness(seed->bits, size);
memzero_explicit(seed->bits, size);
early_memunmap(seed, sizeof(*seed) + size);
} else {
pr_err("Could not map UEFI random seed!\n");


@@ -766,6 +766,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed);
efi_status_t efi_random_get_seed(void);
efi_status_t check_platform_features(void);
void *get_efi_config_table(efi_guid_t guid);


@@ -67,27 +67,43 @@ efi_status_t efi_random_get_seed(void)
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
struct linux_efi_random_seed *prev_seed, *seed = NULL;
int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE;
efi_rng_protocol_t *rng = NULL;
struct linux_efi_random_seed *seed = NULL;
efi_status_t status;
status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
if (status != EFI_SUCCESS)
return status;
/*
* Check whether a seed was provided by a prior boot stage. In that
* case, instead of overwriting it, let's create a new buffer that can
* hold both, and concatenate the existing and the new seeds.
* Note that we should read the seed size with caution, in case the
* table got corrupted in memory somehow.
*/
prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID);
if (prev_seed && prev_seed->size <= 512U) {
prev_seed_size = prev_seed->size;
seed_size += prev_seed_size;
}
/*
* Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
* allocation will survive a kexec reboot (although we refresh the seed
* beforehand)
*/
status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
struct_size(seed, bits, seed_size),
(void **)&seed);
if (status != EFI_SUCCESS)
if (status != EFI_SUCCESS) {
return status;
efi_warn("Failed to allocate memory for RNG seed.\n");
goto err_warn;
}
status = efi_call_proto(rng, get_rng, &rng_algo_raw,
EFI_RANDOM_SEED_SIZE, seed->bits);
if (status == EFI_UNSUPPORTED)
/*
@@ -100,14 +116,28 @@ efi_status_t efi_random_get_seed(void)
if (status != EFI_SUCCESS)
goto err_freepool;
seed->size = EFI_RANDOM_SEED_SIZE;
seed->size = seed_size;
if (prev_seed_size)
memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits,
prev_seed_size);
status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
if (status != EFI_SUCCESS)
goto err_freepool;
if (prev_seed_size) {
/* wipe and free the old seed if we managed to install the new one */
memzero_explicit(prev_seed->bits, prev_seed_size);
efi_bs_call(free_pool, prev_seed);
}
return EFI_SUCCESS;
err_freepool:
memzero_explicit(seed, struct_size(seed, bits, seed_size));
efi_bs_call(free_pool, seed);
efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n");
err_warn:
if (prev_seed)
efi_warn("Retaining bootloader-supplied seed only");
return status;
}
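The stub now sizes the allocation with struct_size(seed, bits, seed_size), i.e. the header plus a flexible array large enough for the fresh bytes and the previous stage's seed, and then concatenates the two. Below is a standalone sketch of the same flexible-array layout and concatenation; it uses plain sizeof arithmetic (struct_size() additionally guards against overflow) and made-up buffer sizes.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seed {
	unsigned int size;
	unsigned char bits[];     /* flexible array member */
};

int main(void)
{
	unsigned char fresh[32] = { 1 };   /* pretend EFI_RNG output */
	unsigned char prev[16]  = { 2 };   /* pretend earlier-stage seed */
	size_t total = sizeof(fresh) + sizeof(prev);

	/* sizeof(*s) + total is what struct_size(s, bits, total) computes,
	 * minus the overflow checking the kernel helper adds. */
	struct seed *s = malloc(sizeof(*s) + total);
	if (!s)
		return 1;

	s->size = (unsigned int)total;
	memcpy(s->bits, fresh, sizeof(fresh));               /* new seed first */
	memcpy(s->bits + sizeof(fresh), prev, sizeof(prev)); /* then the old one */

	printf("seed of %u bytes\n", s->size);
	memset(s, 0, sizeof(*s) + total);  /* wipe before releasing, like the stub */
	free(s);
	return 0;
}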


@@ -209,6 +209,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
parent = irq_find_host(irq_parent);
of_node_put(irq_parent);
if (!parent) {
dev_err(dev, "no IRQ parent domain\n");
return -ENODEV;


@@ -2008,6 +2008,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
"See modparam exp_hw_support\n");
return -ENODEV;
}
/* differentiate between P10 and P11 asics with the same DID */
if (pdev->device == 0x67FF &&
(pdev->revision == 0xE3 ||
pdev->revision == 0xE7 ||
pdev->revision == 0xF3 ||
pdev->revision == 0xF7)) {
flags &= ~AMD_ASIC_MASK;
flags |= CHIP_POLARIS10;
}
/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
* however, SME requires an indirect IOMMU mapping because the encryption
@@ -2081,12 +2090,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ddev);
ret = amdgpu_driver_load_kms(adev, ent->driver_data);
ret = amdgpu_driver_load_kms(adev, flags);
if (ret)
goto err_pci;
retry_init:
ret = drm_dev_register(ddev, ent->driver_data);
ret = drm_dev_register(ddev, flags);
if (ret == -EAGAIN && ++retry <= 3) {
DRM_INFO("retry init %d\n", retry);
/* Don't request EX mode too frequently which is attacking */


@@ -1510,7 +1510,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain)
{
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
domain = AMDGPU_GEM_DOMAIN_GTT;


@@ -487,6 +487,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
mutex_destroy(&connector->mutex);
memset(connector, 0, sizeof(*connector));
if (dev->registered)
drm_sysfs_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_connector_cleanup);


@@ -133,9 +133,9 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
return ffs(intel_dsi->ports) - 1;
if (seq_port) {
if (intel_dsi->ports & PORT_B)
if (intel_dsi->ports & BIT(PORT_B))
return PORT_B;
else if (intel_dsi->ports & PORT_C)
else if (intel_dsi->ports & BIT(PORT_C))
return PORT_C;
}
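The intel_dsi change is the classic index-versus-mask bug: PORT_B and PORT_C are enum indices, so `ports & PORT_B` masks with the value 1 instead of with BIT(PORT_B). A small standalone demonstration, assuming the usual PORT_A=0, PORT_B=1, PORT_C=2 numbering:

#include <stdio.h>

#define BIT(n) (1U << (n))

enum port { PORT_A, PORT_B, PORT_C };   /* indices, not masks */

int main(void)
{
	unsigned int ports = BIT(PORT_B);   /* only port B present */

	/* Buggy test: 2 & 2 is nonzero, so port C is wrongly reported present. */
	printf("buggy:   %d\n", !!(ports & PORT_C));

	/* Correct test: convert the index to a mask first; 2 & 4 is zero. */
	printf("correct: %d\n", !!(ports & BIT(PORT_C)));
	return 0;
}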


@@ -13,7 +13,6 @@
struct insert_pte_data {
u64 offset;
bool is_lmem;
};
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
@@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm,
struct insert_pte_data *d = data;
vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
d->is_lmem ? PTE_LM : 0);
i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
d->offset += PAGE_SIZE;
}
@@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
goto err_vm;
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]);
vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d);
vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d);
}
return &vm->vm;
@@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq,
GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
/* Compute the page directory offset for the target address range */
offset += (u64)rq->engine->instance << 32;
offset >>= 12;
offset *= sizeof(u64);
offset += 2 * CHUNK_SZ;
offset += (u64)rq->engine->instance << 32;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))


@@ -175,8 +175,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
debugfs_remove_recursive(vgpu->debugfs);
struct intel_gvt *gvt = vgpu->gvt;
vgpu->debugfs = NULL;
struct drm_minor *minor = gvt->gt->i915->drm.primary;
if (minor->debugfs_root && gvt->debugfs_root) {
debugfs_remove_recursive(vgpu->debugfs);
vgpu->debugfs = NULL;
}
}
/**
@@ -199,6 +204,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
debugfs_remove_recursive(gvt->debugfs_root);
struct drm_minor *minor = gvt->gt->i915->drm.primary;
gvt->debugfs_root = NULL;
if (minor->debugfs_root) {
debugfs_remove_recursive(gvt->debugfs_root);
gvt->debugfs_root = NULL;
}
}


@@ -694,6 +694,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
!workload->shadow_mm->ppgtt_mm.shadowed) {
intel_vgpu_unpin_mm(workload->shadow_mm);
gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
return -EINVAL;
}


@@ -619,6 +619,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
width = ipu_src_rect_width(new_state);
else
width = drm_rect_width(&new_state->src) >> 16;
eba = drm_plane_state_to_eba(new_state, 0);
/*
@@ -627,8 +632,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
*/
if (ipu_state->use_pre) {
axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
ipu_src_rect_width(new_state),
drm_rect_height(&new_state->src) >> 16,
fb->pitches[0], fb->format->format,
fb->modifier, &eba);
@@ -683,9 +687,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
width = ipu_src_rect_width(new_state);
height = drm_rect_height(&new_state->src) >> 16;
info = drm_format_info(fb->format->format);
ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
@@ -749,8 +752,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
ipu_cpmem_zero(ipu_plane->alpha_ch);
ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
ipu_src_rect_width(new_state),
drm_rect_height(&new_state->src) >> 16);
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);


@@ -1326,7 +1326,11 @@ static int ingenic_drm_init(void)
return err;
}
return platform_driver_register(&ingenic_drm_driver);
err = platform_driver_register(&ingenic_drm_driver);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err)
platform_driver_unregister(ingenic_ipu_driver_ptr);
return err;
}
module_init(ingenic_drm_init);


@@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv)
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
VIU_OSD_HOLD_FIFO_LINES(31) |
VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
reg |= VIU_OSD_BURST_LENGTH_32;
reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
else
reg |= VIU_OSD_BURST_LENGTH_64;
reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));


@@ -268,7 +268,8 @@ static void mgag200_pixpll_update_g200se_04(struct mgag200_pll *pixpll,
pixpllcp = pixpllc->p - 1;
pixpllcs = pixpllc->s;
xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
// For G200SE A, BIT(7) should be set unconditionally.
xpixpllcm = BIT(7) | pixpllcm;
xpixpllcn = pixpllcn;
xpixpllcp = (pixpllcs << 3) | pixpllcp;


@@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
struct panfrost_gem_mapping *mapping;
int ret;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
!(args->flags & PANFROST_BO_NOEXEC))
return -EINVAL;
bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
bo = panfrost_gem_create(dev, args->size, args->flags);
&args->handle);
if (IS_ERR(bo))
return PTR_ERR(bo);
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
if (ret)
goto out;
mapping = panfrost_gem_mapping_get(bo, priv);
if (!mapping) {
if (mapping) {
drm_gem_object_put(&bo->base.base);
args->offset = mapping->mmnode.start << PAGE_SHIFT;
return -EINVAL;
panfrost_gem_mapping_put(mapping);
} else {
/* This can only happen if the handle from
* drm_gem_handle_create() has already been guessed and freed
* by user space
*/
ret = -EINVAL;
} }
args->offset = mapping->mmnode.start << PAGE_SHIFT;
out:
panfrost_gem_mapping_put(mapping);
drm_gem_object_put(&bo->base.base);
return ret;
return 0;
}
/**


@@ -234,12 +234,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
}
struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
struct drm_device *dev, size_t size,
u32 flags,
uint32_t *handle)
{
int ret;
struct drm_gem_shmem_object *shmem;
struct panfrost_gem_object *bo;
@@ -255,16 +251,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
bo->is_heap = !!(flags & PANFROST_BO_HEAP);
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
*/
ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
if (ret)
return ERR_PTR(ret);
return bo;
}


@@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
struct drm_device *dev, size_t size,
u32 flags,
uint32_t *handle);
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,


@@ -186,7 +186,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
box->x != 0 || box->y != 0 || box->z != 0 ||
box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
box->d != 1 || box_count != 1) {
box->d != 1 || box_count != 1 ||
box->w > 64 || box->h > 64) {
/* TODO handle none page aligned offsets */
/* TODO handle more dst & src != 0 */
/* TODO handle more then one copy */


@@ -969,7 +969,10 @@
#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES 0xc055
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044


@@ -1965,6 +1965,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ELAN, 0x313a) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ELAN, 0x3148) },
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,


@@ -198,9 +198,18 @@ err:
}
static const struct hid_device_id plantronics_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};


@@ -249,7 +249,6 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
struct mlx5_core_dev *mdev;
int ret, num_counters;
u32 mdev_port_num;
if (!stats)
return -EINVAL;
@@ -270,8 +269,9 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
if (!port_num)
&mdev_port_num);
port_num = 1;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
if (!mdev) {
/* If port is not affiliated yet, its in down state
* which doesn't have any counters yet, so it would be


@@ -4499,6 +4499,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
return false;
}
static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_type qp_type)
{
int log_max_ra_res;
int log_max_ra_req;
if (qp_type == MLX5_IB_QPT_DCI) {
log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
log_max_ra_res_dc);
log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
log_max_ra_req_dc);
} else {
log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
log_max_ra_res_qp);
log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
log_max_ra_req_qp);
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > log_max_ra_res) {
mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
attr->max_rd_atomic);
return false;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > log_max_ra_req) {
mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
attr->max_dest_rd_atomic);
return false;
}
return true;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -4586,21 +4620,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
attr->max_rd_atomic >
(1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
attr->max_rd_atomic);
goto out;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic >
(1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
attr->max_dest_rd_atomic);
goto out;
}
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
err = 0;


@@ -3226,6 +3226,13 @@ static int __init parse_ivrs_acpihid(char *str)
return 1;
}
/*
* Ignore leading zeroes after ':', so e.g., AMDI0095:00
* will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
*/
while (*uid == '0' && *(uid + 1))
uid++;
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
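The parse_ivrs_acpihid() change canonicalises the UID by stripping leading zeroes while always keeping at least one character, so a command-line "AMDI0095:00" later compares equal to the firmware's "AMDI0095:0". The loop in isolation, as a runnable snippet:

#include <stdio.h>

int main(void)
{
	const char *uid = "000";   /* worst case: all zeroes */

	/* Drop leading '0's but never step past the last character. */
	while (*uid == '0' && *(uid + 1))
		uid++;

	printf("%s\n", uid);   /* prints "0" */
	return 0;
}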


@@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
return r;
}
static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
bool destroy_bm)
{
dm_sm_destroy(cmd->metadata_sm);
dm_tm_destroy(cmd->tm);
dm_block_manager_destroy(cmd->bm);
if (destroy_bm)
dm_block_manager_destroy(cmd->bm);
}
typedef unsigned long (*flags_mutator)(unsigned long);
@@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
cmd2 = lookup(bdev);
if (cmd2) {
mutex_unlock(&table_lock);
__destroy_persistent_data_objects(cmd);
__destroy_persistent_data_objects(cmd, true);
kfree(cmd);
return cmd2;
}
@@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
mutex_unlock(&table_lock);
if (!cmd->fail_io)
__destroy_persistent_data_objects(cmd);
__destroy_persistent_data_objects(cmd, true);
kfree(cmd);
}
}
@@ -1808,14 +1810,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
int r;
int r = -EINVAL;
struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
/* fail_io is double-checked with cmd->root_lock held below */
if (unlikely(cmd->fail_io))
return r;
/*
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
* cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
* shrinker associated with the block manager's bufio client vs cmd root_lock).
* - must take shrinker_rwsem without holding cmd->root_lock
*/
new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
WRITE_LOCK(cmd);
__destroy_persistent_data_objects(cmd);
if (cmd->fail_io) {
r = __create_persistent_data_objects(cmd, false);
WRITE_UNLOCK(cmd);
goto out;
}
__destroy_persistent_data_objects(cmd, false);
old_bm = cmd->bm;
if (IS_ERR(new_bm)) {
DMERR("could not create block manager during abort");
cmd->bm = NULL;
r = PTR_ERR(new_bm);
goto out_unlock;
}
cmd->bm = new_bm;
r = __open_or_format_metadata(cmd, false);
if (r) {
cmd->bm = NULL;
goto out_unlock;
}
new_bm = NULL;
out_unlock:
if (r)
cmd->fail_io = true;
WRITE_UNLOCK(cmd);
dm_block_manager_destroy(old_bm);
out:
if (new_bm && !IS_ERR(new_bm))
dm_block_manager_destroy(new_bm);
return r;
}
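The comment added to dm_cache_metadata_abort() carries the key constraint: the replacement block manager is created, and the old one destroyed, outside cmd->root_lock, because those operations take shrinker-related locks and doing them under root_lock could invert the usual lock order. A minimal pthread sketch of that "allocate outside, swap under the lock, tear down outside" pattern follows; the names are illustrative and none of this is dm-cache code.

#include <pthread.h>
#include <stdlib.h>

struct resource { int id; };

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static struct resource *current_res;

/* Creating or destroying a resource may take other locks (a shrinker, say),
 * so neither is allowed while root_lock is held. */
static struct resource *resource_create(int id)
{
	struct resource *r = malloc(sizeof(*r));
	if (r)
		r->id = id;
	return r;
}

static void resource_destroy(struct resource *r)
{
	free(r);
}

static int swap_resource(int id)
{
	struct resource *old, *fresh = resource_create(id); /* outside the lock */

	if (!fresh)
		return -1;

	pthread_mutex_lock(&root_lock);
	old = current_res;           /* only the pointer swap happens locked */
	current_res = fresh;
	pthread_mutex_unlock(&root_lock);

	resource_destroy(old);       /* outside the lock again */
	return 0;
}

int main(void)
{
	current_res = resource_create(1);
	int r = swap_resource(2);
	resource_destroy(current_res);
	return r;
}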


@@ -915,16 +915,16 @@ static void abort_transaction(struct cache *cache)
if (get_cache_mode(cache) >= CM_READ_ONLY)
return;
if (dm_cache_metadata_set_needs_check(cache->cmd)) {
DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
set_cache_mode(cache, CM_FAIL);
}
DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
if (dm_cache_metadata_abort(cache->cmd)) {
DMERR("%s: failed to abort metadata transaction", dev_name);
set_cache_mode(cache, CM_FAIL);
}
if (dm_cache_metadata_set_needs_check(cache->cmd)) {
DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
set_cache_mode(cache, CM_FAIL);
}
}
static void metadata_operation_failed(struct cache *cache, const char *op, int r)
@@ -1895,6 +1895,7 @@ static void destroy(struct cache *cache)
if (cache->prison)
dm_bio_prison_destroy_v2(cache->prison);
cancel_delayed_work_sync(&cache->waker);
if (cache->wq)
destroy_workqueue(cache->wq);


@@ -1959,6 +1959,7 @@ static void clone_dtr(struct dm_target *ti)
mempool_exit(&clone->hydration_pool);
dm_kcopyd_client_destroy(clone->kcopyd_client);
cancel_delayed_work_sync(&clone->waker);
destroy_workqueue(clone->wq);
hash_table_exit(clone);
dm_clone_metadata_close(clone->cmd);


@@ -4539,6 +4539,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
BUG_ON(!list_empty(&ic->wait_list));
if (ic->mode == 'B')
cancel_delayed_work_sync(&ic->bitmap_flush_work);
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);
if (ic->wait_wq)


@@ -724,6 +724,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
goto bad_cleanup_data_sm;
}
/*
* For pool metadata opening process, root setting is redundant
* because it will be set again in __begin_transaction(). But dm
* pool aborting process really needs to get last transaction's
* root to avoid accessing broken btree.
*/
pmd->root = le64_to_cpu(disk_super->data_mapping_root);
pmd->details_root = le64_to_cpu(disk_super->device_details_root);
__setup_btree_details(pmd);
dm_bm_unlock(sblock);
@@ -776,13 +785,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
return r;
}
static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
bool destroy_bm)
{
dm_sm_destroy(pmd->data_sm);
dm_sm_destroy(pmd->metadata_sm);
dm_tm_destroy(pmd->nb_tm);
dm_tm_destroy(pmd->tm);
dm_block_manager_destroy(pmd->bm);
if (destroy_bm)
dm_block_manager_destroy(pmd->bm);
}
static int __begin_transaction(struct dm_pool_metadata *pmd)
@@ -989,7 +1000,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
}
pmd_write_unlock(pmd);
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
__destroy_persistent_data_objects(pmd, true);
kfree(pmd);
return 0;
@@ -1888,19 +1899,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
/* fail_io is double-checked with pmd->root_lock held below */
if (unlikely(pmd->fail_io))
return r;
/*
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
* pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
* shrinker associated with the block manager's bufio client vs pmd root_lock).
* - must take shrinker_rwsem without holding pmd->root_lock
*/
new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_MAX_CONCURRENT_LOCKS);
pmd_write_lock(pmd);
if (pmd->fail_io)
if (pmd->fail_io) {
pmd_write_unlock(pmd);
goto out;
}
__set_abort_with_changes_flags(pmd);
__destroy_persistent_data_objects(pmd);
__destroy_persistent_data_objects(pmd, false);
r = __create_persistent_data_objects(pmd, false);
old_bm = pmd->bm;
if (IS_ERR(new_bm)) {
DMERR("could not create block manager during abort");
pmd->bm = NULL;
r = PTR_ERR(new_bm);
goto out_unlock;
}
pmd->bm = new_bm;
r = __open_or_format_metadata(pmd, false);
if (r) {
pmd->bm = NULL;
goto out_unlock;
}
new_bm = NULL;
out_unlock:
if (r)
pmd->fail_io = true;
out:
pmd_write_unlock(pmd);
dm_block_manager_destroy(old_bm);
out:
if (new_bm && !IS_ERR(new_bm))
dm_block_manager_destroy(new_bm);
return r;
}


@@ -2907,6 +2907,8 @@ static void __pool_destroy(struct pool *pool)
dm_bio_prison_destroy(pool->prison);
dm_kcopyd_client_destroy(pool->copier);
cancel_delayed_work_sync(&pool->waker);
cancel_delayed_work_sync(&pool->no_space_timeout);
if (pool->wq)
destroy_workqueue(pool->wq);
@@ -3566,20 +3568,28 @@ static int pool_preresume(struct dm_target *ti)
*/
r = bind_control_target(pool, ti);
if (r)
return r;
goto out;
r = maybe_resize_data_dev(ti, &need_commit1);
if (r)
return r;
goto out;
r = maybe_resize_metadata_dev(ti, &need_commit2);
if (r)
return r;
goto out;
if (need_commit1 || need_commit2)
(void) commit(pool);
out:
/*
* When a thin-pool is PM_FAIL, it cannot be rebuilt if
* bio is in deferred list. Therefore need to return 0
* to allow pool_resume() to flush IO.
*/
if (r && get_pool_mode(pool) == PM_FAIL)
r = 0;
return 0;
return r;
}
static void pool_suspend_active_thins(struct pool *pool) static void pool_suspend_active_thins(struct pool *pool)


@@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
sb = kmap_atomic(bitmap->storage.sb_page);
pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %d\n", le32_to_cpu(sb->version));
pr_debug(" version: %u\n", le32_to_cpu(sb->version));
pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
le32_to_cpu(*(__le32 *)(sb->uuid+0)),
le32_to_cpu(*(__le32 *)(sb->uuid+4)),
@@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
pr_debug("events cleared: %llu\n",
(unsigned long long) le64_to_cpu(sb->events_cleared));
pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb);
}
@@ -2106,7 +2106,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bytes = DIV_ROUND_UP(chunks, 8);
if (!bitmap->mddev->bitmap_info.external)
bytes += sizeof(bitmap_super_t);
} while (bytes > (space << 9));
} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
} else
chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
@@ -2151,7 +2152,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->counts.missing_pages = pages; bitmap->counts.missing_pages = pages;
bitmap->counts.chunkshift = chunkshift; bitmap->counts.chunkshift = chunkshift;
bitmap->counts.chunks = chunks; bitmap->counts.chunks = chunks;
bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
BITMAP_BLOCK_SHIFT); BITMAP_BLOCK_SHIFT);
blocks = min(old_counts.chunks << old_counts.chunkshift, blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2177,8 +2178,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->counts.missing_pages = old_counts.pages; bitmap->counts.missing_pages = old_counts.pages;
bitmap->counts.chunkshift = old_counts.chunkshift; bitmap->counts.chunkshift = old_counts.chunkshift;
bitmap->counts.chunks = old_counts.chunks; bitmap->counts.chunks = old_counts.chunks;
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + bitmap->mddev->bitmap_info.chunksize =
BITMAP_BLOCK_SHIFT); 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift; blocks = old_counts.chunks << old_counts.chunkshift;
pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break; break;
@@ -2519,6 +2520,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
if (csize < 512 || if (csize < 512 ||
!is_power_of_2(csize)) !is_power_of_2(csize))
return -EINVAL; return -EINVAL;
if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
sizeof(((bitmap_super_t *)0)->chunksize))))
return -EOVERFLOW;
mddev->bitmap_info.chunksize = csize; mddev->bitmap_info.chunksize = csize;
return len; return len;
} }
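Note: the new guard in chunksize_store() exists because the on-disk chunksize field is 32 bits wide (it is read with le32_to_cpu() in the debug dump above), so on a 64-bit host a larger csize would be silently truncated when written back. A self-contained sketch of that overflow guard follows; the struct here is a stand-in, not the real bitmap_super_t layout.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

struct bitmap_super { uint32_t chunksize; };	/* stand-in for bitmap_super_t */

static int check_chunksize(unsigned long long csize)
{
	/* Anything >= 1ULL << 32 cannot fit the 32-bit superblock field. */
	if (sizeof(long) * BITS_PER_BYTE > 32 &&
	    csize >= (1ULL << (BITS_PER_BYTE *
			       sizeof(((struct bitmap_super *)0)->chunksize))))
		return -1;	/* -EOVERFLOW in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", check_chunksize(1ULL << 32));	/* rejected */
	printf("%d\n", check_chunksize(1UL << 20));	/* accepted */
	return 0;
}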


@@ -526,13 +526,14 @@ static void md_end_flush(struct bio *bio)
 	struct md_rdev *rdev = bio->bi_private;
 	struct mddev *mddev = rdev->mddev;
-	bio_put(bio);
 	rdev_dec_pending(rdev, mddev);
 	if (atomic_dec_and_test(&mddev->flush_pending)) {
 		/* The pre-request flush has finished */
 		queue_work(md_wq, &mddev->flush_work);
 	}
+	bio_put(bio);
 }
 static void md_submit_flush_data(struct work_struct *ws);
@@ -935,10 +936,12 @@ static void super_written(struct bio *bio)
 	} else
 		clear_bit(LastDev, &rdev->flags);
-	bio_put(bio);
-	rdev_dec_pending(rdev, mddev);
 	if (atomic_dec_and_test(&mddev->pending_writes))
 		wake_up(&mddev->sb_wait);
+	rdev_dec_pending(rdev, mddev);
+	bio_put(bio);
 }
 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,


@@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
 	if (mutex_lock_interruptible(&dmxdev->mutex))
 		return -ERESTARTSYS;
+	if (dmxdev->exit) {
+		mutex_unlock(&dmxdev->mutex);
+		return -ENODEV;
+	}
 	for (i = 0; i < dmxdev->filternum; i++)
 		if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
 			break;
@@ -1458,7 +1463,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init);
 void dvb_dmxdev_release(struct dmxdev *dmxdev)
 {
+	mutex_lock(&dmxdev->mutex);
 	dmxdev->exit = 1;
+	mutex_unlock(&dmxdev->mutex);
 	if (dmxdev->dvbdev->users > 1) {
 		wait_event(dmxdev->dvbdev->wait_queue,
 			   dmxdev->dvbdev->users == 1);


@@ -345,6 +345,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
 				   GFP_KERNEL);
 		if (!dvbdev->pads) {
 			kfree(dvbdev->entity);
+			dvbdev->entity = NULL;
 			return -ENOMEM;
 		}
 	}


@@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
 	struct stv0288_state *state = fe->demodulator_priv;
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-	char tm;
-	unsigned char tda[3];
-	u8 reg, time_out = 0;
+	u8 tda[3], reg, time_out = 0;
+	s8 tm;
 	dprintk("%s : FE_SET_FRONTEND\n", __func__);


@@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
 	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 	/* Wait until instance is returned or timeout occurred */
 	if (s5p_mfc_wait_for_done_ctx(ctx,
-				S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0))
+				S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)){
+		clear_work_bit_irqsave(ctx);
 		mfc_err("Err returning instance\n");
+	}
 	/* Free resources */
 	s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);


@@ -1218,6 +1218,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
 	unsigned long mb_y_addr, mb_c_addr;
 	int slice_type;
 	unsigned int strm_size;
+	bool src_ready;
 	slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
 	strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
@@ -1257,7 +1258,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
 			}
 		}
 	}
-	if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
+	if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING ||
+				       ctx->state == MFCINST_FINISHING)) {
 		mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
 							list);
 		if (mb_entry->flags & MFC_BUF_FLAG_USED) {
@@ -1288,7 +1290,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
 		vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
 		vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
 	}
-	if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
+
+	src_ready = true;
+	if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0)
+		src_ready = false;
+	if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
+		src_ready = false;
+	if (!src_ready || ctx->dst_queue_cnt == 0)
 		clear_work_bit(ctx);
 	return 0;


@@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
 	}
 	/* aspect ratio VUI */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x1 << 5);
 	reg |= ((p_h264->vui_sar & 0x1) << 5);
 	writel(reg, mfc_regs->e_h264_options);
@@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
 	/* intra picture period for H.264 open GOP */
 	/* control */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x1 << 4);
 	reg |= ((p_h264->open_gop & 0x1) << 4);
 	writel(reg, mfc_regs->e_h264_options);
@@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
 	}
 	/* 'WEIGHTED_BI_PREDICTION' for B is disable */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x3 << 9);
 	writel(reg, mfc_regs->e_h264_options);
 	/* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x1 << 14);
 	writel(reg, mfc_regs->e_h264_options);
 	/* ASO */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x1 << 6);
 	reg |= ((p_h264->aso & 0x1) << 6);
 	writel(reg, mfc_regs->e_h264_options);
 	/* hier qp enable */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x1 << 8);
 	reg |= ((p_h264->open_gop & 0x1) << 8);
 	writel(reg, mfc_regs->e_h264_options);
@@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
 	writel(reg, mfc_regs->e_h264_num_t_layer);
 	/* frame packing SEI generation */
-	readl(mfc_regs->e_h264_options);
+	reg = readl(mfc_regs->e_h264_options);
 	reg &= ~(0x1 << 25);
 	reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
 	writel(reg, mfc_regs->e_h264_options);
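Note: every hunk in this file is the same fix: the readl() result was discarded, so the following reg &= / reg |= sequence modified a stale value instead of performing a read-modify-write. A tiny sketch of the intended pattern, with a plain variable standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xffffffff;	/* stands in for mfc_regs->e_h264_options */

static uint32_t readl_stub(void) { return fake_reg; }
static void writel_stub(uint32_t v) { fake_reg = v; }

int main(void)
{
	uint32_t reg;
	uint32_t aso = 1;

	/* Read-modify-write: the read result must be captured. */
	reg = readl_stub();
	reg &= ~(0x1u << 6);
	reg |= (aso & 0x1u) << 6;
	writel_stub(reg);

	printf("0x%08x\n", fake_reg);
	return 0;
}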


@@ -402,7 +402,7 @@ static int mt6360_regmap_read(void *context, const void *reg, size_t reg_size,
 	struct mt6360_ddata *ddata = context;
 	u8 bank = *(u8 *)reg;
 	u8 reg_addr = *(u8 *)(reg + 1);
-	struct i2c_client *i2c = ddata->i2c[bank];
+	struct i2c_client *i2c;
 	bool crc_needed = false;
 	u8 *buf;
 	int buf_len = MT6360_ALLOC_READ_SIZE(val_size);
@@ -410,6 +410,11 @@
 	u8 crc;
 	int ret;
+	if (bank >= MT6360_SLAVE_MAX)
+		return -EINVAL;
+
+	i2c = ddata->i2c[bank];
+
 	if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
 		crc_needed = true;
 		ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size);
@@ -453,13 +458,18 @@ static int mt6360_regmap_write(void *context, const void *val, size_t val_size)
 	struct mt6360_ddata *ddata = context;
 	u8 bank = *(u8 *)val;
 	u8 reg_addr = *(u8 *)(val + 1);
-	struct i2c_client *i2c = ddata->i2c[bank];
+	struct i2c_client *i2c;
 	bool crc_needed = false;
 	u8 *buf;
 	int buf_len = MT6360_ALLOC_WRITE_SIZE(val_size);
 	int write_size = val_size - MT6360_REGMAP_REG_BYTE_SIZE;
 	int ret;
+	if (bank >= MT6360_SLAVE_MAX)
+		return -EINVAL;
+
+	i2c = ddata->i2c[bank];
+
 	if (bank == MT6360_SLAVE_PMIC || bank == MT6360_SLAVE_LDO) {
 		crc_needed = true;
 		ret = mt6360_xlate_pmicldo_addr(&reg_addr, val_size - MT6360_REGMAP_REG_BYTE_SIZE);
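Note: both regmap callbacks index ddata->i2c[] with a bank number decoded from the raw register bytes, so the added bank >= MT6360_SLAVE_MAX test is a validate-before-index fix. A hedged userspace sketch of that pattern (the array size and names below are made up for illustration):

#include <stdio.h>

#define SLAVE_MAX 4

static const char *clients[SLAVE_MAX] = { "pmic", "ldo", "chg", "tcpc" };

static int lookup(unsigned int bank, const char **out)
{
	if (bank >= SLAVE_MAX)		/* reject out-of-range banks first */
		return -22;		/* -EINVAL */
	*out = clients[bank];
	return 0;
}

int main(void)
{
	const char *c;

	if (!lookup(1, &c))
		printf("bank 1 -> %s\n", c);
	printf("bank 9 -> %d\n", lookup(9, &c));
	return 0;
}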


@@ -224,13 +224,15 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
 	div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
 	sdhci_enable_clk(host, div);
-	/* enable auto gate sdhc_enable_auto_gate */
-	val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
-	mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
-	       SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
-	if (mask != (val & mask)) {
-		val |= mask;
-		sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+	/* Enable CLK_AUTO when the clock is greater than 400K. */
+	if (clk > 400000) {
+		val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+		mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
+		       SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+		if (mask != (val & mask)) {
+			val |= mask;
+			sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+		}
 	}
 }


@@ -2049,6 +2049,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
 		return;
 	kref_get(&vub300->kref);
 	if (enable) {
+		set_current_state(TASK_RUNNING);
 		mutex_lock(&vub300->irq_mutex);
 		if (vub300->irqs_queued) {
 			vub300->irqs_queued -= 1;
@@ -2064,6 +2065,7 @@
 			vub300_queue_poll_work(vub300, 0);
 		}
 		mutex_unlock(&vub300->irq_mutex);
+		set_current_state(TASK_INTERRUPTIBLE);
 	} else {
 		vub300->irq_enabled = 0;
 	}
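Note: the fix brackets the blocking section with set_current_state(TASK_RUNNING) and restores TASK_INTERRUPTIBLE afterwards, because sleeping primitives such as mutex_lock() are only legal while the task is runnable. The toy model below only mimics that rule with a flag and a printf; it is not the kernel API and the state names are reused purely for illustration.

#include <stdio.h>

enum task_state { TASK_RUNNING, TASK_INTERRUPTIBLE };

static enum task_state cur_state = TASK_INTERRUPTIBLE;

static void might_sleep_op(void)
{
	/* A blocking call is only allowed while the task is runnable. */
	if (cur_state != TASK_RUNNING)
		printf("WARNING: blocking op while not TASK_RUNNING\n");
	else
		printf("blocking op OK\n");
}

int main(void)
{
	might_sleep_op();		/* would warn, like the unfixed driver */

	cur_state = TASK_RUNNING;	/* set_current_state(TASK_RUNNING); */
	might_sleep_op();		/* safe: mutex_lock() etc. allowed here */
	cur_state = TASK_INTERRUPTIBLE;	/* restore before going back to sleep */
	return 0;
}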


@@ -1409,6 +1409,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
 			continue;
 		erase = &map->erase_type[i];
+		if (!erase->size)
+			continue;
 		/* Alignment is not mandatory for overlaid regions */
 		if (region->offset & SNOR_OVERLAID_REGION &&


@@ -1539,6 +1539,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
 			slave_err(bond->dev, port->slave->dev,
 				  "Port %d did not find a suitable aggregator\n",
 				  port->actor_port_number);
+			return;
 		}
 	}
 	/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE


@@ -2,7 +2,6 @@
 config NET_DSA_MV88E6XXX
 	tristate "Marvell 88E6xxx Ethernet switch fabric support"
 	depends on NET_DSA
-	depends on PTP_1588_CLOCK_OPTIONAL
 	select IRQ_DOMAIN
 	select NET_DSA_TAG_EDSA
 	select NET_DSA_TAG_DSA
@@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX
 config NET_DSA_MV88E6XXX_PTP
 	bool "PTP support for Marvell 88E6xxx"
 	default n
-	depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
+	depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
+		   (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
 	help
 	  Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
 	  chips that support it.


@@ -2392,29 +2392,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 		return -EOPNOTSUPP;
 	}
-	switch (func) {
-	case ENA_ADMIN_TOEPLITZ:
-		if (key) {
-			if (key_len != sizeof(hash_key->key)) {
-				netdev_err(ena_dev->net_device,
-					   "key len (%u) doesn't equal the supported size (%zu)\n",
-					   key_len, sizeof(hash_key->key));
-				return -EINVAL;
-			}
-			memcpy(hash_key->key, key, key_len);
-			rss->hash_init_val = init_val;
-			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
+	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+		if (key_len != sizeof(hash_key->key)) {
+			netdev_err(ena_dev->net_device,
+				   "key len (%u) doesn't equal the supported size (%zu)\n",
+				   key_len, sizeof(hash_key->key));
+			return -EINVAL;
 		}
-		break;
-	case ENA_ADMIN_CRC32:
-		rss->hash_init_val = init_val;
-		break;
-	default:
-		netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
-			   func);
-		return -EINVAL;
+		memcpy(hash_key->key, key, key_len);
+		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
 	}
+	rss->hash_init_val = init_val;
 	old_func = rss->hash_func;
 	rss->hash_func = func;
 	rc = ena_com_set_hash_function(ena_dev);


@@ -880,11 +880,7 @@ static int ena_set_tunable(struct net_device *netdev,
 	switch (tuna->id) {
 	case ETHTOOL_RX_COPYBREAK:
 		len = *(u32 *)data;
-		if (len > adapter->netdev->mtu) {
-			ret = -EINVAL;
-			break;
-		}
-		adapter->rx_copybreak = len;
+		ret = ena_set_rx_copybreak(adapter, len);
 		break;
 	default:
 		ret = -EINVAL;


@@ -378,9 +378,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 {
+	u32 verdict = ENA_XDP_PASS;
 	struct bpf_prog *xdp_prog;
 	struct ena_ring *xdp_ring;
-	u32 verdict = XDP_PASS;
 	struct xdp_frame *xdpf;
 	u64 *xdp_stat;
@@ -397,7 +397,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 		if (unlikely(!xdpf)) {
 			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-			verdict = XDP_ABORTED;
+			verdict = ENA_XDP_DROP;
 			break;
 		}
@@ -413,29 +413,35 @@
 		spin_unlock(&xdp_ring->xdp_tx_lock);
 		xdp_stat = &rx_ring->rx_stats.xdp_tx;
+		verdict = ENA_XDP_TX;
 		break;
 	case XDP_REDIRECT:
 		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
 			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+			verdict = ENA_XDP_REDIRECT;
 			break;
 		}
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-		verdict = XDP_ABORTED;
+		verdict = ENA_XDP_DROP;
 		break;
 	case XDP_ABORTED:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+		verdict = ENA_XDP_DROP;
 		break;
 	case XDP_DROP:
 		xdp_stat = &rx_ring->rx_stats.xdp_drop;
+		verdict = ENA_XDP_DROP;
 		break;
 	case XDP_PASS:
 		xdp_stat = &rx_ring->rx_stats.xdp_pass;
+		verdict = ENA_XDP_PASS;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+		verdict = ENA_XDP_DROP;
 	}
 	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
@@ -516,16 +522,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
 						 struct bpf_prog *prog,
 						 int first, int count)
 {
+	struct bpf_prog *old_bpf_prog;
 	struct ena_ring *rx_ring;
 	int i = 0;
 	for (i = first; i < count; i++) {
 		rx_ring = &adapter->rx_ring[i];
-		xchg(&rx_ring->xdp_bpf_prog, prog);
-		if (prog) {
+		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+		if (!old_bpf_prog && prog) {
 			ena_xdp_register_rxq_info(rx_ring);
 			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
-		} else {
+		} else if (old_bpf_prog && !prog) {
 			ena_xdp_unregister_rxq_info(rx_ring);
 			rx_ring->rx_headroom = NET_SKB_PAD;
 		}
@@ -676,6 +684,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
 	ring->ena_dev = adapter->ena_dev;
 	ring->per_napi_packets = 0;
 	ring->cpu = 0;
+	ring->numa_node = 0;
 	ring->no_interrupt_event_cnt = 0;
 	u64_stats_init(&ring->syncp);
 }
@@ -779,6 +788,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	tx_ring->cpu = ena_irq->cpu;
+	tx_ring->numa_node = node;
 	return 0;
 err_push_buf_intermediate_buf:
@@ -911,6 +921,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	rx_ring->cpu = ena_irq->cpu;
+	rx_ring->numa_node = node;
 	return 0;
 }
@@ -1629,12 +1640,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	 * we expect, then we simply drop it
 	 */
 	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
-		return XDP_DROP;
+		return ENA_XDP_DROP;
 	ret = ena_xdp_execute(rx_ring, xdp);
 	/* The xdp program might expand the headers */
-	if (ret == XDP_PASS) {
+	if (ret == ENA_XDP_PASS) {
 		rx_info->page_offset = xdp->data - xdp->data_hard_start;
 		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
 	}
@@ -1673,7 +1684,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
 	do {
-		xdp_verdict = XDP_PASS;
+		xdp_verdict = ENA_XDP_PASS;
 		skb = NULL;
 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
@@ -1701,7 +1712,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
 		/* allocate skb and fill it */
-		if (xdp_verdict == XDP_PASS)
+		if (xdp_verdict == ENA_XDP_PASS)
 			skb = ena_rx_skb(rx_ring,
 					 rx_ring->ena_bufs,
 					 ena_rx_ctx.descs,
@@ -1719,14 +1730,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 			/* Packets was passed for transmission, unmap it
 			 * from RX side.
 			 */
-			if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+			if (xdp_verdict & ENA_XDP_FORWARDED) {
 				ena_unmap_rx_buff(rx_ring,
 						  &rx_ring->rx_buffer_info[req_id]);
 				rx_ring->rx_buffer_info[req_id].page = NULL;
 			}
 		}
-		if (xdp_verdict != XDP_PASS) {
+		if (xdp_verdict != ENA_XDP_PASS) {
 			xdp_flags |= xdp_verdict;
+			total_len += ena_rx_ctx.ena_bufs[0].len;
 			res_budget--;
 			continue;
 		}
@@ -1770,7 +1782,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		ena_refill_rx_bufs(rx_ring, refill_required);
 	}
-	if (xdp_flags & XDP_REDIRECT)
+	if (xdp_flags & ENA_XDP_REDIRECT)
 		xdp_do_flush_map();
 	return work_done;
@@ -1827,8 +1839,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
 				 struct ena_ring *rx_ring)
 {
+	u32 rx_interval = tx_ring->smoothed_interval;
 	struct ena_eth_io_intr_reg intr_reg;
-	u32 rx_interval = 0;
 	/* Rx ring can be NULL when for XDP tx queues which don't have an
 	 * accompanying rx_ring pair.
 	 */
@@ -1866,20 +1879,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 	if (likely(tx_ring->cpu == cpu))
 		goto out;
+	tx_ring->cpu = cpu;
+	if (rx_ring)
+		rx_ring->cpu = cpu;
+
 	numa_node = cpu_to_node(cpu);
+
+	if (likely(tx_ring->numa_node == numa_node))
+		goto out;
+
 	put_cpu();
 	if (numa_node != NUMA_NO_NODE) {
 		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
-		if (rx_ring)
+		tx_ring->numa_node = numa_node;
+		if (rx_ring) {
+			rx_ring->numa_node = numa_node;
 			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
 						 numa_node);
+		}
 	}
-	tx_ring->cpu = cpu;
-	if (rx_ring)
-		rx_ring->cpu = cpu;
 	return;
 out:
 	put_cpu();
@@ -2000,11 +2020,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 			ena_adjust_adaptive_rx_intr_moderation(ena_napi);
+		ena_update_ring_numa_node(tx_ring, rx_ring);
 		ena_unmask_interrupt(tx_ring, rx_ring);
 	}
-	ena_update_ring_numa_node(tx_ring, rx_ring);
 	ret = rx_work_done;
 } else {
 	ret = budget;
@@ -2391,7 +2410,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
 	ctx.msix_vector = msix_vector;
 	ctx.queue_size = tx_ring->ring_size;
-	ctx.numa_node = cpu_to_node(tx_ring->cpu);
+	ctx.numa_node = tx_ring->numa_node;
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
@@ -2459,7 +2478,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	ctx.msix_vector = msix_vector;
 	ctx.queue_size = rx_ring->ring_size;
-	ctx.numa_node = cpu_to_node(rx_ring->cpu);
+	ctx.numa_node = rx_ring->numa_node;
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
@@ -2820,6 +2839,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
 	return dev_was_up ? ena_up(adapter) : 0;
 }
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
+{
+	struct ena_ring *rx_ring;
+	int i;
+
+	if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
+		return -EINVAL;
+
+	adapter->rx_copybreak = rx_copybreak;
+
+	for (i = 0; i < adapter->num_io_queues; i++) {
+		rx_ring = &adapter->rx_ring[i];
+		rx_ring->rx_copybreak = rx_copybreak;
+	}
+
+	return 0;
+}
+
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
 {
 	struct ena_com_dev *ena_dev = adapter->ena_dev;


@@ -273,9 +273,11 @@ struct ena_ring {
 	bool disable_meta_caching;
 	u16 no_interrupt_event_cnt;
-	/* cpu for TPH */
+	/* cpu and NUMA for TPH */
 	int cpu;
+	int numa_node;
+
 	/* number of tx/rx_buffer_info's entries */
 	int ring_size;
 	enum ena_admin_placement_policy_type tx_mem_queue_type;
@@ -404,6 +406,8 @@
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
+
 int ena_get_sset_count(struct net_device *netdev, int sset);
 enum ena_xdp_errors_t {
@@ -412,6 +416,15 @@
 	ENA_XDP_NO_ENOUGH_QUEUES,
 };
+enum ENA_XDP_ACTIONS {
+	ENA_XDP_PASS = 0,
+	ENA_XDP_TX = BIT(0),
+	ENA_XDP_REDIRECT = BIT(1),
+	ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
 static inline bool ena_xdp_present(struct ena_adapter *adapter)
 {
 	return !!adapter->xdp_bpf_prog;
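Note: because the new ENA_XDP_* verdicts are distinct bits (unlike the raw XDP_* action codes they replace), ena_clean_rx_irq() can OR each packet's verdict into xdp_flags and make one decision per poll: unmap when any forwarding bit is set, flush the redirect maps only if ENA_XDP_REDIRECT ever appeared. A small standalone sketch of that accumulate-then-test idea, with the enum values reproduced from the hunk above:

#include <stdio.h>

enum {
	ENA_XDP_PASS     = 0,
	ENA_XDP_TX       = 1 << 0,
	ENA_XDP_REDIRECT = 1 << 1,
	ENA_XDP_DROP     = 1 << 2,
};
#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)

int main(void)
{
	int verdicts[] = { ENA_XDP_PASS, ENA_XDP_TX, ENA_XDP_DROP, ENA_XDP_REDIRECT };
	unsigned int xdp_flags = 0;
	unsigned int i;

	for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++) {
		if (verdicts[i] & ENA_XDP_FORWARDED)
			printf("packet %u: unmap rx buffer\n", i);
		xdp_flags |= verdicts[i];	/* accumulate per-poll flags */
	}

	if (xdp_flags & ENA_XDP_REDIRECT)
		printf("flush redirect maps once per poll\n");
	return 0;
}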


@@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+	tasklet_kill(&pdata->tasklet_dev);
+	tasklet_kill(&pdata->tasklet_ecc);
+
 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
 		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);


@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
 	xgbe_i2c_disable(pdata);
 	xgbe_i2c_clear_all_interrupts(pdata);
-	if (pdata->dev_irq != pdata->i2c_irq)
+	if (pdata->dev_irq != pdata->i2c_irq) {
 		devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+		tasklet_kill(&pdata->tasklet_i2c);
+	}
 }
 static int xgbe_i2c_start(struct xgbe_prv_data *pdata)


@@ -1390,8 +1390,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
 	/* Disable auto-negotiation */
 	xgbe_an_disable_all(pdata);
-	if (pdata->dev_irq != pdata->an_irq)
+	if (pdata->dev_irq != pdata->an_irq) {
 		devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+		tasklet_kill(&pdata->tasklet_an);
+	}
 	pdata->phy_if.phy_impl.stop(pdata);


@@ -1005,9 +1005,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
 		return false;
 	if (ALIGN(len, dma_get_cache_alignment()) > space) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_spare_full++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_spare_full);
 		return false;
 	}
@@ -1024,9 +1022,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
 		return false;
 	if (space < HNS3_MAX_SGL_SIZE) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_spare_full++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_spare_full);
 		return false;
 	}
@@ -1554,9 +1550,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 		ret = hns3_handle_vtags(ring, skb);
 		if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_vlan_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_vlan_err);
 			return ret;
 		} else if (ret == HNS3_INNER_VLAN_TAG) {
 			inner_vtag = skb_vlan_tag_get(skb);
@@ -1591,9 +1585,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 		if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_l4_proto_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_l4_proto_err);
 			return ret;
 		}
@@ -1601,18 +1593,14 @@
 					      &type_cs_vlan_tso,
 					      &ol_type_vlan_len_msec);
 		if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_l2l3l4_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_l2l3l4_err);
 			return ret;
 		}
 		ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
 				   &type_cs_vlan_tso, &desc_cb->send_bytes);
 		if (unlikely(ret < 0)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.tx_tso_err++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, tx_tso_err);
 			return ret;
 		}
 	}
@@ -1705,9 +1693,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	}
 	if (unlikely(dma_mapping_error(dev, dma))) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);
 		return -ENOMEM;
 	}
@@ -1853,9 +1839,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 	 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
 	 */
 	if (bd_num == UINT_MAX) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.over_max_recursion++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, over_max_recursion);
 		return -ENOMEM;
 	}
@@ -1864,16 +1848,12 @@
 	 */
 	if (skb->len > HNS3_MAX_TSO_SIZE ||
 	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.hw_limitation++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, hw_limitation);
 		return -ENOMEM;
 	}
 	if (__skb_linearize(skb)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);
 		return -ENOMEM;
 	}
@@ -1903,9 +1883,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 		bd_num = hns3_tx_bd_count(skb->len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_copy++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_copy);
 	}
 out:
@@ -1925,9 +1903,7 @@ out:
 		return bd_num;
 	}
-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.tx_busy++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, tx_busy);
 	return -EBUSY;
 }
@@ -2012,9 +1988,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
 	ring->pending_buf += num;
 	if (!doorbell) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_more++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, tx_more);
 		return;
 	}
@@ -2064,9 +2038,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
 	ret = skb_copy_bits(skb, 0, buf, size);
 	if (unlikely(ret < 0)) {
 		hns3_tx_spare_rollback(ring, cb_len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.copy_bits_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, copy_bits_err);
 		return ret;
 	}
@@ -2089,9 +2061,8 @@
 	dma_sync_single_for_device(ring_to_dev(ring), dma, size,
 				   DMA_TO_DEVICE);
-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.tx_bounce++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, tx_bounce);
+
 	return bd_num;
 }
@@ -2121,9 +2092,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
 	nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
 	if (unlikely(nents < 0)) {
 		hns3_tx_spare_rollback(ring, cb_len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.skb2sgl_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, skb2sgl_err);
 		return -ENOMEM;
 	}
@@ -2132,9 +2101,7 @@
 				DMA_TO_DEVICE);
 	if (unlikely(!sgt->nents)) {
 		hns3_tx_spare_rollback(ring, cb_len);
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.map_sg_err++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, map_sg_err);
 		return -ENOMEM;
 	}
@@ -2146,10 +2113,7 @@
 	for (i = 0; i < sgt->nents; i++)
 		bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
 					 sg_dma_len(sgt->sgl + i));
-
-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.tx_sgl++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, tx_sgl);
 	return bd_num;
 }
@@ -2188,9 +2152,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
 		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);
 		return NETDEV_TX_OK;
 	}
@@ -3522,17 +3484,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 	for (i = 0; i < cleand_count; i++) {
 		desc_cb = &ring->desc_cb[ring->next_to_use];
 		if (desc_cb->reuse_flag) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.reuse_pg_cnt++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, reuse_pg_cnt);
 			hns3_reuse_buffer(ring, ring->next_to_use);
 		} else {
 			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
 			if (ret) {
-				u64_stats_update_begin(&ring->syncp);
-				ring->stats.sw_err_cnt++;
-				u64_stats_update_end(&ring->syncp);
+				hns3_ring_stats_update(ring, sw_err_cnt);
 				hns3_rl_err(ring_to_netdev(ring),
 					    "alloc rx buffer failed: %d\n",
@@ -3544,9 +3502,7 @@
 			}
 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.non_reuse_pg++;
-			u64_stats_update_end(&ring->syncp);
+			hns3_ring_stats_update(ring, non_reuse_pg);
 		}
 		ring_ptr_move_fw(ring, next_to_use);
@@ -3561,6 +3517,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
 	return page_count(cb->priv) == cb->pagecnt_bias;
 }
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+				    struct hns3_enet_ring *ring,
+				    int pull_len,
+				    struct hns3_desc_cb *desc_cb)
+{
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	u32 frag_offset = desc_cb->page_offset + pull_len;
+	int size = le16_to_cpu(desc->rx.size);
+	u32 frag_size = size - pull_len;
+	void *frag = napi_alloc_frag(frag_size);
+
+	if (unlikely(!frag)) {
+		hns3_ring_stats_update(ring, frag_alloc_err);
+		hns3_rl_err(ring_to_netdev(ring),
+			    "failed to allocate rx frag\n");
+		return -ENOMEM;
+	}
+
+	desc_cb->reuse_flag = 1;
+	memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+	skb_add_rx_frag(skb, i, virt_to_page(frag),
+			offset_in_page(frag), frag_size, frag_size);
+
+	hns3_ring_stats_update(ring, frag_alloc);
+	return 0;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -3570,6 +3554,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 	u32 frag_size = size - pull_len;
+	int ret = 0;
 	bool reused;
 	if (ring->page_pool) {
@@ -3604,27 +3589,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 		desc_cb->page_offset = 0;
 		desc_cb->reuse_flag = 1;
 	} else if (frag_size <= ring->rx_copybreak) {
-		void *frag = napi_alloc_frag(frag_size);
-
-		if (unlikely(!frag)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.frag_alloc_err++;
-			u64_stats_update_end(&ring->syncp);
-			hns3_rl_err(ring_to_netdev(ring),
-				    "failed to allocate rx frag\n");
-			goto out;
-		}
-
-		desc_cb->reuse_flag = 1;
-		memcpy(frag, desc_cb->buf + frag_offset, frag_size);
-		skb_add_rx_frag(skb, i, virt_to_page(frag),
-				offset_in_page(frag), frag_size, frag_size);
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.frag_alloc++;
-		u64_stats_update_end(&ring->syncp);
-
-		return;
+		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+		if (!ret)
+			return;
 	}
 out:
@@ -3700,20 +3667,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
 	return 0;
 }
-static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
+static void hns3_checksum_complete(struct hns3_enet_ring *ring,
 				   struct sk_buff *skb, u32 ptype, u16 csum)
 {
 	if (ptype == HNS3_INVALID_PTYPE ||
 	    hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
-		return false;
+		return;
-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.csum_complete++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, csum_complete);
 	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->csum = csum_unfold((__force __sum16)csum);
-
-	return true;
 }
 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
@@ -3773,8 +3736,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 	ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
 				HNS3_RXD_PTYPE_S);
-	if (hns3_checksum_complete(ring, skb, ptype, csum))
-		return;
+	hns3_checksum_complete(ring, skb, ptype, csum);
 	/* check if hardware has done checksum */
 	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
@@ -3783,9 +3745,8 @@
 	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
 				 BIT(HNS3_RXD_OL3E_B) |
 				 BIT(HNS3_RXD_OL4E_B)))) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.l3l4_csum_err++;
-		u64_stats_update_end(&ring->syncp);
+		skb->ip_summed = CHECKSUM_NONE;
+		hns3_ring_stats_update(ring, l3l4_csum_err);
 		return;
 	}
@@ -3876,10 +3837,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 	skb = ring->skb;
 	if (unlikely(!skb)) {
 		hns3_rl_err(netdev, "alloc rx skb fail\n");
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, sw_err_cnt);
 		return -ENOMEM;
 	}
@@ -3910,9 +3868,7 @@
 	if (ring->page_pool)
 		skb_mark_for_recycle(skb);
-	u64_stats_update_begin(&ring->syncp);
-	ring->stats.seg_pkt_cnt++;
-	u64_stats_update_end(&ring->syncp);
+	hns3_ring_stats_update(ring, seg_pkt_cnt);
 	ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
 	__skb_put(skb, ring->pull_len);
@@ -4104,9 +4060,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
 	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
 					bd_base_info, ol_info, csum);
 	if (unlikely(ret)) {
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.rx_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
+		hns3_ring_stats_update(ring, rx_err_cnt);
 		return ret;
 	}
@@ -5318,9 +5272,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
 			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
 			if (ret) {
-				u64_stats_update_begin(&ring->syncp);
-				ring->stats.sw_err_cnt++;
-				u64_stats_update_end(&ring->syncp);
+				hns3_ring_stats_update(ring, sw_err_cnt);
 				/* if alloc new buffer fail, exit directly
 				 * and reclear in up flow.
 				 */


@@ -654,6 +654,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define hns3_buf_size(_ring) ((_ring)->buf_size)
+#define hns3_ring_stats_update(ring, cnt) do { \
+	typeof(ring) (tmp) = (ring); \
+	u64_stats_update_begin(&(tmp)->syncp); \
+	((tmp)->stats.cnt)++; \
+	u64_stats_update_end(&(tmp)->syncp); \
+} while (0) \
+
 static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
 {
 #if (PAGE_SIZE < 8192)
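Note: most of the hns3_enet.c churn above is mechanical; each open-coded u64_stats_update_begin()/counter++/u64_stats_update_end() triple collapses into one hns3_ring_stats_update(ring, field) call. The sketch below models only the macro's shape: the real seqcount protection is replaced by a plain depth counter so the example stays self-contained, and the struct layout is invented for illustration.

#include <stdio.h>

struct ring_stats { unsigned long long tx_busy, tx_more; };
struct ring { struct ring_stats stats; int sync_depth; };

/* Simplified stand-in for u64_stats_update_begin()/end() bracketing. */
#define ring_stats_update(r, field) do {		\
	typeof(r) _tmp = (r);				\
	_tmp->sync_depth++;				\
	_tmp->stats.field++;				\
	_tmp->sync_depth--;				\
} while (0)

int main(void)
{
	struct ring r = { { 0, 0 }, 0 };

	ring_stats_update(&r, tx_busy);
	ring_stats_update(&r, tx_more);
	printf("tx_busy=%llu tx_more=%llu\n", r.stats.tx_busy, r.stats.tx_more);
	return 0;
}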


@@ -12825,60 +12825,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
 	return ret;
 }
-static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
 {
-	struct hclge_vport *vport = &hdev->vport[0];
 	struct hnae3_handle *handle = &vport->nic;
+	struct hclge_dev *hdev = vport->back;
+	bool uc_en = false;
+	bool mc_en = false;
 	u8 tmp_flags;
+	bool bc_en;
 	int ret;
-	u16 i;
 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
 		vport->last_promisc_flags = vport->overflow_promisc_flags;
 	}
-	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
-		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
-		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
-					     tmp_flags & HNAE3_MPE);
-		if (!ret) {
-			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
-				  &vport->state);
-			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
-				&vport->state);
-		}
-	}
-
-	for (i = 1; i < hdev->num_alloc_vport; i++) {
-		bool uc_en = false;
-		bool mc_en = false;
-		bool bc_en;
-
-		vport = &hdev->vport[i];
-
-		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
-					&vport->state))
-			continue;
-
-		if (vport->vf_info.trusted) {
-			uc_en = vport->vf_info.request_uc_en > 0 ||
-				vport->overflow_promisc_flags &
-				HNAE3_OVERFLOW_UPE;
-			mc_en = vport->vf_info.request_mc_en > 0 ||
-				vport->overflow_promisc_flags &
-				HNAE3_OVERFLOW_MPE;
-		}
-		bc_en = vport->vf_info.request_bc_en > 0;
-
-		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
-						 mc_en, bc_en);
-		if (ret) {
-			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
-				&vport->state);
-			return;
-		}
-		hclge_set_vport_vlan_fltr_change(vport);
+	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+				&vport->state))
+		return 0;
+
+	/* for PF */
+	if (!vport->vport_id) {
+		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+					     tmp_flags & HNAE3_MPE);
+		if (!ret)
+			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+				&vport->state);
+		else
+			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+				&vport->state);
+		return ret;
+	}
+
+	/* for VF */
+	if (vport->vf_info.trusted) {
+		uc_en = vport->vf_info.request_uc_en > 0 ||
+			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
+		mc_en = vport->vf_info.request_mc_en > 0 ||
+			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
+	}
+	bc_en = vport->vf_info.request_bc_en > 0;
+
+	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+					 mc_en, bc_en);
+	if (ret) {
+		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+		return ret;
+	}
+	hclge_set_vport_vlan_fltr_change(vport);
+
+	return 0;
+}
+
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport;
+	int ret;
+	u16 i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		vport = &hdev->vport[i];
+
+		ret = hclge_sync_vport_promisc_mode(vport);
+		if (ret)
+			return;
 	}
 }


@@ -3258,7 +3258,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
 	struct pci_dev *pdev = hdev->pdev;
 	int ret = 0;
-	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+	     hdev->reset_type == HNAE3_FLR_RESET) &&
 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
 		hclgevf_misc_irq_uninit(hdev);
 		hclgevf_uninit_msi(hdev);


@@ -962,6 +962,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 	rbpool = cq->rbpool;
 	free_ptrs = cq->pool_ptrs;
+	get_cpu();
 	while (cq->pool_ptrs) {
 		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
 			/* Schedule a WQ if we fails to free atleast half of the
@@ -981,6 +982,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
+	put_cpu();
 	cq->refill_task_sched = false;
 }
@@ -1314,6 +1316,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 	if (err)
 		goto fail;
+	get_cpu();
 	/* Allocate pointers and free them to aura/pool */
 	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
@@ -1322,18 +1325,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 		sq = &qset->sq[qidx];
 		sq->sqb_count = 0;
 		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
-		if (!sq->sqb_ptrs)
-			return -ENOMEM;
+		if (!sq->sqb_ptrs) {
+			err = -ENOMEM;
+			goto err_mem;
+		}
 		for (ptr = 0; ptr < num_sqbs; ptr++) {
-			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
-				return -ENOMEM;
+			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+			if (err)
+				goto err_mem;
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
 		}
 	}
-	return 0;
+err_mem:
+	put_cpu();
+	return err ? -ENOMEM : 0;
 fail:
 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
 	otx2_aura_pool_free(pfvf);
@@ -1372,18 +1381,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	if (err)
 		goto fail;
+	get_cpu();
 	/* Allocate pointers and free them to aura/pool */
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
-			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
-				return -ENOMEM;
+			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+			if (err)
+				goto err_mem;
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
 						   bufptr + OTX2_HEAD_ROOM);
 		}
 	}
-	return 0;
+err_mem:
+	put_cpu();
+	return err ? -ENOMEM : 0;
 fail:
 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
 	otx2_aura_pool_free(pfvf);
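Note: the otx2 hunks wrap the buffer-pointer loops in get_cpu()/put_cpu() and reroute the error returns through a single err_mem label, so the CPU stays pinned while pointers are handed to the hardware and is always un-pinned on failure (presumably because the free-pointer operation relies on per-CPU state; that rationale is inferred, not stated in the hunks). A userspace sketch of that pin-then-unwind shape with stand-in helpers:

#include <stdio.h>

/* Illustrative stand-ins: pretend these pin/unpin the current CPU. */
static void get_cpu_stub(void)  { printf("preemption off\n"); }
static void put_cpu_stub(void)  { printf("preemption on\n");  }

static int alloc_buf_stub(int i) { return i == 3 ? -12 : 0; }	/* fail on i == 3 */

static int pool_init_like(void)
{
	int err = 0;
	int i;

	get_cpu_stub();
	for (i = 0; i < 8; i++) {
		err = alloc_buf_stub(i);
		if (err)
			goto err_mem;	/* unwind through one label ... */
	}
err_mem:
	put_cpu_stub();			/* ... so the CPU is always unpinned */
	return err ? -12 : 0;		/* -ENOMEM in the driver */
}

int main(void)
{
	printf("rc=%d\n", pool_init_like());
	return 0;
}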

Some files were not shown because too many files have changed in this diff.