Merge 5.15.13 into android13-5.15

Changes in 5.15.13
	Input: i8042 - add deferred probe support
	Input: i8042 - enable deferred probe quirk for ASUS UM325UA
	tomoyo: Check exceeded quota early in tomoyo_domain_quota_is_ok().
	tomoyo: use hwight16() in tomoyo_domain_quota_is_ok()
	net/sched: Extend qdisc control block with tc control block
	parisc: Clear stale IIR value on instruction access rights trap
	platform/mellanox: mlxbf-pmc: Fix an IS_ERR() vs NULL bug in mlxbf_pmc_map_counters
	platform/x86: apple-gmux: use resource_size() with res
	memblock: fix memblock_phys_alloc() section mismatch error
	ALSA: hda: intel-sdw-acpi: harden detection of controller
	ALSA: hda: intel-sdw-acpi: go through HDAS ACPI at max depth of 2
	recordmcount.pl: fix typo in s390 mcount regex
	powerpc/ptdump: Fix DEBUG_WX since generic ptdump conversion
	efi: Move efifb_setup_from_dmi() prototype from arch headers
	selinux: initialize proto variable in selinux_ip_postroute_compat()
	scsi: lpfc: Terminate string in lpfc_debugfs_nvmeio_trc_write()
	net/mlx5: DR, Fix NULL vs IS_ERR checking in dr_domain_init_resources
	net/mlx5: Fix error print in case of IRQ request failed
	net/mlx5: Fix SF health recovery flow
	net/mlx5: Fix tc max supported prio for nic mode
	net/mlx5e: Wrap the tx reporter dump callback to extract the sq
	net/mlx5e: Fix interoperability between XSK and ICOSQ recovery flow
	net/mlx5e: Fix ICOSQ recovery flow for XSK
	net/mlx5e: Use tc sample stubs instead of ifdefs in source file
	net/mlx5e: Delete forward rule for ct or sample action
	udp: using datalen to cap ipv6 udp max gso segments
	selftests: Calculate udpgso segment count without header adjustment
	sctp: use call_rcu to free endpoint
	net/smc: fix using of uninitialized completions
	net: usb: pegasus: Do not drop long Ethernet frames
	net: ag71xx: Fix a potential double free in error handling paths
	net: lantiq_xrx200: fix statistics of received bytes
	NFC: st21nfca: Fix memory leak in device probe and remove
	net/smc: don't send CDC/LLC message if link not ready
	net/smc: fix kernel panic caused by race of smc_sock
	igc: Do not enable crosstimestamping for i225-V models
	igc: Fix TX timestamp support for non-MSI-X platforms
	drm/amd/display: Send s0i2_rdy in stream_count == 0 optimization
	drm/amd/display: Set optimize_pwr_state for DCN31
	ionic: Initialize the 'lif->dbid_inuse' bitmap
	net/mlx5e: Fix wrong features assignment in case of error
	net: bridge: mcast: add and enforce query interval minimum
	net: bridge: mcast: add and enforce startup query interval minimum
	selftests/net: udpgso_bench_tx: fix dst ip argument
	selftests: net: Fix a typo in udpgro_fwd.sh
	net: bridge: mcast: fix br_multicast_ctx_vlan_global_disabled helper
	net/ncsi: check for error return from call to nla_put_u32
	selftests: net: using ping6 for IPv6 in udpgro_fwd.sh
	fsl/fman: Fix missing put_device() call in fman_port_probe
	i2c: validate user data in compat ioctl
	nfc: uapi: use kernel size_t to fix user-space builds
	uapi: fix linux/nfc.h userspace compilation errors
	drm/nouveau: wait for the exclusive fence after the shared ones v2
	drm/amdgpu: When the VCN(1.0) block is suspended, powergating is explicitly enabled
	drm/amdgpu: add support for IP discovery gc_info table v2
	drm/amd/display: Changed pipe split policy to allow for multi-display pipe split
	xhci: Fresco FL1100 controller should not have BROKEN_MSI quirk set.
	usb: gadget: f_fs: Clear ffs_eventfd in ffs_data_clear.
	usb: mtu3: add memory barrier before set GPD's HWO
	usb: mtu3: fix list_head check warning
	usb: mtu3: set interval of FS intr and isoc endpoint
	nitro_enclaves: Use get_user_pages_unlocked() call to handle mmap assert
	binder: fix async_free_space accounting for empty parcels
	scsi: vmw_pvscsi: Set residual data length conditionally
	Input: appletouch - initialize work before device registration
	Input: spaceball - fix parsing of movement data packets
	mm/damon/dbgfs: fix 'struct pid' leaks in 'dbgfs_target_ids_write()'
	net: fix use-after-free in tw_timer_handler
	fs/mount_setattr: always cleanup mount_kattr
	perf intel-pt: Fix parsing of VM time correlation arguments
	perf script: Fix CPU filtering of a script's switch events
	perf scripts python: intel-pt-events.py: Fix printing of switch events
	Linux 5.15.13

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie0d5130643ce211d5e21ea9d11bc5577efeddc90
Author: Greg Kroah-Hartman
Date:   2022-01-05 15:36:44 +01:00

102 changed files with 746 additions and 385 deletions


@@ -1694,6 +1694,8 @@
 			architectures force reset to be always executed
 	i8042.unlock	[HW] Unlock (ignore) the keylock
 	i8042.kbdreset	[HW] Reset device connected to KBD port
+	i8042.probe_defer
+			[HW] Allow deferred probing upon i8042 probe errors
 	i810=		[HW,DRM]
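
A usage note, not part of the patch itself: besides the DMI quirk this release adds for the affected ASUS ZenBooks, deferred probing can be requested manually by appending the new parameter to the kernel command line, e.g.

    i8042.probe_defer=1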


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 12
+SUBLEVEL = 13
 EXTRAVERSION =
 NAME = Trick or Treat


@@ -17,7 +17,6 @@
 #ifdef CONFIG_EFI
 void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);


@@ -14,7 +14,6 @@
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 #else
 #define efi_init()
 #endif


@@ -729,6 +729,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 			}
 			mmap_read_unlock(current->mm);
 		}
+		/* CPU could not fetch instruction, so clear stale IIR value. */
+		regs->iir = 0xbaadf00d;
 		fallthrough;
 	case 27:
 		/* Data memory protection ID trap */


@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
 	pte_t pte = __pte(st->current_flags);
 
-	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
+	if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
 		return;
 
 	if (!pte_write(pte) || !pte_exec(pte))


@@ -13,7 +13,6 @@
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 #else
 #define efi_init()
 #endif


@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void)
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
 
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
-
 extern void efi_thunk_runtime_setup(void);
 
 efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
 					 unsigned long descriptor_size,


@@ -671,7 +671,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
-		alloc->free_async_space += size + sizeof(struct binder_buffer);
+		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
 
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
 			     "%d: binder_free_buf size %zd async free %zd\n",


@@ -415,10 +415,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 	}
 }
 
+union gc_info {
+	struct gc_info_v1_0 v1;
+	struct gc_info_v2_0 v2;
+};
+
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 {
 	struct binary_header *bhdr;
-	struct gc_info_v1_0 *gc_info;
+	union gc_info *gc_info;
 
 	if (!adev->mman.discovery_bin) {
 		DRM_ERROR("ip discovery uninitialized\n");
@@ -426,27 +431,54 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 	}
 
 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
-	gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+	gc_info = (union gc_info *)(adev->mman.discovery_bin +
 			le16_to_cpu(bhdr->table_list[GC].offset));
-
-	adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
-	adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
-					      le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
-	adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
-	adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
-	adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
-	adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
-	adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
-	adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
-	adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
-	adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
-	adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
-	adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
-	adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
-	adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
-	adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
-					 le32_to_cpu(gc_info->gc_num_sa_per_se);
-	adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
+	switch (gc_info->v1.header.version_major) {
+	case 1:
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+		break;
+	case 2:
+		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+		break;
+	default:
+		dev_err(adev->dev,
+			"Unhandled GC info table %d.%d\n",
+			gc_info->v1.header.version_major,
+			gc_info->v1.header.version_minor);
+		return -EINVAL;
+	}
 	return 0;
 }
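
The switch above is an instance of a common versioned-table pattern; a self-contained sketch (hypothetical field names, not the amdgpu layout):

    #include <stdint.h>
    #include <stdio.h>

    struct hdr { uint32_t version_major, version_minor; };
    struct info_v1 { struct hdr header; uint32_t num_se; };
    struct info_v2 { struct hdr header; uint32_t num_se, num_cu_per_sh; };
    /* Every layout starts with the same header, so any member can read it. */
    union info { struct info_v1 v1; struct info_v2 v2; };

    static int parse(const union info *info)
    {
    	switch (info->v1.header.version_major) {
    	case 1: printf("v1 num_se=%u\n", info->v1.num_se); return 0;
    	case 2: printf("v2 num_se=%u\n", info->v2.num_se); return 0;
    	default: fprintf(stderr, "unhandled major %u\n",
    			 info->v1.header.version_major); return -1;
    	}
    }

    int main(void)
    {
    	union info i = { .v2 = { .header = { 2, 0 }, .num_se = 4 } };
    	return parse(&i) ? 1 : 0;
    }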


@@ -253,6 +253,13 @@ static int vcn_v1_0_suspend(void *handle)
 {
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool idle_work_unexecuted;
+
+	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+	if (idle_work_unexecuted) {
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, false);
+	}
 
 	r = vcn_v1_0_hw_fini(adev);
 	if (r)


@@ -157,6 +157,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 		union display_idle_optimization_u idle_info = { 0 };
 
 		idle_info.idle_info.df_request_disabled = 1;
 		idle_info.idle_info.phy_ref_clk_off = 1;
+		idle_info.idle_info.s0i2_rdy = 1;
 		dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
 		/* update power state */
 		clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;


@@ -1067,7 +1067,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
 	.min_disp_clk_khz = 100000,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -863,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_clock_gate = true,
 	.disable_pplib_clock_request = true,
 	.disable_pplib_wm_range = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 	.z10_save_init = dcn31_z10_save_init,
 	.is_abm_supported = dcn31_is_abm_supported,
 	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+	.optimize_pwr_state = dcn21_optimize_pwr_state,
 	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
 	.update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };


@@ -923,7 +923,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = false,
-	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,


@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
 	uint32_t gc_num_gl2a;
 };
 
+struct gc_info_v1_1 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_wgp0_per_sa;
+	uint32_t gc_num_wgp1_per_sa;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_gl2c;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_sa_per_se;
+	uint32_t gc_num_packer_per_sc;
+	uint32_t gc_num_gl2a;
+	uint32_t gc_num_tcp_per_sa;
+	uint32_t gc_num_sdp_interface;
+	uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+	struct gpu_info_header header;
+
+	uint32_t gc_num_se;
+	uint32_t gc_num_cu_per_sh;
+	uint32_t gc_num_sh_per_se;
+	uint32_t gc_num_rb_per_se;
+	uint32_t gc_num_tccs;
+	uint32_t gc_num_gprs;
+	uint32_t gc_num_max_gs_thds;
+	uint32_t gc_gs_table_depth;
+	uint32_t gc_gsprim_buff_depth;
+	uint32_t gc_parameter_cache_depth;
+	uint32_t gc_double_offchip_lds_buffer;
+	uint32_t gc_wave_size;
+	uint32_t gc_max_waves_per_simd;
+	uint32_t gc_max_scratch_slots_per_cu;
+	uint32_t gc_lds_size;
+	uint32_t gc_num_sc_per_se;
+	uint32_t gc_num_packer_per_sc;
+};
+
 typedef struct harvest_info_header {
 	uint32_t signature; /* Table Signature */
 	uint32_t version;   /* Table Version */


@@ -353,34 +353,16 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		if (ret)
 			return ret;
+	}
 
-		fobj = NULL;
-	} else {
-		fobj = dma_resv_shared_list(resv);
-	}
+	fobj = dma_resv_shared_list(resv);
 
-	fence = dma_resv_excl_fence(resv);
-	if (fence) {
-		struct nouveau_channel *prev = NULL;
-		bool must_wait = true;
-
-		f = nouveau_local_fence(fence, chan->drm);
-		if (f) {
-			rcu_read_lock();
-			prev = rcu_dereference(f->channel);
-			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
-				must_wait = false;
-			rcu_read_unlock();
-		}
-
-		if (must_wait)
-			ret = dma_fence_wait(fence, intr);
-
-		return ret;
-	}
-
-	if (!exclusive || !fobj)
-		return ret;
-
-	for (i = 0; i < fobj->shared_count && !ret; ++i) {
+	/* Waiting for the exclusive fence first causes performance regressions
+	 * under some circumstances. So manually wait for the shared ones first.
+	 */
+	for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
 		struct nouveau_channel *prev = NULL;
 		bool must_wait = true;
@@ -400,6 +382,26 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
 			ret = dma_fence_wait(fence, intr);
 	}
 
+	fence = dma_resv_excl_fence(resv);
+	if (fence) {
+		struct nouveau_channel *prev = NULL;
+		bool must_wait = true;
+
+		f = nouveau_local_fence(fence, chan->drm);
+		if (f) {
+			rcu_read_lock();
+			prev = rcu_dereference(f->channel);
+			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+				must_wait = false;
+			rcu_read_unlock();
+		}
+
+		if (must_wait)
+			ret = dma_fence_wait(fence, intr);
+
+		return ret;
+	}
+
 	return ret;
 }


@@ -535,6 +535,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
 				   sizeof(rdwr_arg)))
 			return -EFAULT;
 
+		if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+			return -EINVAL;
+
 		if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
 			return -EINVAL;


@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/serio.h>
+#include <asm/unaligned.h>
 
 #define DRIVER_DESC	"SpaceTec SpaceBall 2003/3003/4000 FLX driver"
 
@@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
 
 	case 'D':	/* Ball data */
 		if (spaceball->idx != 15) return;
-		for (i = 0; i < 6; i++)
+		/*
+		 * Skip first three bytes; read six axes worth of data.
+		 * Axis values are signed 16-bit big-endian.
+		 */
+		data += 3;
+		for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
 			input_report_abs(dev, spaceball_axes[i],
-				(__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
+				(__s16)get_unaligned_be16(&data[i * 2]));
+		}
 		break;
 
 	case 'K':	/* Button data */
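
A userspace model of the fixed decoding, runnable on its own (not driver code): skip the three header bytes of a 'D' frame, then read each axis as a signed 16-bit big-endian value, which is what get_unaligned_be16() does in the driver.

    #include <stdint.h>
    #include <stdio.h>

    static int16_t axis_be16(const uint8_t *data, int i)
    {
    	const uint8_t *p = data + 3 + 2 * i;	/* header skipped, axis i */
    	return (int16_t)(((uint16_t)p[0] << 8) | p[1]);	/* big-endian load */
    }

    int main(void)
    {
    	/* 'D' frame: 3 header bytes, then axis 0 = 0xff38 = -200 */
    	const uint8_t pkt[] = { 'D', 0x00, 0x00, 0xff, 0x38 };
    	printf("axis0 = %d\n", axis_be16(pkt, 0));
    	return 0;
    }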


@@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
 	set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
 	set_bit(BTN_LEFT, input_dev->keybit);
 
+	INIT_WORK(&dev->work, atp_reinit);
+
 	error = input_register_device(dev->input);
 	if (error)
 		goto err_free_buffer;
@@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
 	/* save our data pointer in this interface device */
 	usb_set_intfdata(iface, dev);
 
-	INIT_WORK(&dev->work, atp_reinit);
-
 	return 0;
 
  err_free_buffer:


@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
 	{ }
 };
 
+static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+	{
+		/* ASUS ZenBook UX425UA */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+		},
+	},
+	{
+		/* ASUS ZenBook UM325UA */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+		},
+	},
+	{ }
+};
+
 #endif /* CONFIG_X86 */
 
 #ifdef CONFIG_PNP
@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
 	if (dmi_check_system(i8042_dmi_kbdreset_table))
 		i8042_kbdreset = true;
 
+	if (dmi_check_system(i8042_dmi_probe_defer_table))
+		i8042_probe_defer = true;
+
 	/*
 	 * A20 was already enabled during early kernel init. But some buggy
 	 * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to


@@ -45,6 +45,10 @@ static bool i8042_unlock;
 module_param_named(unlock, i8042_unlock, bool, 0);
 MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
 
+static bool i8042_probe_defer;
+module_param_named(probe_defer, i8042_probe_defer, bool, 0);
+MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
+
 enum i8042_controller_reset_mode {
 	I8042_RESET_NEVER,
 	I8042_RESET_ALWAYS,
@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
  * LCS/Telegraphics.
  */
-static int __init i8042_check_mux(void)
+static int i8042_check_mux(void)
 {
 	unsigned char mux_version;
@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
 /*
  * The following is used to test AUX IRQ delivery.
  */
-static struct completion i8042_aux_irq_delivered __initdata;
-static bool i8042_irq_being_tested __initdata;
+static struct completion i8042_aux_irq_delivered;
+static bool i8042_irq_being_tested;
 
-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
 {
 	unsigned long flags;
 	unsigned char str, data;
@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
  * verifies success by readinng CTR. Used when testing for presence of AUX
  * port.
  */
-static int __init i8042_toggle_aux(bool on)
+static int i8042_toggle_aux(bool on)
 {
 	unsigned char param;
 	int i;
@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
  * the presence of an AUX interface.
  */
-static int __init i8042_check_aux(void)
+static int i8042_check_aux(void)
 {
 	int retval = -1;
 	bool irq_registered = false;
@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
 		if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
 			pr_err("Can't read CTR while initializing i8042\n");
-			return -EIO;
+			return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
 		}
 	} while (n < 2 || ctr[0] != ctr[1]);
@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
 	i8042_controller_reset(false);
 }
 
-static int __init i8042_create_kbd_port(void)
+static int i8042_create_kbd_port(void)
 {
 	struct serio *serio;
 	struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
 	return 0;
 }
 
-static int __init i8042_create_aux_port(int idx)
+static int i8042_create_aux_port(int idx)
 {
 	struct serio *serio;
 	int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
 	return 0;
 }
 
-static void __init i8042_free_kbd_port(void)
+static void i8042_free_kbd_port(void)
 {
 	kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
 	i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
 }
 
-static void __init i8042_free_aux_ports(void)
+static void i8042_free_aux_ports(void)
 {
 	int i;
@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
 	}
 }
 
-static void __init i8042_register_ports(void)
+static void i8042_register_ports(void)
 {
 	int i;
@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
 	i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
 }
 
-static int __init i8042_setup_aux(void)
+static int i8042_setup_aux(void)
 {
 	int (*aux_enable)(void);
 	int error;
@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
 	return error;
 }
 
-static int __init i8042_setup_kbd(void)
+static int i8042_setup_kbd(void)
 {
 	int error;
@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
 	return 0;
 }
 
-static int __init i8042_probe(struct platform_device *dev)
+static int i8042_probe(struct platform_device *dev)
 {
 	int error;
@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
 		.pm	= &i8042_pm_ops,
 #endif
 	},
+	.probe		= i8042_probe,
 	.remove		= i8042_remove,
 	.shutdown	= i8042_shutdown,
 };
@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
 static int __init i8042_init(void)
 {
-	struct platform_device *pdev;
 	int err;
 
 	dbg_init();
@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
 	/* Set this before creating the dev to allow i8042_command to work right away */
 	i8042_present = true;
 
-	pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
-	if (IS_ERR(pdev)) {
-		err = PTR_ERR(pdev);
+	err = platform_driver_register(&i8042_driver);
+	if (err)
 		goto err_platform_exit;
+
+	i8042_platform_device = platform_device_alloc("i8042", -1);
+	if (!i8042_platform_device) {
+		err = -ENOMEM;
+		goto err_unregister_driver;
 	}
 
+	err = platform_device_add(i8042_platform_device);
+	if (err)
+		goto err_free_device;
+
 	bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = i8042_panic_blink;
 
 	return 0;
 
+ err_free_device:
+	platform_device_put(i8042_platform_device);
+ err_unregister_driver:
+	platform_driver_unregister(&i8042_driver);
  err_platform_exit:
 	i8042_platform_exit();
 	return err;
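
The registration rework in this last hunk is what makes the deferral usable at all: platform_create_bundle() probes synchronously through platform_driver_probe(), which does not support -EPROBE_DEFER, so the driver is converted to a regular platform_driver_register() with a .probe callback plus a separately allocated and added platform device.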


@@ -1915,15 +1915,12 @@ static int ag71xx_probe(struct platform_device *pdev)
 	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
 	if (IS_ERR(ag->mac_reset)) {
 		netif_err(ag, probe, ndev, "missing mac reset\n");
-		err = PTR_ERR(ag->mac_reset);
-		goto err_free;
+		return PTR_ERR(ag->mac_reset);
 	}
 
 	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!ag->mac_base) {
-		err = -ENOMEM;
-		goto err_free;
-	}
+	if (!ag->mac_base)
+		return -ENOMEM;
 
 	ndev->irq = platform_get_irq(pdev, 0);
 	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1931,7 +1928,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 	if (err) {
 		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
 			  ndev->irq);
-		goto err_free;
+		return err;
 	}
 
 	ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1959,10 +1956,8 @@ static int ag71xx_probe(struct platform_device *pdev)
 	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
 					    sizeof(struct ag71xx_desc),
 					    &ag->stop_desc_dma, GFP_KERNEL);
-	if (!ag->stop_desc) {
-		err = -ENOMEM;
-		goto err_free;
-	}
+	if (!ag->stop_desc)
+		return -ENOMEM;
 
 	ag->stop_desc->data = 0;
 	ag->stop_desc->ctrl = 0;
@@ -1977,7 +1972,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 	err = of_get_phy_mode(np, &ag->phy_if_mode);
 	if (err) {
 		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
-		goto err_free;
+		return err;
 	}
 
 	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
@@ -1985,7 +1980,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 	err = clk_prepare_enable(ag->clk_eth);
 	if (err) {
 		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
-		goto err_free;
+		return err;
 	}
 
 	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2021,8 +2016,6 @@ err_mdio_remove:
 	ag71xx_mdio_remove(ag);
 err_put_clk:
 	clk_disable_unprepare(ag->clk_eth);
-err_free:
-	free_netdev(ndev);
 	return err;
 }


@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	fman = dev_get_drvdata(&fm_pdev->dev);
 	if (!fman) {
 		err = -EINVAL;
-		goto return_err;
+		goto put_device;
 	}
 
 	err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 		dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
 			__func__, port_node);
 		err = -EINVAL;
-		goto return_err;
+		goto put_device;
 	}
 	port_id = (u8)val;
 	port->dts_params.id = port_id;
@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	} else {
 		dev_err(port->dev, "%s: Illegal port type\n", __func__);
 		err = -EINVAL;
-		goto return_err;
+		goto put_device;
 	}
 
 	port->dts_params.type = port_type;
@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 			dev_err(port->dev, "%s: incorrect qman-channel-id\n",
 				__func__);
 			err = -EINVAL;
-			goto return_err;
+			goto put_device;
 		}
 		port->dts_params.qman_channel_id = qman_channel_id;
 	}
@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 		dev_err(port->dev, "%s: of_address_to_resource() failed\n",
 			__func__);
 		err = -ENOMEM;
-		goto return_err;
+		goto put_device;
 	}
 
 	port->dts_params.fman = fman;
@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
 
 	return 0;
 
+put_device:
+	put_device(&fm_pdev->dev);
 return_err:
 	of_node_put(port_node);
 free_port:


@@ -5466,6 +5466,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
+	if (icr & IGC_ICR_TS)
+		igc_tsync_interrupt(adapter);
+
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -5509,6 +5512,9 @@ static irqreturn_t igc_intr(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
+	if (icr & IGC_ICR_TS)
+		igc_tsync_interrupt(adapter);
+
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;


@@ -768,7 +768,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
  */
 static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
 {
-	return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+	if (!IS_ENABLED(CONFIG_X86_TSC))
+		return false;
+
+	/* FIXME: it was noticed that enabling support for PCIe PTM in
+	 * some i225-V models could cause lockups when bringing the
+	 * interface up/down. There should be no downsides to
+	 * disabling crosstimestamping support for i225-V, as it
+	 * doesn't have any PTP support. That way we gain some time
+	 * while root causing the issue.
+	 */
+	if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+		return false;
+
+	return pcie_ptm_enabled(adapter->pdev);
 }
 
 static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)


@@ -209,7 +209,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
 	skb->protocol = eth_type_trans(skb, net_dev);
 	netif_receive_skb(skb);
 	net_dev->stats.rx_packets++;
-	net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
+	net_dev->stats.rx_bytes += len;
 
 	return 0;
 }
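
The implied rationale: the hardware length used here already excludes the FCS, so subtracting ETH_FCS_LEN again undercounted rx_bytes by four bytes per frame.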


@@ -727,6 +727,8 @@ struct mlx5e_channel {
 	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 	int                        ix;
 	int                        cpu;
+	/* Sync between icosq recovery and XSK enable/disable. */
+	struct mutex               icosq_recovery_lock;
 };
 
 struct mlx5e_ptp;
@@ -954,9 +956,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
 void mlx5e_destroy_rq(struct mlx5e_rq *rq);
 
 struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
 		     struct mlx5e_xdpsq *sq, bool is_redirect);


@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
 
 #define MLX5E_REPORTER_PER_Q_MAX_LEN 256
 #define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000


@@ -647,9 +647,7 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk
"Failed to restore tunnel info for sampled packet\n"); "Failed to restore tunnel info for sampled packet\n");
return; return;
} }
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_skb(skb, mapped_obj); mlx5e_tc_sample_skb(skb, mapped_obj);
#endif /* CONFIG_MLX5_TC_SAMPLE */
mlx5_rep_tc_post_napi_receive(tc_priv); mlx5_rep_tc_post_napi_receive(tc_priv);
} }


@@ -59,6 +59,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
 static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 {
+	struct mlx5e_rq *xskrq = NULL;
 	struct mlx5_core_dev *mdev;
 	struct mlx5e_icosq *icosq;
 	struct net_device *dev;
@@ -67,7 +68,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 	int err;
 
 	icosq = ctx;
+
+	mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+	/* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
 	rq = &icosq->channel->rq;
+	if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+		xskrq = &icosq->channel->xskrq;
 	mdev = icosq->channel->mdev;
 	dev = icosq->channel->netdev;
 	err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -81,6 +88,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		goto out;
 
 	mlx5e_deactivate_rq(rq);
+	if (xskrq)
+		mlx5e_deactivate_rq(xskrq);
+
 	err = mlx5e_wait_for_icosq_flush(icosq);
 	if (err)
 		goto out;
@@ -94,15 +104,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		goto out;
 
 	mlx5e_reset_icosq_cc_pc(icosq);
+
 	mlx5e_free_rx_in_progress_descs(rq);
+	if (xskrq)
+		mlx5e_free_rx_in_progress_descs(xskrq);
+
 	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
 	mlx5e_activate_icosq(icosq);
-	mlx5e_activate_rq(rq);
 
+	mlx5e_activate_rq(rq);
 	rq->stats->recover++;
+
+	if (xskrq) {
+		mlx5e_activate_rq(xskrq);
+		xskrq->stats->recover++;
+	}
+
+	mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
 	return 0;
 out:
 	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+	mutex_unlock(&icosq->channel->icosq_recovery_lock);
 	return err;
 }
@@ -703,6 +726,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
 	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
 }
 
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+	mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+	mutex_unlock(&c->icosq_recovery_lock);
+}
+
 static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
 	.name = "rx",
 	.recover = mlx5e_rx_reporter_recover,


@@ -463,6 +463,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
 	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
 }
 
+static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+					  void *ctx)
+{
+	struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
+
+	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+}
+
 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
 					  struct devlink_fmsg *fmsg)
 {
@@ -558,7 +566,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
 	to_ctx.sq = sq;
 	err_ctx.ctx = &to_ctx;
 	err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
-	err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+	err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
 	snprintf(err_str, sizeof(err_str),
 		 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
 		 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,


@@ -19,6 +19,8 @@ struct mlx5e_sample_attr {
 	struct mlx5e_sample_flow *sample_flow;
 };
 
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+
 void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
 
 struct mlx5_flow_handle *
@@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
 void
 mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
 
+#else /* CONFIG_MLX5_TC_SAMPLE */
+
+static inline struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+			struct mlx5_flow_spec *spec,
+			struct mlx5_flow_attr *attr,
+			u32 tunnel_id)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+			  struct mlx5_flow_handle *rule,
+			  struct mlx5_flow_attr *attr) {}
+
+static inline struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {}
+
+static inline void
+mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {}
+
+#endif /* CONFIG_MLX5_TC_SAMPLE */
+
 #endif /* __MLX5_EN_TC_SAMPLE_H__ */


@@ -4,6 +4,7 @@
#include "setup.h" #include "setup.h"
#include "en/params.h" #include "en/params.h"
#include "en/txrx.h" #include "en/txrx.h"
#include "en/health.h"
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding * change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c) void mlx5e_activate_xsk(struct mlx5e_channel *c)
{ {
/* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
* activating XSKRQ in the middle of recovery.
*/
mlx5e_reporter_icosq_suspend_recovery(c);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
mlx5e_reporter_icosq_resume_recovery(c);
/* TX queue is created active. */ /* TX queue is created active. */
spin_lock_bh(&c->async_icosq_lock); spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
void mlx5e_deactivate_xsk(struct mlx5e_channel *c) void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{ {
mlx5e_deactivate_rq(&c->xskrq); /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
* middle of recovery. Suspend the recovery to avoid it.
*/
mlx5e_reporter_icosq_suspend_recovery(c);
clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
mlx5e_reporter_icosq_resume_recovery(c);
synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
/* TX queue is disabled on close. */ /* TX queue is disabled on close. */
} }


@@ -911,8 +911,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
 	cancel_work_sync(&rq->dim.work);
-	if (rq->icosq)
-		cancel_work_sync(&rq->icosq->recover_work);
 	cancel_work_sync(&rq->recover_work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
@@ -1038,9 +1036,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_icosq_cqe_err(sq);
 }
 
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+					      recover_work);
+
+	/* Not implemented yet. */
+
+	netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 			     struct mlx5e_sq_param *param,
-			     struct mlx5e_icosq *sq)
+			     struct mlx5e_icosq *sq,
+			     work_func_t recover_work_func)
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
@@ -1061,7 +1070,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	if (err)
 		goto err_sq_wq_destroy;
 
-	INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+	INIT_WORK(&sq->recover_work, recover_work_func);
 
 	return 0;
 
@@ -1399,13 +1408,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_tx_err_cqe(sq);
 }
 
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+			    struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+			    work_func_t recover_work_func)
 {
 	struct mlx5e_create_sq_param csp = {};
 	int err;
 
-	err = mlx5e_alloc_icosq(c, param, sq);
+	err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
 	if (err)
 		return err;
 
@@ -1444,7 +1454,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
 	synchronize_net(); /* Sync with NAPI. */
 }
 
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
 {
 	struct mlx5e_channel *c = sq->channel;
 
@@ -1871,11 +1881,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 
 	spin_lock_init(&c->async_icosq_lock);
 
-	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
+			       mlx5e_async_icosq_err_cqe_work);
 	if (err)
 		goto err_close_xdpsq_cq;
 
-	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+	mutex_init(&c->icosq_recovery_lock);
+
+	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
+			       mlx5e_icosq_err_cqe_work);
 	if (err)
 		goto err_close_async_icosq;
 
@@ -1943,9 +1957,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
 	mlx5e_close_xdpsq(&c->xdpsq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
+	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+	cancel_work_sync(&c->icosq.recover_work);
 	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
 	mlx5e_close_icosq(&c->icosq);
+	mutex_destroy(&c->icosq_recovery_lock);
 	mlx5e_close_icosq(&c->async_icosq);
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -3433,12 +3450,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 
 static int mlx5e_handle_feature(struct net_device *netdev,
 				netdev_features_t *features,
-				netdev_features_t wanted_features,
 				netdev_features_t feature,
 				mlx5e_feature_handler feature_handler)
 {
-	netdev_features_t changes = wanted_features ^ netdev->features;
-	bool enable = !!(wanted_features & feature);
+	netdev_features_t changes = *features ^ netdev->features;
+	bool enable = !!(*features & feature);
 	int err;
 
 	if (!(changes & feature))
@@ -3446,22 +3462,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
 
 	err = feature_handler(netdev, enable);
 	if (err) {
+		MLX5E_SET_FEATURE(features, feature, !enable);
 		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
 			   enable ? "Enable" : "Disable", &feature, err);
 		return err;
 	}
 
-	MLX5E_SET_FEATURE(features, feature, enable);
 	return 0;
 }
 
 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 {
-	netdev_features_t oper_features = netdev->features;
+	netdev_features_t oper_features = features;
 	int err = 0;
 
 #define MLX5E_HANDLE_FEATURE(feature, handler) \
-	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+	mlx5e_handle_feature(netdev, &oper_features, feature, handler)
 
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,


@@ -248,7 +248,6 @@ get_ct_priv(struct mlx5e_priv *priv)
 	return priv->fs.tc.ct;
 }
 
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
 static struct mlx5e_tc_psample *
 get_sample_priv(struct mlx5e_priv *priv)
 {
@@ -265,7 +264,6 @@ get_sample_priv(struct mlx5e_priv *priv)
 
 	return NULL;
 }
-#endif
 
 struct mlx5_flow_handle *
 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
@@ -1148,11 +1146,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
 		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
 					       flow, spec, attr,
 					       mod_hdr_acts);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
 	} else if (flow_flag_test(flow, SAMPLE)) {
 		rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
 					       mlx5e_tc_get_flow_tun_id(flow));
-#endif
 	} else {
 		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 	}
@@ -1183,21 +1179,14 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
 		goto offload_rule_0;
 
-	if (flow_flag_test(flow, CT)) {
-		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
-		return;
-	}
-
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
-	if (flow_flag_test(flow, SAMPLE)) {
-		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
-		return;
-	}
-#endif
-
 	if (attr->esw_attr->split_count)
 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
 
+	if (flow_flag_test(flow, CT))
+		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+	else if (flow_flag_test(flow, SAMPLE))
+		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+	else
 offload_rule_0:
 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 }
@@ -5014,9 +5003,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
 					       MLX5_FLOW_NAMESPACE_FDB,
 					       uplink_priv->post_act);
 
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
 	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
-#endif
 
 	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
 
@@ -5060,9 +5047,7 @@ err_ht_init:
 err_enc_opts_mapping:
 	mapping_destroy(uplink_priv->tunnel_mapping);
 err_tun_mapping:
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
 	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
 	mlx5_tc_ct_clean(uplink_priv->ct_priv);
 	netdev_warn(priv->netdev,
 		    "Failed to initialize tc (eswitch), err: %d", err);
@@ -5082,9 +5067,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
 	mapping_destroy(uplink_priv->tunnel_mapping);
 
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
 	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
 	mlx5_tc_ct_clean(uplink_priv->ct_priv);
 	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
 }


@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
+	if (!mlx5_chains_prios_supported(chains))
+		return 1;
+
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		return UINT_MAX;


@@ -1775,12 +1775,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
 
 int mlx5_recover_device(struct mlx5_core_dev *dev)
 {
-	int ret = -EIO;
+	if (!mlx5_core_is_sf(dev)) {
+		mlx5_pci_disable_device(dev);
+		if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
+			return -EIO;
+	}
 
-	mlx5_pci_disable_device(dev);
-	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
-		ret = mlx5_load_one(dev);
-	return ret;
+	return mlx5_load_one(dev);
 }
 
 static struct pci_driver mlx5_core_driver = {


@@ -346,8 +346,8 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	new_irq = irq_pool_create_irq(pool, affinity);
 	if (IS_ERR(new_irq)) {
 		if (!least_loaded_irq) {
-			mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
-				      cpumask_first(affinity));
+			mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+				      PTR_ERR(new_irq));
 			mutex_unlock(&pool->lock);
 			return new_irq;
 		}


@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 Mellanox Technologies. */
 
 #include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
 #include "dr_types.h"
 
 #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
@@ -75,9 +76,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
 	}
 
 	dmn->uar = mlx5_get_uars_page(dmn->mdev);
-	if (!dmn->uar) {
+	if (IS_ERR(dmn->uar)) {
 		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(dmn->uar);
 		goto clean_pd;
 	}


@@ -3283,7 +3283,7 @@ int ionic_lif_init(struct ionic_lif *lif)
 		return -EINVAL;
 	}
 
-	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+	lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
 	if (!lif->dbid_inuse) {
 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
 		return -ENOMEM;
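
The fix relies on bitmap_zalloc() returning zeroed storage, whereas bitmap_alloc() returns uninitialized memory whose stale bits can make doorbell IDs look already reserved. A minimal sketch of the safe pattern (illustrative only, not the driver's code):

	#include <linux/bitmap.h>

	static unsigned long *make_id_map(unsigned int nbits)
	{
		/* zeroed map: every ID starts out free, so
		 * find_first_zero_bit() callers see the real state
		 */
		return bitmap_zalloc(nbits, GFP_KERNEL);
	}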


@@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb)
 		goto goon;
 
 	rx_status = buf[count - 2];
-	if (rx_status & 0x1e) {
+	if (rx_status & 0x1c) {
 		netif_dbg(pegasus, rx_err, net,
 			  "RX packet error %x\n", rx_status);
 		net->stats.rx_errors++;
-		if (rx_status & 0x06)	/* long or runt */
+		if (rx_status & 0x04)	/* runt */
 			net->stats.rx_length_errors++;
 		if (rx_status & 0x08)
 			net->stats.rx_crc_errors++;
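
The mask change narrows which RX status bits count as errors: 0x02 (frame too long) is dropped from both the error mask (0x1e to 0x1c) and the length-error mask (0x06 to 0x04), so long Ethernet frames are delivered instead of discarded. A reduced sketch; the bit names are assumptions read off the patch, not driver defines:

	#define RX_STATUS_LONG	0x02	/* oversized frame - now accepted */
	#define RX_STATUS_RUNT	0x04	/* still a length error */
	#define RX_STATUS_CRC	0x08	/* still a CRC error */

	static bool rx_status_is_error(u8 rx_status)
	{
		return rx_status & 0x1c;	/* RX_STATUS_LONG excluded */
	}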


@@ -528,7 +528,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 	phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
 	if (IS_ERR(phy->gpiod_ena)) {
 		nfc_err(dev, "Unable to get ENABLE GPIO\n");
-		return PTR_ERR(phy->gpiod_ena);
+		r = PTR_ERR(phy->gpiod_ena);
+		goto out_free;
 	}
 
 	phy->se_status.is_ese_present =
@@ -539,7 +540,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 	r = st21nfca_hci_platform_init(phy);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to reboot st21nfca\n");
-		return r;
+		goto out_free;
 	}
 
 	r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
@@ -548,15 +549,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 				ST21NFCA_HCI_DRIVER_NAME, phy);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to register IRQ handler\n");
-		return r;
+		goto out_free;
 	}
 
-	return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
-					ST21NFCA_FRAME_HEADROOM,
-					ST21NFCA_FRAME_TAILROOM,
-					ST21NFCA_HCI_LLC_MAX_PAYLOAD,
-					&phy->hdev,
-					&phy->se_status);
+	r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+			       ST21NFCA_FRAME_HEADROOM,
+			       ST21NFCA_FRAME_TAILROOM,
+			       ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+			       &phy->hdev,
+			       &phy->se_status);
+	if (r)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	kfree_skb(phy->pending_skb);
+	return r;
 }
 
 static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -567,6 +576,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
 	if (phy->powered)
 		st21nfca_hci_i2c_disable(phy);
+	if (phy->pending_skb)
+		kfree_skb(phy->pending_skb);
 
 	return 0;
 }


@@ -1374,8 +1374,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
 		pmc->block[i].counters = info[2];
 		pmc->block[i].type = info[3];
 
-		if (IS_ERR(pmc->block[i].mmio_base))
-			return PTR_ERR(pmc->block[i].mmio_base);
+		if (!pmc->block[i].mmio_base)
+			return -ENOMEM;
 
 		ret = mlxbf_pmc_create_groups(dev, i);
 		if (ret)
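
The underlying contract: ioremap-style mapping helpers return NULL on failure, not an ERR_PTR, so an IS_ERR() check never fires and the error slips through. A reduced sketch of the corrected check, assuming a device 'dev' and resource 'res':

	void __iomem *base;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)		/* NULL check, not IS_ERR() */
		return -ENOMEM;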


@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
 	}
 
 	gmux_data->iostart = res->start;
-	gmux_data->iolen = res->end - res->start;
+	gmux_data->iolen = resource_size(res);
 
 	if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
 		pr_err("gmux I/O region too small (%lu < %u)\n",


@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
 	char mybuf[64];
 	char *pbuf;
 
-	if (nbytes > 64)
-		nbytes = 64;
+	if (nbytes > 63)
+		nbytes = 63;
 
 	memset(mybuf, 0, sizeof(mybuf));
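
Capping the copy at 63 bytes keeps the last byte of the zeroed 64-byte buffer as a string terminator; copying a full 64 bytes would leave no NUL for the parsing that follows. The general pattern, as a sketch:

	char mybuf[64];

	memset(mybuf, 0, sizeof(mybuf));
	if (nbytes > sizeof(mybuf) - 1)		/* reserve one byte for NUL */
		nbytes = sizeof(mybuf) - 1;
	if (copy_from_user(mybuf, buf, nbytes))
		return -EFAULT;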


@@ -586,8 +586,11 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
 			 * Commands like INQUIRY may transfer less data than
 			 * requested by the initiator via bufflen. Set residual
 			 * count to make upper layer aware of the actual amount
-			 * of data returned.
+			 * of data returned. There are cases when controller
+			 * returns zero dataLen with non zero data - do not set
+			 * residual count in that case.
 			 */
-			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+			if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
+				scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
 			cmd->result = (DID_OK << 16);
 			break;


@@ -1773,11 +1773,15 @@ static void ffs_data_clear(struct ffs_data *ffs)
 
 	BUG_ON(ffs->gadget);
 
-	if (ffs->epfiles)
+	if (ffs->epfiles) {
 		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+		ffs->epfiles = NULL;
+	}
 
-	if (ffs->ffs_eventfd)
+	if (ffs->ffs_eventfd) {
 		eventfd_ctx_put(ffs->ffs_eventfd);
+		ffs->ffs_eventfd = NULL;
+	}
 
 	kfree(ffs->raw_descs_data);
 	kfree(ffs->raw_strings);
@@ -1790,7 +1794,6 @@ static void ffs_data_reset(struct ffs_data *ffs)
 
 	ffs_data_clear(ffs);
 
-	ffs->epfiles = NULL;
 	ffs->raw_descs_data = NULL;
 	ffs->raw_descs = NULL;
 	ffs->raw_strings = NULL;
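
Both hunks implement the same rule: clear a pointer at the point where the object is released, so a second pass through the teardown path becomes a no-op instead of a double free. Generic sketch (release_res() is a hypothetical stand-in for the real release helper):

	if (obj->res) {
		release_res(obj->res);
		obj->res = NULL;	/* later teardown sees nothing to free */
	}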


@@ -123,7 +123,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	/* Look for vendor-specific quirks */
 	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
 			(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
-			 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
 			 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
 		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
 				pdev->revision == 0x0) {
@@ -158,6 +157,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
 
+	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
 		xhci->quirks |= XHCI_NEC_HOST;


@@ -92,6 +92,13 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
 			interval = clamp_val(interval, 1, 16) - 1;
 			mult = usb_endpoint_maxp_mult(desc) - 1;
 		}
+		break;
+	case USB_SPEED_FULL:
+		if (usb_endpoint_xfer_isoc(desc))
+			interval = clamp_val(desc->bInterval, 1, 16);
+		else if (usb_endpoint_xfer_int(desc))
+			interval = clamp_val(desc->bInterval, 1, 255);
+
 		break;
 	default:
 		break; /*others are ignored */
@@ -235,6 +242,7 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 	mreq->request.dma = DMA_ADDR_INVALID;
 	mreq->epnum = mep->epnum;
 	mreq->mep = mep;
+	INIT_LIST_HEAD(&mreq->list);
 	trace_mtu3_alloc_request(mreq);
 
 	return &mreq->request;


@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
 			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
 	}
 
+	/* prevent reorder, make sure GPD's HWO is set last */
+	mb();
 	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
 	mreq->gpd = gpd;
@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
 	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
 	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
 	gpd->dw3_info = cpu_to_le32(ext_addr);
+	/* prevent reorder, make sure GPD's HWO is set last */
+	mb();
 	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
 
 	mreq->gpd = gpd;
@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
 		return;
 	}
 	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
-
+	/* prevent reorder, make sure GPD's HWO is set last */
+	mb();
 	/* by pass the current GDP */
 	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
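
All three mb() calls enforce the same descriptor hand-off rule: every field of the GPD must be globally visible before the HWO (hardware ownership) bit is set, because the controller may start DMA the moment it sees that bit. Reduced sketch with a hypothetical descriptor layout:

	desc->buffer = cpu_to_le32(dma_addr);
	desc->length = cpu_to_le32(len);
	mb();					/* order all fields before HWO */
	desc->flags |= cpu_to_le32(DESC_FLAG_HWO);	/* hw owns it now */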


@@ -886,8 +886,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
 			goto put_pages;
 		}
 
-		gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
-					ne_mem_region->pages + i, NULL);
+		gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
+						 ne_mem_region->pages + i, FOLL_GET);
 
 		if (gup_rc < 0) {
 			rc = gup_rc;
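
get_user_pages() must run under mmap_lock, which this ioctl path does not hold; get_user_pages_unlocked() takes and drops the lock internally. Sketch of the calling convention as of 5.15 (error handling reduced):

	long n;

	n = get_user_pages_unlocked(uaddr, 1, &page, FOLL_GET);
	if (n < 0)
		return n;	/* nothing locked, nothing to unwind */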


@@ -4263,12 +4263,11 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
 		return err;
 
 	err = user_path_at(dfd, path, kattr.lookup_flags, &target);
-	if (err)
-		return err;
-
-	err = do_mount_setattr(&target, &kattr);
+	if (!err) {
+		err = do_mount_setattr(&target, &kattr);
+		path_put(&target);
+	}
 	finish_mount_kattr(&kattr);
-	path_put(&target);
 	return err;
 }


@@ -1282,4 +1282,10 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
 }
 #endif
 
+#ifdef CONFIG_SYSFB
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+#else
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
+#endif
+
 #endif /* _LINUX_EFI_H */


@@ -388,7 +388,7 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
 				      phys_addr_t end, int nid, bool exact_nid);
 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
 
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
-					      phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+						       phys_addr_t align)
 {
 	return memblock_phys_alloc_range(size, align, 0,


@@ -193,4 +193,19 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
 	skb->tstamp = ktime_set(0, 0);
 }
 
+struct tc_skb_cb {
+	struct qdisc_skb_cb qdisc_cb;
+
+	u16 mru;
+	bool post_ct;
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+	struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+	return cb;
+}
+
 #endif
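
This is the standard cb-overlay pattern: the tc-private state moves out of the shared qdisc_skb_cb into a wrapper that embeds it as its first member, and the BUILD_BUG_ON keeps the whole overlay within the 48-byte skb->cb. A reduced sketch of defining such an overlay (my_cb is hypothetical):

	struct my_cb {
		struct qdisc_skb_cb qdisc_cb;	/* must stay first */
		u32 private_state;
	};

	static inline struct my_cb *my_cb(const struct sk_buff *skb)
	{
		BUILD_BUG_ON(sizeof(struct my_cb) >
			     sizeof_field(struct sk_buff, cb));
		return (struct my_cb *)skb->cb;
	}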


@@ -440,8 +440,6 @@ struct qdisc_skb_cb {
 	};
 #define QDISC_CB_PRIV_LEN 20
 	unsigned char		data[QDISC_CB_PRIV_LEN];
-	u16 mru;
-	bool post_ct;
 };
 
 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);


@@ -105,6 +105,7 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
 void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
@@ -115,8 +116,7 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 				  struct net *net,
 				  const union sctp_addr *laddr,
 				  const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    int (*cb_done)(struct sctp_transport *, void *),
-			    struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+				    struct net *net, int *pos, void *p);
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,


@@ -1365,6 +1365,7 @@ struct sctp_endpoint {
 	u32 secid;
 	u32 peer_secid;
+	struct rcu_head rcu;
 };
 
 /* Recover the outter endpoint structure. */
@@ -1380,7 +1381,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
 struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
 void sctp_endpoint_free(struct sctp_endpoint *);
 void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
 void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
 struct sctp_association *sctp_endpoint_lookup_assoc(
 	const struct sctp_endpoint *ep,


@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
 #define NFC_SE_ENABLED  0x1
 
 struct sockaddr_nfc {
-	sa_family_t sa_family;
+	__kernel_sa_family_t sa_family;
 	__u32 dev_idx;
 	__u32 target_idx;
 	__u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
 
 #define NFC_LLCP_MAX_SERVICE_NAME 63
 struct sockaddr_nfc_llcp {
-	sa_family_t sa_family;
+	__kernel_sa_family_t sa_family;
 	__u32 dev_idx;
 	__u32 target_idx;
 	__u32 nfc_protocol;
 	__u8 dsap; /* Destination SAP, if known */
 	__u8 ssap; /* Source SAP to be bound to */
 	char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
-	size_t service_name_len;
+	__kernel_size_t service_name_len;
 };
 
 /* NFC socket protocols */


@@ -185,6 +185,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
 		const char __user *buf, size_t count, loff_t *ppos)
 {
 	struct damon_ctx *ctx = file->private_data;
+	struct damon_target *t, *next_t;
 	char *kbuf, *nrs;
 	unsigned long *targets;
 	ssize_t nr_targets;
@@ -224,6 +225,13 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
 		goto unlock_out;
 	}
 
+	/* remove previously set targets */
+	damon_for_each_target_safe(t, next_t, ctx) {
+		if (targetid_is_pid(ctx))
+			put_pid((struct pid *)t->id);
+		damon_destroy_target(t);
+	}
+
 	err = damon_set_targets(ctx, targets, nr_targets);
 	if (err) {
 		if (targetid_is_pid(ctx))


@@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
 }
 #endif
 
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+				  unsigned long val)
+{
+	unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
+		br_info(brmctx->br,
+			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
+			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
+			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
+		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+	}
+
+	brmctx->multicast_query_interval = intvl_jiffies;
+}
+
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+					  unsigned long val)
+{
+	unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
+		br_info(brmctx->br,
+			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
+			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
+			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
+		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+	}
+
+	brmctx->multicast_startup_query_interval = intvl_jiffies;
+}
+
 /**
  * br_multicast_list_adjacent - Returns snooped multicast addresses
  * @dev:	The bridge port adjacent to which to retrieve addresses


@@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
 
-		br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_query_intvl(&br->multicast_ctx, val);
 	}
 
 	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -1369,7 +1369,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
 
-		br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
 	}
 
 	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {


@@ -28,6 +28,8 @@
 #define BR_MAX_PORTS	(1<<BR_PORT_BITS)
 
 #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
 
 #define BR_HWDOM_MAX BITS_PER_LONG
 
@@ -968,6 +970,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb,
 				    int nest_attr);
 size_t br_multicast_querier_state_size(void);
 size_t br_rports_size(const struct net_bridge_mcast *brmctx);
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+				  unsigned long val);
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+					  unsigned long val);
 
 static inline bool br_group_is_l2(const struct br_ip *group)
 {
@@ -1152,9 +1158,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx)
 static inline bool
 br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx)
 {
-	return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
-	       br_multicast_ctx_is_vlan(brmctx) &&
-	       !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
+	return br_multicast_ctx_is_vlan(brmctx) &&
+	       (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) ||
+		!(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED));
 }
 
 static inline bool
static inline bool static inline bool


@@ -658,7 +658,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
 static int set_query_interval(struct net_bridge *br, unsigned long val,
 			      struct netlink_ext_ack *extack)
 {
-	br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+	br_multicast_set_query_intvl(&br->multicast_ctx, val);
 	return 0;
 }
 
@@ -706,7 +706,7 @@ static ssize_t multicast_startup_query_interval_show(
 static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
 				      struct netlink_ext_ack *extack)
 {
-	br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+	br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
 	return 0;
 }


@@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
 		u64 val;
 
 		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
-		v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
 		*changed = true;
 	}
 	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
 		u64 val;
 
 		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
-		v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
 		*changed = true;
 	}
 	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {


@@ -3941,8 +3941,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
 		return skb;
 
 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
-	qdisc_skb_cb(skb)->mru = 0;
-	qdisc_skb_cb(skb)->post_ct = false;
+	tc_skb_cb(skb)->mru = 0;
+	tc_skb_cb(skb)->post_ct = false;
 	mini_qdisc_bstats_cpu_update(miniq, skb);
 
 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
@@ -5095,8 +5095,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
 	}
 
 	qdisc_skb_cb(skb)->pkt_len = skb->len;
-	qdisc_skb_cb(skb)->mru = 0;
-	qdisc_skb_cb(skb)->post_ct = false;
+	tc_skb_cb(skb)->mru = 0;
+	tc_skb_cb(skb)->post_ct = false;
 	skb->tc_at_ingress = 1;
 	mini_qdisc_bstats_cpu_update(miniq, skb);


@@ -2004,6 +2004,10 @@ static int __init inet_init(void)
 
 	ip_init();
 
+	/* Initialise per-cpu ipv4 mibs */
+	if (init_ipv4_mibs())
+		panic("%s: Cannot init ipv4 mibs\n", __func__);
+
 	/* Setup TCP slab cache for open requests. */
 	tcp_init();
 
@@ -2034,12 +2038,6 @@ static int __init inet_init(void)
 	if (init_inet_pernet_ops())
 		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
 
-	/*
-	 *	Initialise per-cpu ipv4 mibs
-	 */
-	if (init_ipv4_mibs())
-		pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
-
 	ipv4_proc_init();


@@ -1204,7 +1204,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
 			kfree_skb(skb);
 			return -EINVAL;
 		}
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
 			kfree_skb(skb);
 			return -EINVAL;
 		}
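
skb->len at this point includes protocol headers, so checking it against the payload cap rejected maximal-but-valid sends; datalen is the UDP payload alone. Worked numbers, assuming gso_size = 1400 and the 5.15 UDP_MAX_SEGMENTS value of 64:

	/* cap      = 1400 * 64 = 89600 bytes of payload		*/
	/* datalen  = 89600           -> accepted (correct)		*/
	/* skb->len = 89600 + headers -> old check returned -EINVAL	*/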


@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
 		pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
 		if (!pnest)
 			return -ENOMEM;
-		nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+		rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+		if (rc) {
+			nla_nest_cancel(skb, pnest);
+			return rc;
+		}
 		if ((0x1 << np->id) == ndp->package_whitelist)
 			nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
 		cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
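
Any nla_put_*() can fail once the message buffer runs out of tailroom, and an open nest has to be cancelled before bailing out. The generic netlink pattern, sketched with hypothetical MY_ATTR_* attributes:

	nest = nla_nest_start_noflag(skb, MY_ATTR_NEST);
	if (!nest)
		return -ENOMEM;
	rc = nla_put_u32(skb, MY_ATTR_ID, id);
	if (rc) {
		nla_nest_cancel(skb, nest);	/* unwind the partial nest */
		return rc;
	}
	nla_nest_end(skb, nest);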


@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 				   u8 family, u16 zone, bool *defrag)
 {
 	enum ip_conntrack_info ctinfo;
-	struct qdisc_skb_cb cb;
 	struct nf_conn *ct;
 	int err = 0;
 	bool frag;
+	u16 mru;
 
 	/* Previously seen (loopback)? Ignore. */
 	ct = nf_ct_get(skb, &ctinfo);
@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 		return err;
 
 	skb_get(skb);
-	cb = *qdisc_skb_cb(skb);
+	mru = tc_skb_cb(skb)->mru;
 
 	if (family == NFPROTO_IPV4) {
 		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
 		if (!err) {
 			*defrag = true;
-			cb.mru = IPCB(skb)->frag_max_size;
+			mru = IPCB(skb)->frag_max_size;
 		}
 	} else { /* NFPROTO_IPV6 */
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
 		if (!err) {
 			*defrag = true;
-			cb.mru = IP6CB(skb)->frag_max_size;
+			mru = IP6CB(skb)->frag_max_size;
 		}
 #else
 		err = -EOPNOTSUPP;
@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 	}
 
 	if (err != -EINPROGRESS)
-		*qdisc_skb_cb(skb) = cb;
+		tc_skb_cb(skb)->mru = mru;
 	skb_clear_hash(skb);
 	skb->ignore_df = 1;
 	return err;
@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
 	tcf_action_update_bstats(&c->common, skb);
 
 	if (clear) {
-		qdisc_skb_cb(skb)->post_ct = false;
+		tc_skb_cb(skb)->post_ct = false;
 		ct = nf_ct_get(skb, &ctinfo);
 		if (ct) {
 			nf_conntrack_put(&ct->ct_general);
@@ -1048,7 +1048,7 @@ do_nat:
 out_push:
 	skb_push_rcsum(skb, nh_ofs);
 
-	qdisc_skb_cb(skb)->post_ct = true;
+	tc_skb_cb(skb)->post_ct = true;
 out_clear:
 	if (defrag)
 		qdisc_skb_cb(skb)->pkt_len = skb->len;


@@ -1617,12 +1617,14 @@ int tcf_classify(struct sk_buff *skb,
 
 	/* If we missed on some chain */
 	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+		struct tc_skb_cb *cb = tc_skb_cb(skb);
+
 		ext = tc_skb_ext_alloc(skb);
 		if (WARN_ON_ONCE(!ext))
 			return TC_ACT_SHOT;
 		ext->chain = last_executed_chain;
-		ext->mru = qdisc_skb_cb(skb)->mru;
-		ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+		ext->mru = cb->mru;
+		ext->post_ct = cb->post_ct;
 	}
 
 	return ret;


@@ -19,6 +19,7 @@
 
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
 #include <net/ip.h>
 #include <net/flow_dissector.h>
 #include <net/geneve.h>
@@ -309,7 +310,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			struct tcf_result *res)
 {
 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-	bool post_ct = qdisc_skb_cb(skb)->post_ct;
+	bool post_ct = tc_skb_cb(skb)->post_ct;
 	struct fl_flow_key skb_key;
 	struct fl_flow_mask *mask;
 	struct cls_fl_filter *f;


@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 #include <net/netlink.h>
 #include <net/sch_generic.h>
+#include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/ip6_fib.h>
@@ -137,7 +138,7 @@ err:
 
 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
 {
-	u16 mru = qdisc_skb_cb(skb)->mru;
+	u16 mru = tc_skb_cb(skb)->mru;
 	int err;
 
 	if (mru && skb->len > mru + skb->dev->hard_header_len)


@@ -290,9 +290,8 @@ out:
 	return err;
 }
 
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	struct sk_buff *skb = commp->skb;
@@ -302,6 +301,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
 	int err = 0;
 
 	lock_sock(sk);
+	if (ep != tsp->asoc->ep)
+		goto release;
 	list_for_each_entry(assoc, &ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
@@ -344,9 +345,8 @@ release:
 	return err;
 }
 
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	const struct inet_diag_req_v2 *r = commp->r;
@@ -505,7 +505,7 @@ skip:
 	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
 		goto done;
 
-	sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
-				net, &pos, &commp);
+	sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+					net, &pos, &commp);
 	cb->args[2] = pos;


@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 }
 
 /* Final destructor for endpoint.  */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+	struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+	struct sock *sk = ep->base.sk;
+
+	sctp_sk(sk)->ep = NULL;
+	sock_put(sk);
+
+	kfree(ep);
+	SCTP_DBG_OBJCNT_DEC(ep);
+}
+
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
 	struct sock *sk;
@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	if (sctp_sk(sk)->bind_hash)
 		sctp_put_port(sk);
 
-	sctp_sk(sk)->ep = NULL;
-	/* Give up our hold on the sock */
-	sock_put(sk);
-
-	kfree(ep);
-	SCTP_DBG_OBJCNT_DEC(ep);
+	call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
 }
 
 /* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
 {
-	refcount_inc(&ep->base.refcnt);
+	return refcount_inc_not_zero(&ep->base.refcnt);
 }
 
 /* Release a reference to an endpoint and clean up if there are
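
The destructor moves the final free behind an RCU grace period so the transport walker can still dereference the endpoint and then validate it with the new non-zero-only hold. Sketch of the reader side this enables (lookup_endpoint() is a hypothetical lockless lookup):

	rcu_read_lock();
	ep = lookup_endpoint(key);
	if (ep && !sctp_endpoint_hold(ep))	/* refcount already hit zero */
		ep = NULL;	/* memory stays valid until the grace period ends */
	rcu_read_unlock();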


@@ -5338,11 +5338,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
 
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    int (*cb_done)(struct sctp_transport *, void *),
-			    struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+				    struct net *net, int *pos, void *p)
+{
 	struct rhashtable_iter hti;
 	struct sctp_transport *tsp;
+	struct sctp_endpoint *ep;
 	int ret;
 
 again:
@@ -5351,26 +5352,32 @@ again:
 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-		ret = cb(tsp, p);
-		if (ret)
-			break;
+		ep = tsp->asoc->ep;
+		if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+			ret = cb(ep, tsp, p);
+			if (ret)
+				break;
+			sctp_endpoint_put(ep);
+		}
 		(*pos)++;
 		sctp_transport_put(tsp);
 	}
 	sctp_transport_walk_stop(&hti);
 
 	if (ret) {
-		if (cb_done && !cb_done(tsp, p)) {
+		if (cb_done && !cb_done(ep, tsp, p)) {
 			(*pos)++;
+			sctp_endpoint_put(ep);
 			sctp_transport_put(tsp);
 			goto again;
 		}
+		sctp_endpoint_put(ep);
 		sctp_transport_put(tsp);
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
 
 /* 7.2.1 Association Status (SCTP_STATUS)


@@ -170,6 +170,11 @@ struct smc_connection {
 	u16			tx_cdc_seq;	/* sequence # for CDC send */
 	u16			tx_cdc_seq_fin;	/* sequence # - tx completed */
 	spinlock_t		send_lock;	/* protect wr_sends */
+	atomic_t		cdc_pend_tx_wr; /* number of pending tx CDC wqe
+						 * - inc when post wqe,
+						 * - dec on polled tx cqe
+						 */
+	wait_queue_head_t	cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
 	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
 	u32			tx_off;		/* base offset in peer rmb */


@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 	struct smc_sock *smc;
 	int diff;
 
-	if (!conn)
-		/* already dismissed */
-		return;
-
 	smc = container_of(conn, struct smc_sock, conn);
 	bh_lock_sock(&smc->sk);
 	if (!wc_status) {
@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 			      conn);
 		conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
 	}
+
+	if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
+	    unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
+		wake_up(&conn->cdc_pend_tx_wq);
+	WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
+
 	smc_tx_sndbuf_nonfull(smc);
 	bh_unlock_sock(&smc->sk);
 }
@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 	conn->tx_cdc_seq++;
 	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
 	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
+
+	atomic_inc(&conn->cdc_pend_tx_wr);
+	smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
 	if (!rc) {
 		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 	} else {
 		conn->tx_cdc_seq--;
 		conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+		atomic_dec(&conn->cdc_pend_tx_wr);
 	}
 
 	return rc;
@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
 	peer->token = htonl(local->token);
 	peer->prod_flags.failover_validation = 1;
 
+	/* We need to set pend->conn here to make sure smc_cdc_tx_handler()
+	 * can handle properly
+	 */
+	smc_cdc_add_pending_send(conn, pend);
+
+	atomic_inc(&conn->cdc_pend_tx_wr);
+	smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+	if (unlikely(rc))
+		atomic_dec(&conn->cdc_pend_tx_wr);
+
 	return rc;
 }
 
@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 	return rc;
 }
 
-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
-			      unsigned long data)
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
 {
-	struct smc_connection *conn = (struct smc_connection *)data;
-	struct smc_cdc_tx_pend *cdc_pend =
-		(struct smc_cdc_tx_pend *)tx_pend;
-
-	return cdc_pend->conn == conn;
-}
-
-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
-{
-	struct smc_cdc_tx_pend *cdc_pend =
-		(struct smc_cdc_tx_pend *)tx_pend;
-
-	cdc_pend->conn = NULL;
-}
-
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
-{
-	struct smc_link *link = conn->lnk;
-
-	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
-				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
-				(unsigned long)conn);
+	wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
 }
 
 /* Send a SMC-D CDC header.
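
The dismiss/filter machinery is replaced by a simple drain: senders count each posted CDC write request, the completion handler decrements and wakes, and teardown waits for zero. The moving parts, reduced (post_send() stands in for smc_wr_tx_send()):

	/* send path */
	atomic_inc(&conn->cdc_pend_tx_wr);
	smp_mb__after_atomic();		/* inc visible before the post */
	rc = post_send(link, pend);
	if (rc)
		atomic_dec(&conn->cdc_pend_tx_wr);

	/* completion handler */
	if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
	    wq_has_sleeper(&conn->cdc_pend_tx_wq))
		wake_up(&conn->cdc_pend_tx_wq);

	/* teardown */
	wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));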


@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
 			  struct smc_wr_buf **wr_buf,
 			  struct smc_rdma_wr **wr_rdma_buf,
 			  struct smc_cdc_tx_pend **pend);
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
 		     struct smc_cdc_tx_pend *pend);
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);


@@ -604,7 +604,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 		struct smc_link *lnk = &lgr->lnk[i];
 
-		if (smc_link_usable(lnk))
+		if (smc_link_sendable(lnk))
 			lnk->state = SMC_LNK_INACTIVE;
 	}
 	wake_up_all(&lgr->llc_msg_waiter);
@@ -1056,7 +1056,7 @@ void smc_conn_free(struct smc_connection *conn)
 		smc_ism_unset_conn(conn);
 		tasklet_kill(&conn->rx_tsklet);
 	} else {
-		smc_cdc_tx_dismiss_slots(conn);
+		smc_cdc_wait_pend_tx_wr(conn);
 		if (current_work() != &conn->abort_work)
 			cancel_work_sync(&conn->abort_work);
 	}
@@ -1133,7 +1133,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
 	smc_llc_link_clear(lnk, log);
 	smcr_buf_unmap_lgr(lnk);
 	smcr_rtoken_clear_link(lnk);
-	smc_ib_modify_qp_reset(lnk);
+	smc_ib_modify_qp_error(lnk);
 	smc_wr_free_link(lnk);
 	smc_ib_destroy_queue_pair(lnk);
 	smc_ib_dealloc_protection_domain(lnk);
@@ -1264,7 +1264,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
 		else
 			tasklet_unlock_wait(&conn->rx_tsklet);
 	} else {
-		smc_cdc_tx_dismiss_slots(conn);
+		smc_cdc_wait_pend_tx_wr(conn);
 	}
 
 	smc_lgr_unregister_conn(conn);
 	smc_close_active_abort(smc);
@@ -1387,11 +1387,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
 /* Called when an SMCR device is removed or the smc module is unloaded.
  * If smcibdev is given, all SMCR link groups using this device are terminated.
  * If smcibdev is NULL, all SMCR link groups are terminated.
+ *
+ * We must wait here for QPs been destroyed before we destroy the CQs,
+ * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus
+ * smc_sock cannot be released.
  */
 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 {
 	struct smc_link_group *lgr, *lg;
 	LIST_HEAD(lgr_free_list);
+	LIST_HEAD(lgr_linkdown_list);
 	int i;
 
 	spin_lock_bh(&smc_lgr_list.lock);
@@ -1403,7 +1408,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
 			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 				if (lgr->lnk[i].smcibdev == smcibdev)
-					smcr_link_down_cond_sched(&lgr->lnk[i]);
+					list_move_tail(&lgr->list, &lgr_linkdown_list);
 			}
 		}
 	}
@@ -1415,6 +1420,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 		__smc_lgr_terminate(lgr, false);
 	}
 
+	list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			if (lgr->lnk[i].smcibdev == smcibdev) {
+				mutex_lock(&lgr->llc_conf_mutex);
+				smcr_link_down_cond(&lgr->lnk[i]);
+				mutex_unlock(&lgr->llc_conf_mutex);
+			}
+		}
+	}
+
 	if (smcibdev) {
 		if (atomic_read(&smcibdev->lnk_cnt))
 			wait_event(smcibdev->lnks_deleted,
@@ -1514,7 +1529,6 @@ static void smcr_link_down(struct smc_link *lnk)
 	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
 		return;
 
-	smc_ib_modify_qp_reset(lnk);
 	to_lnk = smc_switch_conns(lgr, lnk, true);
 	if (!to_lnk) { /* no backup link available */
 		smcr_link_clear(lnk, true);
@@ -1742,6 +1756,7 @@ create:
 		conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
 		conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 		conn->urg_state = SMC_URG_READ;
+		init_waitqueue_head(&conn->cdc_pend_tx_wq);
 		INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
 		if (ini->is_smcd) {
 			conn->rx_off = sizeof(struct smcd_cdc_msg);


@@ -366,6 +366,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
 	return true;
 }
 
+static inline bool smc_link_sendable(struct smc_link *lnk)
+{
+	return smc_link_usable(lnk) &&
+		lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
+}
+
 static inline bool smc_link_active(struct smc_link *lnk)
 {
 	return lnk->state == SMC_LNK_ACTIVE;


@@ -101,12 +101,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
 			    IB_QP_MAX_QP_RD_ATOMIC);
 }
 
-int smc_ib_modify_qp_reset(struct smc_link *lnk)
+int smc_ib_modify_qp_error(struct smc_link *lnk)
 {
 	struct ib_qp_attr qp_attr;
 
 	memset(&qp_attr, 0, sizeof(qp_attr));
-	qp_attr.qp_state = IB_QPS_RESET;
+	qp_attr.qp_state = IB_QPS_ERR;
 	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
 }


@@ -79,6 +79,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
 int smc_ib_ready_link(struct smc_link *lnk);
 int smc_ib_modify_qp_rts(struct smc_link *lnk);
 int smc_ib_modify_qp_reset(struct smc_link *lnk);
+int smc_ib_modify_qp_error(struct smc_link *lnk);
 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 			     struct smc_buf_desc *buf_slot, u8 link_idx);


@@ -1358,7 +1358,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
 	delllc.reason = htonl(rsn);
 
 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
-		if (!smc_link_usable(&lgr->lnk[i]))
+		if (!smc_link_sendable(&lgr->lnk[i]))
 			continue;
 		if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
 			break;


@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
 }
 
 /* wait till all pending tx work requests on the given link are completed */
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
 {
-	if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
-			       SMC_WR_TX_WAIT_PENDING_TIME))
-		return 0;
-	else /* timeout */
-		return -EPIPE;
+	wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
 }
 
 static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
 	struct smc_wr_tx_pend pnd_snd;
 	struct smc_link *link;
 	u32 pnd_snd_idx;
-	int i;
 
 	link = wc->qp->qp_context;
@@ -115,14 +110,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
 	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
 		return;
 	if (wc->status) {
-		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-			/* clear full struct smc_wr_tx_pend including .priv */
-			memset(&link->wr_tx_pends[i], 0,
-			       sizeof(link->wr_tx_pends[i]));
-			memset(&link->wr_tx_bufs[i], 0,
-			       sizeof(link->wr_tx_bufs[i]));
-			clear_bit(i, link->wr_tx_mask);
-		}
 		/* terminate link */
 		smcr_link_down_cond_sched(link);
 	}
@@ -169,7 +156,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
 	*idx = link->wr_tx_cnt;
-	if (!smc_link_usable(link))
+	if (!smc_link_sendable(link))
 		return -ENOLINK;
 	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
 		if (!test_and_set_bit(*idx, link->wr_tx_mask))
@@ -212,7 +199,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
 	} else {
 		rc = wait_event_interruptible_timeout(
 			link->wr_tx_wait,
-			!smc_link_usable(link) ||
+			!smc_link_sendable(link) ||
 			lgr->terminating ||
 			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
 			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
@@ -288,18 +275,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
 			unsigned long timeout)
 {
 	struct smc_wr_tx_pend *pend;
+	u32 pnd_idx;
 	int rc;
 
 	pend = container_of(priv, struct smc_wr_tx_pend, priv);
 	pend->compl_requested = 1;
-	init_completion(&link->wr_tx_compl[pend->idx]);
+	pnd_idx = pend->idx;
+	init_completion(&link->wr_tx_compl[pnd_idx]);
 
 	rc = smc_wr_tx_send(link, priv);
 	if (rc)
 		return rc;
 	/* wait for completion by smc_wr_tx_process_cqe() */
 	rc = wait_for_completion_interruptible_timeout(
-					&link->wr_tx_compl[pend->idx], timeout);
+					&link->wr_tx_compl[pnd_idx], timeout);
 	if (rc <= 0)
 		rc = -ENODATA;
 	if (rc > 0)
@@ -349,25 +338,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
 	return rc;
 }
 
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
-			     smc_wr_tx_filter filter,
-			     smc_wr_tx_dismisser dismisser,
-			     unsigned long data)
-{
-	struct smc_wr_tx_pend_priv *tx_pend;
-	struct smc_wr_rx_hdr *wr_tx;
-	int i;
-
-	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
-		if (wr_tx->type != wr_tx_hdr_type)
-			continue;
-		tx_pend = &link->wr_tx_pends[i].priv;
-		if (filter(tx_pend, data))
-			dismisser(tx_pend);
-	}
-}
-
 /****************************** receive queue ********************************/
 
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
@@ -572,10 +542,7 @@ void smc_wr_free_link(struct smc_link *lnk)
 	smc_wr_wakeup_reg_wait(lnk);
 	smc_wr_wakeup_tx_wait(lnk);
 
-	if (smc_wr_tx_wait_no_pending_sends(lnk))
-		memset(lnk->wr_tx_mask, 0,
-		       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
-		       sizeof(*lnk->wr_tx_mask));
+	smc_wr_tx_wait_no_pending_sends(lnk);
 	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
 	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));


@@ -22,7 +22,6 @@
 #define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */
 #define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)
-#define SMC_WR_TX_WAIT_PENDING_TIME	(5 * HZ)
 #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
@@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
 static inline bool smc_wr_tx_link_hold(struct smc_link *link)
 {
-	if (!smc_link_usable(link))
+	if (!smc_link_sendable(link))
 		return false;
 	atomic_inc(&link->wr_tx_refcnt);
 	return true;
@@ -122,7 +121,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
 			     smc_wr_tx_filter filter,
 			     smc_wr_tx_dismisser dismisser,
 			     unsigned long data);
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
 int smc_wr_rx_post_init(struct smc_link *link);

@@ -219,7 +219,7 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "s390" && $bits == 64) {
 	if ($cc =~ /-DCC_USING_HOTPATCH/) {
-		$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
+		$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
 		$mcount_adjust = 0;
 	}
 	$alignment = 8;

@@ -5812,7 +5812,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
 	struct common_audit_data ad;
 	struct lsm_network_audit net = {0,};
 	char *addrp;
-	u8 proto;
+	u8 proto = 0;
 	if (sk == NULL)
 		return NF_ACCEPT;
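
Note: the initializer matters because the parse helper can return early without writing proto, and the later checks would then test an indeterminate value. A standalone sketch of the hazard (parse_hdr() is a hypothetical stand-in for the real parser):

    #include <stdint.h>

    #define PROTO_UDP 17

    /* Models a parser that may bail out without setting *proto. */
    static int parse_hdr(const void *pkt, uint8_t *proto)
    {
        if (!pkt)
            return -1;              /* early exit: *proto untouched */
        *proto = PROTO_UDP;
        return 0;
    }

    static int is_udp(const void *pkt)
    {
        uint8_t proto = 0;          /* the fix: defined value on error paths */

        if (parse_hdr(pkt, &proto) < 0)
            return 0;               /* proto is 0 here, not stack garbage */
        return proto == PROTO_UDP;
    }

    int main(void)
    {
        return !(is_udp(0) == 0);   /* error path reads the initialized 0 */
    }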

@@ -1051,10 +1051,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
 		return false;
 	if (!domain)
 		return true;
+	if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
+		return false;
 	list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
 				srcu_read_lock_held(&tomoyo_ss)) {
 		u16 perm;
-		u8 i;
 		if (ptr->is_deleted)
 			continue;
@@ -1065,23 +1066,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
 		 */
 		switch (ptr->type) {
 		case TOMOYO_TYPE_PATH_ACL:
-			data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
+			perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
 			break;
 		case TOMOYO_TYPE_PATH2_ACL:
-			data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+			perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
 			break;
 		case TOMOYO_TYPE_PATH_NUMBER_ACL:
-			data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
-				  ->perm);
+			perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
+					 ->perm);
 			break;
 		case TOMOYO_TYPE_MKDEV_ACL:
-			data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+			perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
 			break;
 		case TOMOYO_TYPE_INET_ACL:
-			data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+			perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
 			break;
 		case TOMOYO_TYPE_UNIX_ACL:
-			data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+			perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
 			break;
 		case TOMOYO_TYPE_MANUAL_TASK_ACL:
 			perm = 0;
@@ -1089,21 +1090,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
 		default:
 			perm = 1;
 		}
-		for (i = 0; i < 16; i++)
-			if (perm & (1 << i))
-				count++;
+		count += hweight16(perm);
 	}
 	if (count < tomoyo_profile(domain->ns, domain->profile)->
 	    pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
 		return true;
-	if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
-		domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
-		/* r->granted = false; */
-		tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+	WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
+	/* r->granted = false; */
+	tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
 #ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
-		pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
-			domain->domainname->name);
+	pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+		domain->domainname->name);
 #endif
-	}
 	return false;
 }
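
Note: the loop-to-hweight16() rewrite is a pure popcount; both forms count the set bits in a 16-bit permission mask. A quick standalone check, modelling hweight16() with a compiler builtin (an assumption for illustration; the kernel provides its own implementation):

    #include <assert.h>
    #include <stdint.h>

    static unsigned int hweight16_model(uint16_t w)
    {
        return (unsigned int)__builtin_popcount(w);   /* stand-in for hweight16() */
    }

    int main(void)
    {
        uint16_t perm = 0x0f05;     /* example permission bitmap: 6 bits set */
        unsigned int count = 0;

        for (int i = 0; i < 16; i++)    /* the open-coded loop the patch drops */
            if (perm & (1 << i))
                count++;

        assert(count == hweight16_model(perm));
        return 0;
    }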

@@ -132,8 +132,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
 		return AE_NOT_FOUND;
 	}
-	info->handle = handle;
 	/*
 	 * On some Intel platforms, multiple children of the HDAS
 	 * device can be found, but only one of them is the SoundWire
@@ -144,6 +142,9 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
 	if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
 		return AE_OK; /* keep going */
+	/* found the correct SoundWire controller */
+	info->handle = handle;
 	/* device found, stop namespace walk */
 	return AE_CTRL_TERMINATE;
 }
@@ -164,8 +165,14 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
 	acpi_status status;
 	info->handle = NULL;
+	/*
+	 * In the HDAS ACPI scope, 'SNDW' may be either the child of
+	 * 'HDAS' or the grandchild of 'HDAS'. So let's go through
+	 * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
+	 * device.
+	 */
 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
-				     parent_handle, 1,
+				     parent_handle, 2,
 				     sdw_intel_acpi_cb,
 				     NULL, info, NULL);
 	if (ACPI_FAILURE(status) || info->handle == NULL)
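
Note: the functional change in this hunk is the walk depth: with max_depth 1 only direct children of the start handle are visited, so a controller one level deeper is never found. A toy depth-limited walk over an invented tree (plain C, not the ACPICA API; the intermediate 'SHIM' node is made up) illustrates the difference:

    #include <stdio.h>
    #include <string.h>

    struct node {
        const char *name;
        const struct node *child[4];    /* NULL-terminated list of children */
    };

    static const struct node *walk(const struct node *n, const char *want,
                                   int max_depth)
    {
        if (max_depth == 0)
            return NULL;                /* depth budget exhausted */
        for (int i = 0; i < 4 && n->child[i]; i++) {
            if (strcmp(n->child[i]->name, want) == 0)
                return n->child[i];
            const struct node *hit = walk(n->child[i], want, max_depth - 1);
            if (hit)
                return hit;
        }
        return NULL;
    }

    int main(void)
    {
        const struct node sndw = { "SNDW", { NULL } };
        const struct node shim = { "SHIM", { &sndw, NULL } };  /* made-up level */
        const struct node hdas = { "HDAS", { &shim, NULL } };

        printf("max_depth 1: %s\n", walk(&hdas, "SNDW", 1) ? "found" : "missed");
        printf("max_depth 2: %s\n", walk(&hdas, "SNDW", 2) ? "found" : "missed");
        return 0;
    }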

@@ -2463,7 +2463,7 @@ static int process_switch_event(struct perf_tool *tool,
 	if (perf_event__process_switch(tool, event, sample, machine) < 0)
 		return -1;
-	if (scripting_ops && scripting_ops->process_switch)
+	if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
 		scripting_ops->process_switch(event, sample, machine);
 	if (!script->show_switch_events)

@@ -32,8 +32,7 @@ try:
 except:
 	broken_pipe_exception = IOError
-glb_switch_str = None
-glb_switch_printed = True
+glb_switch_str = {}
 glb_insn = False
 glb_disassembler = None
 glb_src = False
@@ -70,6 +69,7 @@ def trace_begin():
 	ap = argparse.ArgumentParser(usage = "", add_help = False)
 	ap.add_argument("--insn-trace", action='store_true')
 	ap.add_argument("--src-trace", action='store_true')
+	ap.add_argument("--all-switch-events", action='store_true')
 	global glb_args
 	global glb_insn
 	global glb_src
@@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
 	print(start_str, src_str)
 def do_process_event(param_dict):
-	global glb_switch_printed
-	if not glb_switch_printed:
-		print(glb_switch_str)
-		glb_switch_printed = True
 	event_attr = param_dict["attr"]
 	sample = param_dict["sample"]
 	raw_buf = param_dict["raw_buf"]
@@ -274,6 +270,11 @@ def do_process_event(param_dict):
 	dso = get_optional(param_dict, "dso")
 	symbol = get_optional(param_dict, "symbol")
+	cpu = sample["cpu"]
+	if cpu in glb_switch_str:
+		print(glb_switch_str[cpu])
+		del glb_switch_str[cpu]
 	if name[0:12] == "instructions":
 		if glb_src:
 			print_srccode(comm, param_dict, sample, symbol, dso, True)
@@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
 	sys.exit(1)
 def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
-	global glb_switch_printed
-	global glb_switch_str
 	if out:
 		out_str = "Switch out "
 	else:
@@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
machine_str = "" machine_str = ""
else: else:
machine_str = "machine PID %d" % machine_pid machine_str = "machine PID %d" % machine_pid
glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \ switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
(out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str) (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
glb_switch_printed = False if glb_args.all_switch_events:
print(switch_str);
else:
global glb_switch_str
glb_switch_str[cpu] = switch_str
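
Note: the script now stashes the most recent switch line per CPU and emits it just before the next sample on that same CPU, instead of keeping one global slot (or printing everything, with --all-switch-events). A toy C model of that buffering scheme (an array standing in for the Python dict; names and sizes are illustrative):

    #include <stdio.h>

    #define NCPU 4

    static char pending[NCPU][64];          /* "" means nothing pending */

    static void on_switch(int cpu, const char *line)
    {
        snprintf(pending[cpu], sizeof(pending[cpu]), "%s", line);
    }

    static void on_sample(int cpu)
    {
        if (pending[cpu][0]) {
            puts(pending[cpu]);             /* emit the stashed switch first */
            pending[cpu][0] = '\0';
        }
        printf("sample on cpu %d\n", cpu);
    }

    int main(void)
    {
        on_switch(0, "switch out cpu 0");
        on_switch(1, "switch out cpu 1");
        on_sample(1);   /* prints cpu 1's switch, then its sample */
        on_sample(0);   /* cpu 0's switch was held back until now */
        return 0;
    }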

@@ -3540,6 +3540,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
 		*args = p;
 		return 0;
 	}
+	p += 1;
 	while (1) {
 		vmcs = strtoull(p, &p, 0);
 		if (errno)
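
Note: the added p += 1 steps over the separator in front of the value list, so the first strtoull() starts on a digit rather than the delimiter. The scanning pattern in isolation (a sketch assuming a ':'-prefixed, comma-separated list; not the exact perf argument grammar):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *arg = ":0x123,0x456";
        const char *p = arg;

        p += 1;                     /* the fix: skip the leading separator */
        while (1) {
            char *end;

            errno = 0;
            unsigned long long vmcs = strtoull(p, &end, 0);
            if (errno || end == p)
                break;              /* conversion error or no progress */
            printf("vmcs: %#llx\n", vmcs);
            if (*end != ',')
                break;              /* end of list */
            p = end + 1;            /* step past ',' to the next value */
        }
        return 0;
    }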

@@ -132,7 +132,7 @@ run_test() {
 	local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
 							sed -e 's/\[//' -e 's/:.*//'`
 	if [ $rcv != $pkts ]; then
-		echo " fail - received $rvs packets, expected $pkts"
+		echo " fail - received $rcv packets, expected $pkts"
 		ret=1
 		return
 	fi
@@ -185,6 +185,7 @@ for family in 4 6; do
 	IPT=iptables
 	SUFFIX=24
 	VXDEV=vxlan
+	PING=ping
 	if [ $family = 6 ]; then
 		BM_NET=$BM_NET_V6
@@ -192,6 +193,7 @@ for family in 4 6; do
SUFFIX="64 nodad" SUFFIX="64 nodad"
VXDEV=vxlan6 VXDEV=vxlan6
IPT=ip6tables IPT=ip6tables
PING="ping6"
fi fi
echo "IPv$family" echo "IPv$family"
@@ -237,7 +239,7 @@ for family in 4 6; do
 	# load arp cache before running the test to reduce the amount of
 	# stray traffic on top of the UDP tunnel
-	ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
+	ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
 	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
 	cleanup

(Some files were not shown because too many files have changed in this diff.)