kernel_arpi/lib/stackdepot.c
Greg Kroah-Hartman 33f5d1daec Merge 5.15.34 into android13-5.15
Changes in 5.15.34
	lib/logic_iomem: correct fallback config references
	um: fix and optimize xor select template for CONFIG64 and timetravel mode
	rtc: wm8350: Handle error for wm8350_register_irq
	nbd: add error handling support for add_disk()
	nbd: Fix incorrect error handle when first_minor is illegal in nbd_dev_add
	nbd: Fix hungtask when nbd_config_put
	nbd: fix possible overflow on 'first_minor' in nbd_dev_add()
	kfence: count unexpectedly skipped allocations
	kfence: move saving stack trace of allocations into __kfence_alloc()
	kfence: limit currently covered allocations when pool nearly full
	KVM: x86/pmu: Use different raw event masks for AMD and Intel
	KVM: SVM: Fix kvm_cache_regs.h inclusions for is_guest_mode()
	KVM: x86/svm: Clear reserved bits written to PerfEvtSeln MSRs
	KVM: x86/pmu: Fix and isolate TSX-specific performance event logic
	KVM: x86/emulator: Emulate RDPID only if it is enabled in guest
	drm: Add orientation quirk for GPD Win Max
	ath5k: fix OOB in ath5k_eeprom_read_pcal_info_5111
	drm/amd/display: Add signal type check when verify stream backends same
	drm/amd/amdgpu/amdgpu_cs: fix refcount leak of a dma_fence obj
	drm/amd/display: Fix memory leak
	drm/amd/display: Use PSR version selected during set_psr_caps
	usb: gadget: tegra-xudc: Do not program SPARAM
	usb: gadget: tegra-xudc: Fix control endpoint's definitions
	usb: cdnsp: fix cdnsp_decode_trb function to properly handle ret value
	ptp: replace snprintf with sysfs_emit
	drm/amdkfd: Don't take process mutex for svm ioctls
	powerpc: dts: t104xrdb: fix phy type for FMAN 4/5
	ath11k: fix kernel panic during unload/load ath11k modules
	ath11k: pci: fix crash on suspend if board file is not found
	ath11k: mhi: use mhi_sync_power_up()
	net/smc: Send directly when TCP_CORK is cleared
	drm/bridge: Add missing pm_runtime_put_sync
	bpf: Make dst_port field in struct bpf_sock 16-bit wide
	scsi: mvsas: Replace snprintf() with sysfs_emit()
	scsi: bfa: Replace snprintf() with sysfs_emit()
	drm/v3d: fix missing unlock
	power: supply: axp20x_battery: properly report current when discharging
	mt76: mt7921: fix crash when startup fails.
	mt76: dma: initialize skip_unmap in mt76_dma_rx_fill
	cfg80211: don't add non transmitted BSS to 6GHz scanned channels
	libbpf: Fix build issue with llvm-readelf
	ipv6: make mc_forwarding atomic
	net: initialize init_net earlier
	powerpc: Set crashkernel offset to mid of RMA region
	drm/amdgpu: Fix recursive locking warning
	scsi: smartpqi: Fix kdump issue when controller is locked up
	PCI: aardvark: Fix support for MSI interrupts
	iommu/arm-smmu-v3: fix event handling soft lockup
	usb: ehci: add pci device support for Aspeed platforms
	PCI: endpoint: Fix alignment fault error in copy tests
	tcp: Don't acquire inet_listen_hashbucket::lock with disabled BH.
	PCI: pciehp: Add Qualcomm quirk for Command Completed erratum
	scsi: mpi3mr: Fix reporting of actual data transfer size
	scsi: mpi3mr: Fix memory leaks
	powerpc/set_memory: Avoid spinlock recursion in change_page_attr()
	power: supply: axp288-charger: Set Vhold to 4.4V
	net/mlx5e: Disable TX queues before registering the netdev
	usb: dwc3: pci: Set the swnode from inside dwc3_pci_quirks()
	iwlwifi: mvm: Correctly set fragmented EBS
	iwlwifi: mvm: move only to an enabled channel
	drm/msm/dsi: Remove spurious IRQF_ONESHOT flag
	ipv4: Invalidate neighbour for broadcast address upon address addition
	dm ioctl: prevent potential spectre v1 gadget
	dm: requeue IO if mapping table not yet available
	drm/amdkfd: make CRAT table missing message informational only
	vfio/pci: Stub vfio_pci_vga_rw when !CONFIG_VFIO_PCI_VGA
	scsi: pm8001: Fix pm80xx_pci_mem_copy() interface
	scsi: pm8001: Fix pm8001_mpi_task_abort_resp()
	scsi: pm8001: Fix task leak in pm8001_send_abort_all()
	scsi: pm8001: Fix tag leaks on error
	scsi: pm8001: Fix memory leak in pm8001_chip_fw_flash_update_req()
	mt76: mt7915: fix injected MPDU transmission to not use HW A-MSDU
	powerpc/64s/hash: Make hash faults work in NMI context
	mt76: mt7615: Fix assigning negative values to unsigned variable
	scsi: aha152x: Fix aha152x_setup() __setup handler return value
	scsi: hisi_sas: Free irq vectors in order for v3 HW
	scsi: hisi_sas: Limit users changing debugfs BIST count value
	net/smc: correct settings of RMB window update limit
	mips: ralink: fix a refcount leak in ill_acc_of_setup()
	macvtap: advertise link netns via netlink
	tuntap: add sanity checks about msg_controllen in sendmsg
	Bluetooth: Fix not checking for valid hdev on bt_dev_{info,warn,err,dbg}
	Bluetooth: use memset avoid memory leaks
	bnxt_en: Eliminate unintended link toggle during FW reset
	PCI: endpoint: Fix misused goto label
	MIPS: fix fortify panic when copying asm exception handlers
	powerpc/64e: Tie PPC_BOOK3E_64 to PPC_FSL_BOOK3E
	powerpc/secvar: fix refcount leak in format_show()
	scsi: libfc: Fix use after free in fc_exch_abts_resp()
	can: isotp: set default value for N_As to 50 micro seconds
	can: etas_es58x: es58x_fd_rx_event_msg(): initialize rx_event_msg before calling es58x_check_msg_len()
	riscv: Fixed misaligned memory access. Fixed pointer comparison.
	net: account alternate interface name memory
	net: limit altnames to 64k total
	net/mlx5e: Remove overzealous validations in netlink EEPROM query
	net: sfp: add 2500base-X quirk for Lantech SFP module
	usb: dwc3: omap: fix "unbalanced disables for smps10_out1" on omap5evm
	mt76: fix monitor mode crash with sdio driver
	xtensa: fix DTC warning unit_address_format
	MIPS: ingenic: correct unit node address
	Bluetooth: Fix use after free in hci_send_acl
	netfilter: conntrack: revisit gc autotuning
	netlabel: fix out-of-bounds memory accesses
	ceph: fix inode reference leakage in ceph_get_snapdir()
	ceph: fix memory leak in ceph_readdir when note_last_dentry returns error
	lib/Kconfig.debug: add ARCH dependency for FUNCTION_ALIGN option
	init/main.c: return 1 from handled __setup() functions
	minix: fix bug when opening a file with O_DIRECT
	clk: si5341: fix reported clk_rate when output divider is 2
	staging: vchiq_arm: Avoid NULL ptr deref in vchiq_dump_platform_instances
	staging: vchiq_core: handle NULL result of find_service_by_handle
	phy: amlogic: phy-meson-gxl-usb2: fix shared reset controller use
	phy: amlogic: meson8b-usb2: Use dev_err_probe()
	phy: amlogic: meson8b-usb2: fix shared reset control use
	clk: rockchip: drop CLK_SET_RATE_PARENT from dclk_vop* on rk3568
	cpufreq: CPPC: Fix performance/frequency conversion
	opp: Expose of-node's name in debugfs
	staging: wfx: fix an error handling in wfx_init_common()
	w1: w1_therm: fixes w1_seq for ds28ea00 sensors
	NFSv4.2: fix reference count leaks in _nfs42_proc_copy_notify()
	NFSv4: Protect the state recovery thread against direct reclaim
	habanalabs: fix possible memory leak in MMU DR fini
	xen: delay xen_hvm_init_time_ops() if kdump is boot on vcpu>=32
	clk: ti: Preserve node in ti_dt_clocks_register()
	clk: Enforce that disjoints limits are invalid
	SUNRPC/call_alloc: async tasks mustn't block waiting for memory
	SUNRPC/xprt: async tasks mustn't block waiting for memory
	SUNRPC: remove scheduling boost for "SWAPPER" tasks.
	NFS: swap IO handling is slightly different for O_DIRECT IO
	NFS: swap-out must always use STABLE writes.
	x86: Annotate call_on_stack()
	x86/Kconfig: Do not allow CONFIG_X86_X32_ABI=y with llvm-objcopy
	serial: samsung_tty: do not unlock port->lock for uart_write_wakeup()
	virtio_console: eliminate anonymous module_init & module_exit
	jfs: prevent NULL deref in diFree
	SUNRPC: Fix socket waits for write buffer space
	NFS: nfsiod should not block forever in mempool_alloc()
	NFS: Avoid writeback threads getting stuck in mempool_alloc()
	selftests: net: Add tls config dependency for tls selftests
	parisc: Fix CPU affinity for Lasi, WAX and Dino chips
	parisc: Fix patch code locking and flushing
	mm: fix race between MADV_FREE reclaim and blkdev direct IO read
	rtc: mc146818-lib: change return values of mc146818_get_time()
	rtc: Check return value from mc146818_get_time()
	rtc: mc146818-lib: fix RTC presence check
	drm/amdgpu: fix off by one in amdgpu_gfx_kiq_acquire()
	Drivers: hv: vmbus: Fix potential crash on module unload
	Revert "NFSv4: Handle the special Linux file open access mode"
	NFSv4: fix open failure with O_ACCMODE flag
	scsi: sr: Fix typo in CDROM(CLOSETRAY|EJECT) handling
	scsi: core: Fix sbitmap depth in scsi_realloc_sdev_budget_map()
	scsi: zorro7xx: Fix a resource leak in zorro7xx_remove_one()
	vdpa/mlx5: Rename control VQ workqueue to vdpa wq
	vdpa/mlx5: Propagate link status from device to vdpa driver
	vdpa: mlx5: prevent cvq work from hogging CPU
	net: sfc: add missing xdp queue reinitialization
	net/tls: fix slab-out-of-bounds bug in decrypt_internal
	vrf: fix packet sniffing for traffic originating from ip tunnels
	skbuff: fix coalescing for page_pool fragment recycling
	ice: Clear default forwarding VSI during VSI release
	mctp: Fix check for dev_hard_header() result
	net: ipv4: fix route with nexthop object delete warning
	net: stmmac: Fix unset max_speed difference between DT and non-DT platforms
	drm/imx: imx-ldb: Check for null pointer after calling kmemdup
	drm/imx: Fix memory leak in imx_pd_connector_get_modes
	drm/imx: dw_hdmi-imx: Fix bailout in error cases of probe
	regulator: rtq2134: Fix missing active_discharge_on setting
	regulator: atc260x: Fix missing active_discharge_on setting
	arch/arm64: Fix topology initialization for core scheduling
	bnxt_en: Synchronize tx when xdp redirects happen on same ring
	bnxt_en: reserve space inside receive page for skb_shared_info
	bnxt_en: Prevent XDP redirect from running when stopping TX queue
	sfc: Do not free an empty page_ring
	RDMA/mlx5: Don't remove cache MRs when a delay is needed
	RDMA/mlx5: Add a missing update of cache->last_add
	IB/cm: Cancel mad on the DREQ event when the state is MRA_REP_RCVD
	IB/rdmavt: add lock to call to rvt_error_qp to prevent a race condition
	sctp: count singleton chunks in assoc user stats
	dpaa2-ptp: Fix refcount leak in dpaa2_ptp_probe
	ice: Set txq_teid to ICE_INVAL_TEID on ring creation
	ice: Do not skip not enabled queues in ice_vc_dis_qs_msg
	ipv6: Fix stats accounting in ip6_pkt_drop
	ice: synchronize_rcu() when terminating rings
	ice: xsk: fix VSI state check in ice_xsk_wakeup()
	net: openvswitch: don't send internal clone attribute to the userspace.
	net: ethernet: mv643xx: Fix over zealous checking of_get_mac_address()
	net: openvswitch: fix leak of nested actions
	rxrpc: fix a race in rxrpc_exit_net()
	net: sfc: fix using uninitialized xdp tx_queue
	net: phy: mscc-miim: reject clause 45 register accesses
	qede: confirm skb is allocated before using
	spi: bcm-qspi: fix MSPI only access with bcm_qspi_exec_mem_op()
	bpf: Support dual-stack sockets in bpf_tcp_check_syncookie
	drbd: Fix five use after free bugs in get_initial_state
	scsi: ufs: ufshpb: Fix a NULL check on list iterator
	io_uring: nospec index for tags on files update
	io_uring: don't touch scm_fp_list after queueing skb
	SUNRPC: Handle ENOMEM in call_transmit_status()
	SUNRPC: Handle low memory situations in call_status()
	SUNRPC: svc_tcp_sendmsg() should handle errors from xdr_alloc_bvec()
	iommu/omap: Fix regression in probe for NULL pointer dereference
	perf: arm-spe: Fix perf report --mem-mode
	perf tools: Fix perf's libperf_print callback
	perf session: Remap buf if there is no space for event
	arm64: Add part number for Arm Cortex-A78AE
	scsi: mpt3sas: Fix use after free in _scsih_expander_node_remove()
	scsi: ufs: ufs-pci: Add support for Intel MTL
	Revert "mmc: sdhci-xenon: fix annoying 1.8V regulator warning"
	mmc: block: Check for errors after write on SPI
	mmc: mmci: stm32: correctly check all elements of sg list
	mmc: renesas_sdhi: don't overwrite TAP settings when HS400 tuning is complete
	mmc: core: Fixup support for writeback-cache for eMMC and SD
	lz4: fix LZ4_decompress_safe_partial read out of bound
	highmem: fix checks in __kmap_local_sched_{in,out}
	mmmremap.c: avoid pointless invalidate_range_start/end on mremap(old_size=0)
	mm/mempolicy: fix mpol_new leak in shared_policy_replace
	io_uring: don't check req->file in io_fsync_prep()
	io_uring: defer splice/tee file validity check until command issue
	io_uring: implement compat handling for IORING_REGISTER_IOWQ_AFF
	io_uring: fix race between timeout flush and removal
	x86/pm: Save the MSR validity status at context setup
	x86/speculation: Restore speculation related MSRs during S3 resume
	perf/x86/intel: Update the FRONTEND MSR mask on Sapphire Rapids
	btrfs: fix qgroup reserve overflow the qgroup limit
	btrfs: prevent subvol with swapfile from being deleted
	spi: core: add dma_map_dev for __spi_unmap_msg()
	arm64: patch_text: Fixup last cpu should be master
	RDMA/hfi1: Fix use-after-free bug for mm struct
	gpio: Restrict usage of GPIO chip irq members before initialization
	x86/msi: Fix msi message data shadow struct
	x86/mm/tlb: Revert retpoline avoidance approach
	perf/x86/intel: Don't extend the pseudo-encoding to GP counters
	ata: sata_dwc_460ex: Fix crash due to OOB write
	perf: qcom_l2_pmu: fix an incorrect NULL check on list iterator
	perf/core: Inherit event_caps
	irqchip/gic-v3: Fix GICR_CTLR.RWP polling
	fbdev: Fix unregistering of framebuffers without device
	amd/display: set backlight only if required
	SUNRPC: Prevent immediate close+reconnect
	drm/panel: ili9341: fix optional regulator handling
	drm/amdgpu/display: change pipe policy for DCN 2.1
	drm/amdgpu/smu10: fix SoC/fclk units in auto mode
	drm/amdgpu/vcn: Fix the register setting for vcn1
	drm/nouveau/pmu: Add missing callbacks for Tegra devices
	drm/amdkfd: Create file descriptor after client is added to smi_clients list
	drm/amdgpu: don't use BACO for reset in S3
	KVM: SVM: Allow AVIC support on system w/ physical APIC ID > 255
	net/smc: send directly on setting TCP_NODELAY
	Revert "selftests: net: Add tls config dependency for tls selftests"
	bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide
	selftests/bpf: Fix u8 narrow load checks for bpf_sk_lookup remote_port
	rtc: mc146818-lib: fix signedness bug in mc146818_get_time()
	SUNRPC: Don't call connect() more than once on a TCP socket
	Revert "nbd: fix possible overflow on 'first_minor' in nbd_dev_add()"
	perf build: Don't use -ffat-lto-objects in the python feature test when building with clang-13
	perf python: Fix probing for some clang command line options
	tools build: Filter out options and warnings not supported by clang
	tools build: Use $(shell ) instead of `` to get embedded libperl's ccopts
	dmaengine: Revert "dmaengine: shdma: Fix runtime PM imbalance on error"
	KVM: avoid NULL pointer dereference in kvm_dirty_ring_push
	Revert "net/mlx5: Accept devlink user input after driver initialization complete"
	ubsan: remove CONFIG_UBSAN_OBJECT_SIZE
	selftests: cgroup: Make cg_create() use 0755 for permission instead of 0644
	selftests: cgroup: Test open-time credential usage for migration checks
	selftests: cgroup: Test open-time cgroup namespace usage for migration checks
	mm: don't skip swap entry even if zap_details specified
	Drivers: hv: vmbus: Replace smp_store_mb() with virt_store_mb()
	x86/bug: Prevent shadowing in __WARN_FLAGS
	sched: Teach the forced-newidle balancer about CPU affinity limitation.
	x86,static_call: Fix __static_call_return0 for i386
	irqchip/gic-v4: Wait for GICR_VPENDBASER.Dirty to clear before descheduling
	powerpc/64: Fix build failure with allyesconfig in book3s_64_entry.S
	irqchip/gic, gic-v3: Prevent GSI to SGI translations
	mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning
	static_call: Don't make __static_call_return0 static
	powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit
	stacktrace: move filter_irq_stacks() to kernel/stacktrace.c
	Linux 5.15.34

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I98049d0d8ebd427296418d31085bfde482ad30e7
2022-04-24 16:57:32 +02:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored one after another
 * in contiguously allocated memory.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
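/*
 * A minimal usage sketch (record_current_stack() is a hypothetical helper
 * used only for illustration, not part of this API): a client captures a
 * trace with stack_trace_save(), stores it with stack_depot_save() and keeps
 * only the returned handle; the trace can be recovered later with
 * stack_depot_fetch().
 *
 *	static depot_stack_handle_t record_current_stack(gfp_t flags)
 *	{
 *		unsigned long entries[16];
 *		unsigned int nr_entries;
 *
 *		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *		return stack_depot_save(entries, nr_entries, flags);
 *	}
 *
 * Later, e.g. in an error report:
 *
 *	unsigned long *trace;
 *	unsigned int nr = stack_depot_fetch(handle, &trace);
 *
 *	stack_trace_print(trace, nr, 0);
 */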
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
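/*
 * The handle fits in DEPOT_STACK_BITS (32) bits: one "valid" bit,
 * STACK_ALLOC_OFFSET_BITS for the offset within a slab (stored right-shifted
 * by STACK_ALLOC_ALIGN, i.e. in 16-byte units), and STACK_ALLOC_INDEX_BITS
 * for the slab index, which is additionally capped at STACK_ALLOC_SLABS_CAP.
 */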
struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);
static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}
/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}
#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
static bool stack_depot_disable;
static struct stack_record **stack_table;
static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);
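/*
 * "stack_depot_disable" is a boot-time kernel parameter: passing e.g.
 * stack_depot_disable=on (any value accepted by kstrtobool()) switches the
 * depot off, and stack_depot_init() below then skips allocating the hash
 * table.
 */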
int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		for (i = 0; i < STACK_HASH_SIZE; i++)
			stack_table[i] = NULL;
	}
	return 0;
}
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}
/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}
/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}
/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @entries:	Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc
 * is %true, stack depot is allowed to replenish the stack slab pool in case
 * no space is left (allocates using GFP flags of @alloc_flags). If @can_alloc
 * is %false, avoids any allocations and will fail if no space is left to
 * store the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stackdepot growth.
	 *
	 * Because use of filter_irq_stacks() is a requirement to ensure
	 * stackdepot can efficiently deduplicate interrupt stacks, always
	 * filter_irq_stacks() to simplify all callers' use of stackdepot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab need to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
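/*
 * Illustrative only (save_stack_nmi_safe() is a hypothetical wrapper, not part
 * of this file): callers that run in NMI context or under a raw spinlock pass
 * can_alloc=false, so __stack_depot_save() never calls alloc_pages() and
 * instead fails (returns 0) when there is no room left for the trace.
 *
 *	static depot_stack_handle_t save_stack_nmi_safe(unsigned long *entries,
 *							unsigned int nr)
 *	{
 *		return __stack_depot_save(entries, nr, 0, false);
 *	}
 */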
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);