Merge 5.15.56 into android14-5.15

Changes in 5.15.56
	ALSA: hda - Add fixup for Dell Latitidue E5430
	ALSA: hda/conexant: Apply quirk for another HP ProDesk 600 G3 model
	ALSA: hda/realtek: Fix headset mic for Acer SF313-51
	ALSA: hda/realtek - Fix headset mic problem for a HP machine with alc671
	ALSA: hda/realtek: fix mute/micmute LEDs for HP machines
	ALSA: hda/realtek - Fix headset mic problem for a HP machine with alc221
	ALSA: hda/realtek - Enable the headset-mic on a Xiaomi's laptop
	xen/netback: avoid entering xenvif_rx_next_skb() with an empty rx queue
	fix race between exit_itimers() and /proc/pid/timers
	mm: userfaultfd: fix UFFDIO_CONTINUE on fallocated shmem pages
	mm: split huge PUD on wp_huge_pud fallback
	tracing/histograms: Fix memory leak problem
	net: sock: tracing: Fix sock_exceed_buf_limit not to dereference stale pointer
	ip: fix dflt addr selection for connected nexthop
	ARM: 9213/1: Print message about disabled Spectre workarounds only once
	ARM: 9214/1: alignment: advance IT state after emulating Thumb instruction
	wifi: mac80211: fix queue selection for mesh/OCB interfaces
	cgroup: Use separate src/dst nodes when preloading css_sets for migration
	btrfs: return -EAGAIN for NOWAIT dio reads/writes on compressed and inline extents
	drm/panfrost: Put mapping instead of shmem obj on panfrost_mmu_map_fault_addr() error
	drm/panfrost: Fix shrinker list corruption by madvise IOCTL
	fs/remap: constrain dedupe of EOF blocks
	nilfs2: fix incorrect masking of permission flags for symlinks
	sh: convert nommu io{re,un}map() to static inline functions
	Revert "evm: Fix memleak in init_desc"
	xfs: only run COW extent recovery when there are no live extents
	xfs: don't include bnobt blocks when reserving free block pool
	xfs: run callbacks before waking waiters in xlog_state_shutdown_callbacks
	xfs: drop async cache flushes from CIL commits.
	reset: Fix devm bulk optional exclusive control getter
	ARM: dts: imx6qdl-ts7970: Fix ngpio typo and count
	spi: amd: Limit max transfer and message size
	ARM: 9209/1: Spectre-BHB: avoid pr_info() every time a CPU comes out of idle
	ARM: 9210/1: Mark the FDT_FIXED sections as shareable
	net/mlx5e: kTLS, Fix build time constant test in TX
	net/mlx5e: kTLS, Fix build time constant test in RX
	net/mlx5e: Fix enabling sriov while tc nic rules are offloaded
	net/mlx5e: Fix capability check for updating vnic env counters
	net/mlx5e: Ring the TX doorbell on DMA errors
	drm/i915: fix a possible refcount leak in intel_dp_add_mst_connector()
	ima: Fix a potential integer overflow in ima_appraise_measurement
	ASoC: sgtl5000: Fix noise on shutdown/remove
	ASoC: tas2764: Add post reset delays
	ASoC: tas2764: Fix and extend FSYNC polarity handling
	ASoC: tas2764: Correct playback volume range
	ASoC: tas2764: Fix amp gain register offset & default
	ASoC: Intel: Skylake: Correct the ssp rate discovery in skl_get_ssp_clks()
	ASoC: Intel: Skylake: Correct the handling of fmt_config flexible array
	net: stmmac: dwc-qos: Disable split header for Tegra194
	net: ethernet: ti: am65-cpsw: Fix devlink port register sequence
	sysctl: Fix data races in proc_dointvec().
	sysctl: Fix data races in proc_douintvec().
	sysctl: Fix data races in proc_dointvec_minmax().
	sysctl: Fix data races in proc_douintvec_minmax().
	sysctl: Fix data races in proc_doulongvec_minmax().
	sysctl: Fix data races in proc_dointvec_jiffies().
	tcp: Fix a data-race around sysctl_tcp_max_orphans.
	inetpeer: Fix data-races around sysctl.
	net: Fix data-races around sysctl_mem.
	cipso: Fix data-races around sysctl.
	icmp: Fix data-races around sysctl.
	ipv4: Fix a data-race around sysctl_fib_sync_mem.
	ARM: dts: at91: sama5d2: Fix typo in i2s1 node
	ARM: dts: sunxi: Fix SPI NOR campatible on Orange Pi Zero
	arm64: dts: broadcom: bcm4908: Fix timer node for BCM4906 SoC
	arm64: dts: broadcom: bcm4908: Fix cpu node for smp boot
	netfilter: nf_log: incorrect offset to network header
	netfilter: nf_tables: replace BUG_ON by element length check
	drm/i915/gvt: IS_ERR() vs NULL bug in intel_gvt_update_reg_whitelist()
	xen/gntdev: Ignore failure to unmap INVALID_GRANT_HANDLE
	lockd: set fl_owner when unlocking files
	lockd: fix nlm_close_files
	tracing: Fix sleeping while atomic in kdb ftdump
	drm/i915/selftests: fix a couple IS_ERR() vs NULL tests
	drm/i915/dg2: Add Wa_22011100796
	drm/i915/gt: Serialize GRDOM access between multiple engine resets
	drm/i915/gt: Serialize TLB invalidates with GT resets
	drm/i915/uc: correctly track uc_fw init failure
	drm/i915: Require the vm mutex for i915_vma_bind()
	bnxt_en: Fix bnxt_reinit_after_abort() code path
	bnxt_en: Fix bnxt_refclk_read()
	sysctl: Fix data-races in proc_dou8vec_minmax().
	sysctl: Fix data-races in proc_dointvec_ms_jiffies().
	icmp: Fix data-races around sysctl_icmp_echo_enable_probe.
	icmp: Fix a data-race around sysctl_icmp_ignore_bogus_error_responses.
	icmp: Fix a data-race around sysctl_icmp_errors_use_inbound_ifaddr.
	icmp: Fix a data-race around sysctl_icmp_ratelimit.
	icmp: Fix a data-race around sysctl_icmp_ratemask.
	raw: Fix a data-race around sysctl_raw_l3mdev_accept.
	tcp: Fix a data-race around sysctl_tcp_ecn_fallback.
	ipv4: Fix data-races around sysctl_ip_dynaddr.
	nexthop: Fix data-races around nexthop_compat_mode.
	net: ftgmac100: Hold reference returned by of_get_child_by_name()
	net: stmmac: fix leaks in probe
	ima: force signature verification when CONFIG_KEXEC_SIG is configured
	ima: Fix potential memory leak in ima_init_crypto()
	drm/amd/display: Only use depth 36 bpp linebuffers on DCN display engines.
	drm/amd/pm: Prevent divide by zero
	sfc: fix use after free when disabling sriov
	ceph: switch netfs read ops to use rreq->inode instead of rreq->mapping->host
	seg6: fix skb checksum evaluation in SRH encapsulation/insertion
	seg6: fix skb checksum in SRv6 End.B6 and End.B6.Encaps behaviors
	seg6: bpf: fix skb checksum in bpf_push_seg6_encap()
	sfc: fix kernel panic when creating VF
	net: atlantic: remove deep parameter on suspend/resume functions
	net: atlantic: remove aq_nic_deinit() when resume
	KVM: x86: Fully initialize 'struct kvm_lapic_irq' in kvm_pv_kick_cpu_op()
	net/tls: Check for errors in tls_device_init
	ACPI: video: Fix acpi_video_handles_brightness_key_presses()
	mm: sysctl: fix missing numa_stat when !CONFIG_HUGETLB_PAGE
	btrfs: rename btrfs_bio to btrfs_io_context
	btrfs: zoned: fix a leaked bioc in read_zone_info
	ksmbd: use SOCK_NONBLOCK type for kernel_accept()
	powerpc/xive/spapr: correct bitmap allocation size
	vdpa/mlx5: Initialize CVQ vringh only once
	vduse: Tie vduse mgmtdev and its device
	virtio_mmio: Add missing PM calls to freeze/restore
	virtio_mmio: Restore guest page size on resume
	netfilter: br_netfilter: do not skip all hooks with 0 priority
	scsi: hisi_sas: Limit max hw sectors for v3 HW
	cpufreq: pmac32-cpufreq: Fix refcount leak bug
	platform/x86: hp-wmi: Ignore Sanitization Mode event
	firmware: sysfb: Make sysfb_create_simplefb() return a pdev pointer
	firmware: sysfb: Add sysfb_disable() helper function
	fbdev: Disable sysfb device registration when removing conflicting FBs
	net: tipc: fix possible refcount leak in tipc_sk_create()
	NFC: nxp-nci: don't print header length mismatch on i2c error
	nvme-tcp: always fail a request when sending it failed
	nvme: fix regression when disconnect a recovering ctrl
	net: sfp: fix memory leak in sfp_probe()
	ASoC: ops: Fix off by one in range control validation
	pinctrl: aspeed: Fix potential NULL dereference in aspeed_pinmux_set_mux()
	ASoC: Realtek/Maxim SoundWire codecs: disable pm_runtime on remove
	ASoC: rt711-sdca-sdw: fix calibrate mutex initialization
	ASoC: Intel: sof_sdw: handle errors on card registration
	ASoC: rt711: fix calibrate mutex initialization
	ASoC: rt7*-sdw: harden jack_detect_handler
	ASoC: codecs: rt700/rt711/rt711-sdca: initialize workqueues in probe
	ASoC: SOF: Intel: hda-loader: Clarify the cl_dsp_init() flow
	ASoC: wcd938x: Fix event generation for some controls
	ASoC: Intel: bytcr_wm5102: Fix GPIO related probe-ordering problem
	ASoC: wm5110: Fix DRE control
	ASoC: rt711-sdca: fix kernel NULL pointer dereference when IO error
	ASoC: dapm: Initialise kcontrol data for mux/demux controls
	ASoC: cs47l15: Fix event generation for low power mux control
	ASoC: madera: Fix event generation for OUT1 demux
	ASoC: madera: Fix event generation for rate controls
	irqchip: or1k-pic: Undefine mask_ack for level triggered hardware
	x86: Clear .brk area at early boot
	soc: ixp4xx/npe: Fix unused match warning
	ARM: dts: stm32: use the correct clock source for CEC on stm32mp151
	Revert "can: xilinx_can: Limit CANFD brp to 2"
	ALSA: usb-audio: Add quirks for MacroSilicon MS2100/MS2106 devices
	ALSA: usb-audio: Add quirk for Fiero SC-01
	ALSA: usb-audio: Add quirk for Fiero SC-01 (fw v1.0.0)
	nvme-pci: phison e16 has bogus namespace ids
	signal handling: don't use BUG_ON() for debugging
	USB: serial: ftdi_sio: add Belimo device ids
	usb: typec: add missing uevent when partner support PD
	usb: dwc3: gadget: Fix event pending check
	tty: serial: samsung_tty: set dma burst_size to 1
	vt: fix memory overlapping when deleting chars in the buffer
	serial: 8250: fix return error code in serial8250_request_std_resource()
	serial: stm32: Clear prev values before setting RTS delays
	serial: pl011: UPSTAT_AUTORTS requires .throttle/unthrottle
	serial: 8250: Fix PM usage_count for console handover
	x86/pat: Fix x86_has_pat_wp()
	drm/aperture: Run fbdev removal before internal helpers
	Linux 5.15.56

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I763d2a7b49435bf2996b31e201aa9794ab64609e


@@ -13,6 +13,12 @@ EDD Interfaces
.. kernel-doc:: drivers/firmware/edd.c
:internal:
Generic System Framebuffers Interface
-------------------------------------
.. kernel-doc:: drivers/firmware/sysfb.c
:export:
Intel Stratix10 SoC Service Layer
---------------------------------
Some features of the Intel Stratix10 SoC require a level of privilege


@@ -1063,7 +1063,7 @@ cipso_cache_enable - BOOLEAN
cipso_cache_bucket_size - INTEGER
The CIPSO label cache consists of a fixed size hash table with each
hash bucket containing a number of cache entries. This variable limits
the number of entries in each hash bucket; the larger the value the
the number of entries in each hash bucket; the larger the value is, the
more CIPSO label mappings that can be cached. When the number of
entries in a given hash bucket reaches this limit adding new entries
causes the oldest entry in the bucket to be removed to make room.
@@ -1170,7 +1170,7 @@ ip_autobind_reuse - BOOLEAN
option should only be set by experts.
Default: 0
ip_dynaddr - BOOLEAN
ip_dynaddr - INTEGER
If set non-zero, enables support for dynamic addresses.
If set to a non-zero value larger than 1, a kernel log
message will be printed when dynamic address rewriting
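
For reference, the documented semantics (ip_dynaddr is now typed INTEGER, with values above 1 also logging each rewrite) can be exercised from user space. A minimal sketch, assuming the standard procfs projection of net.ipv4.ip_dynaddr and CAP_NET_ADMIN to write it:

/*
 * Minimal sketch: enable dynamic address rewriting via procfs.
 * Assumes the standard sysctl path for net.ipv4.ip_dynaddr.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ip_dynaddr", "w");

	if (!f) {
		perror("ip_dynaddr");
		return 1;
	}
	/* 1 enables rewriting; values > 1 also log each rewrite */
	fputs("2\n", f);
	fclose(f);
	return 0;
}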


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 55
SUBLEVEL = 56
EXTRAVERSION =
NAME = Trick or Treat


@@ -226,7 +226,7 @@
reg = <0x28>;
#gpio-cells = <2>;
gpio-controller;
ngpio = <32>;
ngpios = <62>;
};
sgtl5000: codec@a {


@@ -1125,7 +1125,7 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 55>, <&pmc PMC_TYPE_GCK 55>;
clock-names = "pclk", "gclk";
assigned-clocks = <&pmc PMC_TYPE_CORE PMC_I2S1_MUX>;
assigned-parrents = <&pmc PMC_TYPE_GCK 55>;
assigned-clock-parents = <&pmc PMC_TYPE_GCK 55>;
status = "disabled";
};


@@ -553,7 +553,7 @@
compatible = "st,stm32-cec";
reg = <0x40016000 0x400>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc CEC_K>, <&clk_lse>;
clocks = <&rcc CEC_K>, <&rcc CEC>;
clock-names = "cec", "hdmi-cec";
status = "disabled";
};


@@ -169,7 +169,7 @@
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "mxicy,mx25l1606e", "winbond,w25q128";
compatible = "mxicy,mx25l1606e", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <40000000>;
};


@@ -27,6 +27,7 @@ enum {
MT_HIGH_VECTORS,
MT_MEMORY_RWX,
MT_MEMORY_RW,
MT_MEMORY_RO,
MT_ROM,
MT_MEMORY_RWX_NONCACHED,
MT_MEMORY_RW_DTCM,


@@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
/*
* Update ITSTATE after normal execution of an IT block instruction.
*
* The 8 IT state bits are split into two parts in CPSR:
* ITSTATE<1:0> are in CPSR<26:25>
* ITSTATE<7:2> are in CPSR<15:10>
*/
static inline unsigned long it_advance(unsigned long cpsr)
{
if ((cpsr & 0x06000400) == 0) {
/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
cpsr &= ~PSR_IT_MASK;
} else {
/* We need to shift left ITSTATE<4:0> */
const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
unsigned long it = cpsr & mask;
it <<= 1;
it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
it &= mask;
cpsr &= ~mask;
cpsr |= it;
}
return cpsr;
}
#endif /* __ASSEMBLY__ */
#endif
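
Since the point of this hunk is moving it_advance() where the alignment handler can see it, a quick user-space harness helps sanity-check the bit twiddling. This is a sketch, not kernel code: it copies the function verbatim from the hunk above, assumes the PSR_IT_MASK value (0x0600fc00) from arch/arm/include/uapi/asm/ptrace.h, and the pack/extract helpers plus the 0xe5 start value are made up for the demo:

#include <stdio.h>

#define PSR_IT_MASK 0x0600fc00 /* assumed from the uapi header */

/* copied verbatim from the hunk above */
static inline unsigned long it_advance(unsigned long cpsr)
{
	if ((cpsr & 0x06000400) == 0) {
		/* ITSTATE<2:0> == 0 means end of IT block */
		cpsr &= ~PSR_IT_MASK;
	} else {
		const unsigned long mask = 0x06001c00;
		unsigned long it = cpsr & mask;
		it <<= 1;
		it |= it >> (27 - 10);
		it &= mask;
		cpsr &= ~mask;
		cpsr |= it;
	}
	return cpsr;
}

/* ITSTATE<1:0> live in CPSR<26:25>, ITSTATE<7:2> in CPSR<15:10> */
static unsigned long it_pack(unsigned long it)
{
	return ((it & 0x3) << 25) | (((it >> 2) & 0x3f) << 10);
}

static unsigned long it_extract(unsigned long cpsr)
{
	return ((cpsr >> 25) & 0x3) | (((cpsr >> 10) & 0x3f) << 2);
}

int main(void)
{
	unsigned long cpsr = it_pack(0xe5); /* arbitrary in-flight IT state */
	int i;

	for (i = 0; i < 5 && it_extract(cpsr); i++) {
		cpsr = it_advance(cpsr);
		printf("ITSTATE -> 0x%02lx\n", it_extract(cpsr));
	}
	return 0;
}

The mask slides ITSTATE<4:0> left one bit per executed instruction until the low three bits hit zero, at which point the whole IT field is cleared; the harness prints that progression (0xea, 0xf4, 0xe8, 0x00 from the start value above).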


@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
if (thumb_mode(regs))
regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
return 0;
bad_or_fault:


@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
@@ -489,6 +496,7 @@ static void __init build_mem_type_table(void)
/* Also setup NX memory mapping */
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
}
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/*
@@ -568,6 +576,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif
/*
@@ -587,6 +596,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -647,6 +658,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1360,7 +1373,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
map.virtual = FDT_FIXED_BASE;
map.length = FDT_FIXED_SIZE;
map.type = MT_ROM;
map.type = MT_MEMORY_RO;
create_mapping(&map);
}


@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method)
#else
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
smp_processor_id());
pr_info_once("Spectre V2: workarounds disabled by configuration\n");
return SPECTRE_VULNERABLE;
}
@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method)
return SPECTRE_VULNERABLE;
spectre_bhb_method = method;
}
pr_info("CPU%u: Spectre BHB: using %s workaround\n",
pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
smp_processor_id(), spectre_bhb_method_name(method));
}
return SPECTRE_MITIGATED;
}


@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/probes.h>
#include <asm/ptrace.h>
#include <asm/kprobes.h>
void __init arm_probes_decode_init(void);
@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void);
#endif
/*
* Update ITSTATE after normal execution of an IT block instruction.
*
* The 8 IT state bits are split into two parts in CPSR:
* ITSTATE<1:0> are in CPSR<26:25>
* ITSTATE<7:2> are in CPSR<15:10>
*/
static inline unsigned long it_advance(unsigned long cpsr)
{
if ((cpsr & 0x06000400) == 0) {
/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
cpsr &= ~PSR_IT_MASK;
} else {
/* We need to shift left ITSTATE<4:0> */
const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
unsigned long it = cpsr & mask;
it <<= 1;
it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
it &= mask;
cpsr &= ~mask;
cpsr |= it;
}
return cpsr;
}
static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
{
long cpsr = regs->ARM_cpsr;


@@ -9,6 +9,14 @@
/delete-node/ cpu@3;
};
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
};
pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,


@@ -29,6 +29,8 @@
device_type = "cpu";
compatible = "brcm,brahma-b53";
reg = <0x0>;
enable-method = "spin-table";
cpu-release-addr = <0x0 0xfff8>;
next-level-cache = <&l2>;
};


@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
@@ -55,7 +56,7 @@ static int xive_irq_bitmap_add(int base, int count)
spin_lock_init(&xibm->lock);
xibm->base = base;
xibm->count = count;
xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
if (!xibm->bitmap) {
kfree(xibm);
return -ENOMEM;
@@ -73,7 +74,7 @@ static void xive_irq_bitmap_remove_all(void)
list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
list_del(&xibm->list);
kfree(xibm->bitmap);
bitmap_free(xibm->bitmap);
kfree(xibm);
}
}
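
The size bug this hunk fixes is easy to see outside the kernel: kzalloc(count) allocates count bytes, while the bitmap helpers operate on whole unsigned longs, so a small count can yield a buffer shorter than the single long the first bitmap op touches (and a large count over-allocates by 8x). A quick sketch of the two size computations, with BITS_TO_LONGS reimplemented the way the kernel defines it:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long counts[] = { 4, 63, 64, 65, 2048 };
	size_t i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned long n = counts[i];

		/* kzalloc(n) treats n as bytes; bitmap_zalloc(n) rounds
		 * n bits up to whole longs */
		printf("count=%4lu bits: kzalloc=%4lu B, bitmap_zalloc=%4zu B\n",
		       n, n, BITS_TO_LONGS(n) * sizeof(unsigned long));
	}
	return 0;
}

For count=4 the open-coded kzalloc gives 4 bytes where one 8-byte long is needed, which is the out-of-bounds direction; for count=2048 it allocates 2048 bytes where 256 suffice.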


@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
#endif /* CONFIG_HAVE_IOREMAP_PROT */
#else /* CONFIG_MMU */
#define iounmap(addr) do { } while (0)
#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset))
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
return (void __iomem *)(unsigned long)offset;
}
static inline void iounmap(volatile void __iomem *addr) { }
#endif /* CONFIG_MMU */
#define ioremap_uc ioremap


@@ -418,6 +418,8 @@ static void __init clear_bss(void)
{
memset(__bss_start, 0,
(unsigned long) __bss_stop - (unsigned long) __bss_start);
memset(__brk_base, 0,
(unsigned long) __brk_limit - (unsigned long) __brk_base);
}
static unsigned long get_cmd_line_ptr(void)


@@ -8713,15 +8713,17 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
*/
static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
{
struct kvm_lapic_irq lapic_irq;
/*
* All other fields are unused for APIC_DM_REMRD, but may be consumed by
* common code, e.g. for tracing. Defer initialization to the compiler.
*/
struct kvm_lapic_irq lapic_irq = {
.delivery_mode = APIC_DM_REMRD,
.dest_mode = APIC_DEST_PHYSICAL,
.shorthand = APIC_DEST_NOSHORT,
.dest_id = apicid,
};
lapic_irq.shorthand = APIC_DEST_NOSHORT;
lapic_irq.dest_mode = APIC_DEST_PHYSICAL;
lapic_irq.level = 0;
lapic_irq.dest_id = apicid;
lapic_irq.msi_redir_hint = false;
lapic_irq.delivery_mode = APIC_DM_REMRD;
kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}
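
The comment in the hunk ("Defer initialization to the compiler") leans on a C guarantee worth spelling out: members omitted from a designated initializer are zero-initialized (C11 6.7.9), so dropping the explicit level = 0 and msi_redir_hint = false stores loses nothing. A self-contained sketch, with a hypothetical struct standing in for kvm_lapic_irq:

#include <assert.h>

/* hypothetical stand-in for struct kvm_lapic_irq */
struct lapic_irq {
	int delivery_mode;
	int dest_mode;
	int shorthand;
	int dest_id;
	int level;          /* intentionally not named below */
	int msi_redir_hint; /* intentionally not named below */
};

int main(void)
{
	struct lapic_irq irq = {
		.delivery_mode = 3,
		.dest_mode = 1,
		.shorthand = 0,
		.dest_id = 7,
	};

	/* unnamed members are implicitly initialized as if they had
	 * static storage duration, i.e. to zero */
	assert(irq.level == 0 && irq.msi_redir_hint == 0);
	return 0;
}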


@@ -78,10 +78,20 @@ static uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
/* Check that the write-protect PAT entry is set for write-protect */
/*
* Check that the write-protect PAT entry is set for write-protect.
* To do this without making assumptions how PAT has been set up (Xen has
* another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache
* mode via the __cachemode2pte_tbl[] into protection bits (those protection
* bits will select a cache mode of WP or better), and then translate the
* protection bits back into the cache mode using __pte2cm_idx() and the
* __pte2cachemode_tbl[] array. This will return the really used cache mode.
*/
bool x86_has_pat_wp(void)
{
return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];
return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
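
The rewritten x86_has_pat_wp() round-trips _PAGE_CACHE_MODE_WP through both tables instead of indexing the reverse table directly with the mode number, which only works when PAT follows the kernel's own layout. A hedged, self-contained model of that round trip; the table contents here are invented (a firmware PAT with no WP slot, so WP degrades to UC-) and are not the kernel's actual __cachemode2pte_tbl/__pte2cachemode_tbl values:

#include <stdbool.h>
#include <stdio.h>

enum cm { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP, CM_NUM };

/* hypothetical forward table: cache mode -> PAT index; WP has no
 * slot in this made-up layout and is routed to the UC- index */
static const unsigned cm_to_idx[CM_NUM] = {
	[CM_WB] = 0, [CM_WC] = 1, [CM_UC_MINUS] = 2,
	[CM_UC] = 3, [CM_WT] = 4, [CM_WP] = 2,
};

/* hypothetical reverse table: PAT index -> effective cache mode */
static const enum cm idx_to_cm[8] = {
	CM_WB, CM_WC, CM_UC_MINUS, CM_UC,
	CM_WT, CM_UC_MINUS, CM_UC_MINUS, CM_UC,
};

static bool has_pat_wp(void)
{
	/* translate the mode to protection bits, then back: the result
	 * is the cache mode actually in effect for WP mappings */
	return idx_to_cm[cm_to_idx[CM_WP]] == CM_WP;
}

int main(void)
{
	printf("WP really usable: %s\n", has_pat_wp() ? "yes" : "no");
	return 0;
}

With this layout the round trip correctly reports "no", since a WP request would really be served as UC-; indexing idx_to_cm by the raw CM_WP constant would be answering a different question entirely.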


@@ -73,7 +73,7 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
static bool has_backlight;
static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
static DEFINE_MUTEX(video_list_lock);
@@ -1224,7 +1224,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_find_cap(data);
if (data->cap._BCM && data->cap._BCL)
has_backlight = true;
may_report_brightness_keys = true;
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
@@ -1693,6 +1693,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
if (keycode)
may_report_brightness_keys = true;
acpi_notifier_call_chain(device, event, 0);
if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) {
@@ -2255,7 +2258,7 @@ void acpi_video_unregister(void)
if (register_count) {
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
has_backlight = false;
may_report_brightness_keys = false;
}
mutex_unlock(&register_count_mutex);
}
@@ -2277,7 +2280,7 @@ void acpi_video_unregister_backlight(void)
bool acpi_video_handles_brightness_key_presses(void)
{
return has_backlight &&
return may_report_brightness_keys &&
(report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
}
EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);


@@ -470,6 +470,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
if (slew_done_gpio_np)
slew_done_gpio = read_gpio(slew_done_gpio_np);
of_node_put(volt_gpio_np);
of_node_put(freq_gpio_np);
of_node_put(slew_done_gpio_np);
/* If we use the frequency GPIOs, calculate the min/max speeds based
* on the bus frequencies
*/


@@ -34,21 +34,59 @@
#include <linux/screen_info.h>
#include <linux/sysfb.h>
static struct platform_device *pd;
static DEFINE_MUTEX(disable_lock);
static bool disabled;
static bool sysfb_unregister(void)
{
if (IS_ERR_OR_NULL(pd))
return false;
platform_device_unregister(pd);
pd = NULL;
return true;
}
/**
* sysfb_disable() - disable the Generic System Framebuffers support
*
* This disables the registration of system framebuffer devices that match the
* generic drivers that make use of the system framebuffer set up by firmware.
*
* It also unregisters a device if this was already registered by sysfb_init().
*
* Context: The function can sleep. A @disable_lock mutex is acquired to serialize
* against sysfb_init(), that registers a system framebuffer device.
*/
void sysfb_disable(void)
{
mutex_lock(&disable_lock);
sysfb_unregister();
disabled = true;
mutex_unlock(&disable_lock);
}
EXPORT_SYMBOL_GPL(sysfb_disable);
static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
struct simplefb_platform_data mode;
struct platform_device *pd;
const char *name;
bool compatible;
int ret;
int ret = 0;
mutex_lock(&disable_lock);
if (disabled)
goto unlock_mutex;
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
ret = sysfb_create_simplefb(si, &mode);
if (!ret)
return 0;
pd = sysfb_create_simplefb(si, &mode);
if (!IS_ERR(pd))
goto unlock_mutex;
}
/* if the FB is incompatible, create a legacy framebuffer device */
@@ -60,8 +98,10 @@ static __init int sysfb_init(void)
name = "platform-framebuffer";
pd = platform_device_alloc(name, 0);
if (!pd)
return -ENOMEM;
if (!pd) {
ret = -ENOMEM;
goto unlock_mutex;
}
sysfb_apply_efi_quirks(pd);
@@ -73,9 +113,11 @@ static __init int sysfb_init(void)
if (ret)
goto err;
return 0;
goto unlock_mutex;
err:
platform_device_put(pd);
unlock_mutex:
mutex_unlock(&disable_lock);
return ret;
}
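
The shape of this fix is a reusable handshake: one mutex plus a sticky flag, so a disable call either tears down what init built or forestalls an init that has not run yet. A user-space sketch of that pattern under the same assumptions (names are illustrative, not the sysfb symbols):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool disabled;
static bool registered;

void device_disable(void)
{
	pthread_mutex_lock(&lock);
	if (registered) {
		registered = false; /* "unregister" the device */
		puts("unregistered");
	}
	disabled = true; /* an init arriving later becomes a no-op */
	pthread_mutex_unlock(&lock);
}

int device_init(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (disabled)
		goto unlock; /* raced with disable: do nothing */
	registered = true;
	puts("registered");
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

Because both paths take the same lock, there is no window where sysfb_disable() returns while a concurrently running sysfb_init() is still about to register the device.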


@@ -57,7 +57,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
return false;
}
__init int sysfb_create_simplefb(const struct screen_info *si,
__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
const struct simplefb_platform_data *mode)
{
struct platform_device *pd;
@@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
base |= (u64)si->ext_lfb_base << 32;
if (!base || (u64)(resource_size_t)base != base) {
printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
return -EINVAL;
return ERR_PTR(-EINVAL);
}
/*
@@ -93,7 +93,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
length = mode->height * mode->stride;
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
return ERR_PTR(-EINVAL);
}
length = PAGE_ALIGN(length);
@@ -104,11 +104,11 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
res.start = base;
res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;
return ERR_PTR(-EINVAL);
pd = platform_device_alloc("simple-framebuffer", 0);
if (!pd)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
sysfb_apply_efi_quirks(pd);
@@ -124,10 +124,10 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
if (ret)
goto err_put_device;
return 0;
return pd;
err_put_device:
platform_device_put(pd);
return ret;
return ERR_PTR(ret);
}
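
The API change above is the classic ERR_PTR idiom: instead of an int return plus an out-parameter, the error code rides in the pointer value itself, in the last page of the address space. A user-space rendition of the convention (MAX_ERRNO is 4095 in the kernel; make_widget() is a made-up stand-in for sysfb_create_simplefb()):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

static inline int IS_ERR(const void *ptr)
{
	/* errors occupy the top MAX_ERRNO values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_widget(int fail)
{
	if (fail)
		return ERR_PTR(-22); /* -EINVAL */
	return malloc(16);
}

int main(void)
{
	void *w = make_widget(1);

	if (IS_ERR(w)) {
		printf("failed: errno %ld\n", -PTR_ERR(w));
		return 1;
	}
	free(w);
	return 0;
}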


@@ -1062,12 +1062,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* on certain displays, such as the Sharp 4k. 36bpp is needed
* to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
* SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
* precision on at least DCN display engines. However, at least
* Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
* so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
* did not show such problems, so this seems to be the exception.
* precision on DCN display engines, but apparently not for DCE, as
* far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have
* problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
* neither do DCE-8 at 4k resolution, or DCE-11.2 (broken identify pixel
* passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
*/
if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
else
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;


@@ -1235,6 +1235,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
if (speed == 0)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
* for minimum fan speed:


@@ -329,7 +329,20 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const struct drm_driver *req_driver)
{
resource_size_t base, size;
int bar, ret = 0;
int bar, ret;
/*
* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over.
*/
#if IS_REACHABLE(CONFIG_FB)
ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
if (ret)
return ret;
#endif
ret = vga_remove_vgacon(pdev);
if (ret)
return ret;
for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
@@ -339,15 +352,6 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
drm_aperture_detach_drivers(base, size);
}
/*
* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over.
*/
#if IS_REACHABLE(CONFIG_FB)
ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
#endif
if (ret == 0)
ret = vga_remove_vgacon(pdev);
return ret;
return 0;
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);


@@ -817,6 +817,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}


@@ -1060,6 +1060,47 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
return &i915->ggtt;
}
static void reloc_cache_unmap(struct reloc_cache *cache)
{
void *vaddr;
if (!cache->vaddr)
return;
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP)
kunmap_atomic(vaddr);
else
io_mapping_unmap_atomic((void __iomem *)vaddr);
}
static void reloc_cache_remap(struct reloc_cache *cache,
struct drm_i915_gem_object *obj)
{
void *vaddr;
if (!cache->vaddr)
return;
if (cache->vaddr & KMAP) {
struct page *page = i915_gem_object_get_page(obj, cache->page);
vaddr = kmap_atomic(page);
cache->vaddr = unmask_flags(cache->vaddr) |
(unsigned long)vaddr;
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
unsigned long offset;
offset = cache->node.start;
if (!drm_mm_node_allocated(&cache->node))
offset += cache->page << PAGE_SHIFT;
cache->vaddr = (unsigned long)
io_mapping_map_atomic_wc(&ggtt->iomap, offset);
}
}
static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
void *vaddr;
@@ -1324,10 +1365,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
GRAPHICS_VER(eb->i915) == 6) {
GRAPHICS_VER(eb->i915) == 6 &&
!i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
struct i915_vma *vma = target->vma;
reloc_cache_unmap(&eb->reloc_cache);
mutex_lock(&vma->vm->mutex);
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
mutex_unlock(&vma->vm->mutex);
reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
if (err)
return err;
}


@@ -970,6 +970,20 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
for_each_engine(engine, gt, id) {
struct reg_and_bit rb;
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
}
spin_unlock_irq(&uncore->lock);
for_each_engine(engine, gt, id) {
/*
* HW architecture suggest typical invalidation time at 40us,
@@ -984,7 +998,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
if (!i915_mmio_reg_offset(rb.reg))
continue;
intel_uncore_write_fw(uncore, rb.reg, rb.bit);
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,


@@ -293,7 +293,7 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
return err;
}
static int gen6_reset_engines(struct intel_gt *gt,
static int __gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -322,6 +322,20 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&gt->uncore->lock, flags);
ret = __gen6_reset_engines(gt, engine_mask, retry);
spin_unlock_irqrestore(&gt->uncore->lock, flags);
return ret;
}
static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
int vecs_id;
@@ -488,7 +502,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}
static int gen11_reset_engines(struct intel_gt *gt,
static int __gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -601,8 +615,11 @@ static int gen8_reset_engines(struct intel_gt *gt,
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
intel_engine_mask_t tmp;
unsigned long flags;
int ret;
spin_lock_irqsave(&gt->uncore->lock, flags);
for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
@@ -623,15 +640,26 @@ static int gen8_reset_engines(struct intel_gt *gt,
*/
}
/*
* Wa_22011100796:dg2, whenever Full soft reset is required,
* reset all individual engines firstly, and then do a full soft reset.
*
* This is best effort, so ignore any error from the initial reset.
*/
if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
__gen11_reset_engines(gt, gt->info.engine_mask, 0);
if (GRAPHICS_VER(gt->i915) >= 11)
ret = gen11_reset_engines(gt, engine_mask, retry);
ret = __gen11_reset_engines(gt, engine_mask, retry);
else
ret = gen6_reset_engines(gt, engine_mask, retry);
ret = __gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
spin_unlock_irqrestore(&gt->uncore->lock, flags);
return ret;
}


@@ -153,8 +153,8 @@ static int live_lrc_layout(void *arg)
continue;
hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
if (!hw) {
err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
@@ -329,8 +329,8 @@ static int live_lrc_fixed(void *arg)
continue;
hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
if (!hw) {
err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);


@@ -159,6 +159,6 @@ int intel_guc_fw_upload(struct intel_guc *guc)
return 0;
out:
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL);
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}


@@ -191,7 +191,7 @@ int intel_huc_auth(struct intel_huc *huc)
fail:
i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL);
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}


@@ -526,7 +526,7 @@ fail:
i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
err);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
}
@@ -544,7 +544,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
}
return err;


@@ -31,11 +31,12 @@ struct intel_gt;
* | | MISSING <--/ | \--> ERROR |
* | fetch | V |
* | | AVAILABLE |
* +------------+- | -+
* +------------+- | \ -+
* | | | \--> INIT FAIL |
* | init | V |
* | | /------> LOADABLE <----<-----------\ |
* +------------+- \ / \ \ \ -+
* | | FAIL <--< \--> TRANSFERRED \ |
* | | LOAD FAIL <--< \--> TRANSFERRED \ |
* | upload | \ / \ / |
* | | \---------/ \--> RUNNING |
* +------------+---------------------------------------------------+
@@ -49,8 +50,9 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
INTEL_UC_FIRMWARE_INIT_FAIL, /* failed to prepare fw objects for load */
INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
};
@@ -121,10 +123,12 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
case INTEL_UC_FIRMWARE_INIT_FAIL:
return "INIT FAIL";
case INTEL_UC_FIRMWARE_LOADABLE:
return "LOADABLE";
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
case INTEL_UC_FIRMWARE_LOAD_FAIL:
return "LOAD FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
return "TRANSFERRED";
case INTEL_UC_FIRMWARE_RUNNING:
@@ -146,7 +150,8 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
return -ENOENT;
case INTEL_UC_FIRMWARE_ERROR:
return -ENOEXEC;
case INTEL_UC_FIRMWARE_FAIL:
case INTEL_UC_FIRMWARE_INIT_FAIL:
case INTEL_UC_FIRMWARE_LOAD_FAIL:
return -EIO;
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;


@@ -3115,9 +3115,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
continue;
vaddr = shmem_pin_map(engine->default_state);
if (IS_ERR(vaddr)) {
gvt_err("failed to map %s->default state, err:%zd\n",
engine->name, PTR_ERR(vaddr));
if (!vaddr) {
gvt_err("failed to map %s->default state\n",
engine->name);
return;
}


@@ -376,6 +376,7 @@ int i915_vma_bind(struct i915_vma *vma,
u32 bind_flags;
u32 vma_flags;
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);


@@ -422,7 +422,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
list_move_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);


@@ -501,7 +501,7 @@ err_map:
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_bo:
drm_gem_object_put(&bo->base.base);
panfrost_gem_mapping_put(bomapping);
return ret;
}


@@ -66,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = {
.name = "or1k-PIC-level",
.irq_unmask = or1k_pic_unmask,
.irq_mask = or1k_pic_mask,
.irq_mask_ack = or1k_pic_mask_ack,
},
.handle = handle_level_irq,
.flags = IRQ_LEVEL | IRQ_NOPROBE,


@@ -259,7 +259,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
.brp_min = 2,
.brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
@@ -272,7 +272,7 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
.brp_min = 2,
.brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};


@@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
static int aq_suspend_common(struct device *dev, bool deep)
static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
if (netif_running(nic->ndev))
aq_nic_stop(nic);
if (deep) {
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
aq_nic_set_power(nic);
}
rtnl_unlock();
return 0;
}
static int atl_resume_common(struct device *dev, bool deep)
static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, bool deep)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (deep) {
/* Reinitialize Nic/Vecs objects */
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
}
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -444,22 +437,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
return aq_suspend_common(dev, true);
return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
return aq_suspend_common(dev, true);
return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
return atl_resume_common(dev, true);
return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
return atl_resume_common(dev, true);
return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {


@@ -9806,7 +9806,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
else if (bp->fw_health && !bp->fw_health->status_reliable)
bnxt_try_map_fw_health_reg(bp);


@@ -61,14 +61,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 high_before, high_now, low;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return -EIO;
high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
ptp_read_system_prets(sts);
*ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
*ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
if (high_now != high_before) {
ptp_read_system_prets(sts);
low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
}
*ns = ((u64)high_now << 32) | low;
return 0;
}
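
Stripped of the bnxt specifics, the fix is the standard high-low-high pattern for a 64-bit counter exposed as two 32-bit registers: sample high, then low, then high again, and re-read low once if the high word rolled over in between. A generic sketch (reg_hi()/reg_lo() are hypothetical register accessors):

#include <stdint.h>

extern uint32_t reg_hi(void); /* hypothetical MMIO reads */
extern uint32_t reg_lo(void);

uint64_t read_split_counter(void)
{
	uint32_t hi = reg_hi();
	uint32_t lo = reg_lo();
	uint32_t hi2 = reg_hi();

	if (hi2 != hi)       /* low word wrapped between the samples */
		lo = reg_lo();   /* now consistent with hi2 */

	return ((uint64_t)hi2 << 32) | lo;
}

A single retry is sufficient on the assumption that the high word cannot advance twice within the few cycles between reads, which is the same assumption the patch makes.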


@@ -1746,6 +1746,19 @@ cleanup_clk:
return rc;
}
static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
{
struct device_node *child_np = of_get_child_by_name(np, name);
bool ret = false;
if (child_np) {
ret = true;
of_node_put(child_np);
}
return ret;
}
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1865,7 +1878,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* Display what we found */
phy_attached_info(phy);
} else if (np && !of_get_child_by_name(np, "mdio")) {
} else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that decribe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available


@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_rx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
TLS_OFFLOAD_CONTEXT_SIZE_RX);
BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
*ctx = priv_rx;
}


@@ -68,8 +68,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_tx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
TLS_OFFLOAD_CONTEXT_SIZE_TX);
BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
*ctx = priv_tx;
}


@@ -614,7 +614,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
return;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);


@@ -429,6 +429,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
}
}
static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *wqe;
u16 pi;
/* Must not be called when a MPWQE session is active but empty. */
mlx5e_tx_mpwqe_ensure_complete(sq);
pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
wi = &sq->db.wqe_info[pi];
*wi = (struct mlx5e_tx_wqe_info) {
.num_wqebbs = 1,
};
wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
const struct mlx5e_tx_attr *attr,
@@ -521,6 +541,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
@@ -622,6 +643,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_xmit_data txd;
txd.data = skb->data;
txd.len = skb->len;
txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
goto err_unmap;
if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
mlx5e_tx_mpwqe_session_start(sq, eseg);
} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
@@ -631,18 +659,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
txd.data = skb->data;
txd.len = skb->len;
txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
mlx5e_tx_skb_update_hwts_flags(skb);
if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
@@ -664,6 +683,7 @@ err_unmap:
mlx5e_dma_unmap_wqe_err(sq, 1);
sq->stats->dropped++;
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
@@ -1033,5 +1053,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
#endif


@@ -11,6 +11,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "fs_ft_pool.h"
#include "esw/qos.h"
enum {
@@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
ft_attr.max_fte = table_size;
ft_attr.max_fte = POOL_NEXT_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
@@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
goto out;
}
esw->fdb_table.legacy.fdb = fdb;
table_size = fdb->max_fte;
/* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,


@@ -1932,7 +1932,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
efx_update_sw_stats(efx, stats);
out:
/* releasing a DMA coherent buffer with BH disabled can panic */
spin_unlock_bh(&efx->stats_lock);
efx_nic_free_buffer(efx, &stats_buf);
spin_lock_bh(&efx->stats_lock);
return rc;
}


@@ -408,8 +408,9 @@ fail1:
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int vfs_assigned = pci_vfs_assigned(dev);
int rc = 0;
int i, rc = 0;
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
return -EBUSY;
}
if (!vfs_assigned)
if (!vfs_assigned) {
for (i = 0; i < efx->vf_count; i++)
nic_data->vf[i].pci_dev = NULL;
pci_disable_sriov(dev);
else
} else {
rc = -EBUSY;
}
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;


@@ -361,6 +361,7 @@ bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
data->sph_disable = 1;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)


@@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->tx_delay = tx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
return -EINVAL;
ret = -EINVAL;
goto err_remove_config_dt;
}
}
@@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->rx_delay = rx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
return -EINVAL;
ret = -EINVAL;
goto err_remove_config_dt;
}
}


@@ -2467,7 +2467,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port->port_id, ret);
goto dl_port_unreg;
}
devlink_port_type_eth_set(dl_port, port->ndev);
}
return ret;
@@ -2514,6 +2513,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2530,6 +2530,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
}
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -2542,25 +2546,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
i, ret);
goto err_cleanup_ndev;
}
dl_port = &port->devlink_port;
devlink_port_type_eth_set(dl_port, port->ndev);
}
ret = am65_cpsw_register_notifiers(common);
if (ret)
goto err_cleanup_ndev;
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
goto clean_unregister_notifiers;
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
return 0;
clean_unregister_notifiers:
am65_cpsw_unregister_notifiers(common);
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_unregister_devlink(common);
return ret;
}


@@ -2504,7 +2504,7 @@ static int sfp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
err = devm_add_action(sfp->dev, sfp_cleanup, sfp);
err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
if (err < 0)
return err;


@@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)
queue->rx_copy.completed = &completed_skbs;
while (xenvif_rx_ring_slots_available(queue) &&
!skb_queue_empty(&queue->rx_queue) &&
work_done < RX_BATCH_SIZE) {
xenvif_rx_skb(queue);
work_done++;


@@ -122,7 +122,9 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy,
skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN);
r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
if (r != frame_len) {
if (r < 0) {
goto fw_read_exit_free_skb;
} else if (r != frame_len) {
nfc_err(&client->dev,
"Invalid frame length: %u (expected %zu)\n",
r, frame_len);
@@ -166,7 +168,9 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
return 0;
r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
if (r != header.plen) {
if (r < 0) {
goto nci_read_exit_free_skb;
} else if (r != header.plen) {
nfc_err(&client->dev,
"Invalid frame payload length: %u (expected %u)\n",
r, header.plen);


@@ -4385,6 +4385,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
if (ctrl->ops->stop_ctrl)
ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);


@@ -495,6 +495,7 @@ struct nvme_ctrl_ops {
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};


@@ -3337,7 +3337,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DISABLE_WRITE_ZEROES|
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },


@@ -1049,6 +1049,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
}
}
static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->reconnect_work);
}
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2230,9 +2238,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->reconnect_work);
nvme_rdma_teardown_io_queues(ctrl, shutdown);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
if (shutdown)
@@ -2282,6 +2287,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
.stop_ctrl = nvme_rdma_stop_ctrl,
};
/*


@@ -1162,7 +1162,6 @@ done:
} else if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", ret);
if (ret != -EPIPE && ret != -ECONNRESET)
nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
}
@@ -2164,9 +2163,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
nvme_tcp_teardown_io_queues(ctrl, shutdown);
blk_mq_quiesce_queue(ctrl->admin_q);
if (shutdown)
@@ -2206,6 +2202,12 @@ out_fail:
nvme_tcp_reconnect_or_remove(ctrl);
}
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2529,6 +2531,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
.get_address = nvmf_get_address,
.stop_ctrl = nvme_tcp_stop_ctrl,
};
static bool


@@ -236,11 +236,11 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
const struct aspeed_sig_expr **funcs;
const struct aspeed_sig_expr ***prios;
pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
if (!pdesc)
return -EINVAL;
pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
prios = pdesc->prios;
if (!prios)


@@ -63,6 +63,7 @@ enum hp_wmi_event_ids {
HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
};
struct bios_args {
@@ -638,6 +639,8 @@ static void hp_wmi_notify(u32 value, void *context)
break;
case HPWMI_BATTERY_CHARGE_PERIOD:
break;
case HPWMI_SANITIZATION_MODE:
break;
default:
pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;


@@ -2758,6 +2758,7 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
struct hisi_hba *hisi_hba = shost_priv(shost);
struct device *dev = hisi_hba->dev;
int ret = sas_slave_configure(sdev);
unsigned int max_sectors;
if (ret)
return ret;
@@ -2775,6 +2776,12 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
}
}
/* Set according to IOMMU IOVA caching limit */
max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
(PAGE_SIZE * 32) >> SECTOR_SHIFT);
blk_queue_max_hw_sectors(sdev->request_queue, max_sectors);
return 0;
}
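
The cap above is easier to evaluate with numbers plugged in. A minimal sketch, assuming 4 KiB pages and the usual 512-byte sector (SECTOR_SHIFT == 9):

#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed 4 KiB pages */
#define SECTOR_SHIFT 9   /* 512-byte sectors */

int main(void)
{
	unsigned long max_sectors = (PAGE_SIZE * 32) >> SECTOR_SHIFT;

	/* (4096 * 32) / 512 = 256 sectors = 128 KiB per request */
	printf("max_sectors = %lu (%lu KiB)\n",
	       max_sectors, (max_sectors << SECTOR_SHIFT) / 1024);
	return 0;
}

So each request is capped at 128 KiB, keeping a single I/O within the IOMMU IOVA caching limit the comment in the hunk refers to.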


@@ -743,7 +743,7 @@ static const struct of_device_id ixp4xx_npe_of_match[] = {
static struct platform_driver ixp4xx_npe_driver = {
.driver = {
.name = "ixp4xx-npe",
.of_match_table = of_match_ptr(ixp4xx_npe_of_match),
.of_match_table = ixp4xx_npe_of_match,
},
.probe = ixp4xx_npe_probe,
.remove = ixp4xx_npe_remove,


@@ -28,6 +28,7 @@
#define AMD_SPI_RX_COUNT_REG 0x4B
#define AMD_SPI_STATUS_REG 0x4C
#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
/* M_CMD OP codes for SPI */
@@ -245,6 +246,11 @@ static int amd_spi_master_transfer(struct spi_master *master,
return 0;
}
static size_t amd_spi_max_transfer_size(struct spi_device *spi)
{
return AMD_SPI_FIFO_SIZE;
}
static int amd_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -275,6 +281,8 @@ static int amd_spi_probe(struct platform_device *pdev)
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = amd_spi_master_setup;
master->transfer_one_message = amd_spi_master_transfer;
master->max_transfer_size = amd_spi_max_transfer_size;
master->max_message_size = amd_spi_max_transfer_size;
/* Register the controller with SPI framework */
err = devm_spi_register_master(dev, master);
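Publishing the FIFO depth via max_transfer_size/max_message_size lets the SPI core and spi-mem clients split work up front instead of overrunning the 70-byte FIFO. The hook's shape, with hypothetical names:

#include <linux/spi/spi.h>

#define FOO_SPI_FIFO_SIZE	70	/* bytes per transfer, per the hunk */

static size_t foo_spi_max_transfer_size(struct spi_device *spi)
{
	return FOO_SPI_FIFO_SIZE;
}

/* in probe, after spi_alloc_master():
 *	master->max_transfer_size = foo_spi_max_transfer_size;
 *	master->max_message_size  = foo_spi_max_transfer_size;
 */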


@@ -23,6 +23,7 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
@@ -561,6 +562,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
up->port.dev = dev;
if (uart_console_enabled(&up->port))
pm_runtime_get_sync(up->port.dev);
serial8250_apply_quirks(up);
uart_add_one_port(drv, &up->port);
}


@@ -2982,8 +2982,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
-if (!port->mapbase)
+if (!port->mapbase) {
+ret = -EINVAL;
break;
+}
if (!request_mem_region(port->mapbase, size, "serial")) {
ret = -EBUSY;


@@ -1372,6 +1372,15 @@ static void pl011_stop_rx(struct uart_port *port)
pl011_dma_rx_stop(uap);
}
static void pl011_throttle_rx(struct uart_port *port)
{
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
pl011_stop_rx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void pl011_enable_ms(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1793,9 +1802,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
*/
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
unsigned long flags;
unsigned int i;
-spin_lock_irq(&uap->port.lock);
+spin_lock_irqsave(&uap->port.lock, flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1817,7 +1827,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
-spin_unlock_irq(&uap->port.lock);
+spin_unlock_irqrestore(&uap->port.lock, flags);
}
static void pl011_unthrottle_rx(struct uart_port *port)
{
struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
pl011_enable_interrupts(uap);
}
static int pl011_startup(struct uart_port *port)
@@ -2245,6 +2262,8 @@ static const struct uart_ops amba_pl011_pops = {
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
.throttle = pl011_throttle_rx,
.unthrottle = pl011_unthrottle_rx,
.enable_ms = pl011_enable_ms,
.break_ctl = pl011_break_ctl,
.startup = pl011_startup,
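Two things happen in the pl011 hunks: the lock in pl011_enable_interrupts() becomes irqsave (it is now also reached from unthrottle, which may run with interrupts enabled), and the driver gains .throttle/.unthrottle so the TTY layer can pause RX when its buffers back up. The callback pattern, sketched with hypothetical mask/unmask helpers and wired up through the driver's uart_ops as above:

#include <linux/serial_core.h>
#include <linux/spinlock.h>

static void foo_mask_rx_irqs(struct uart_port *port);	/* hypothetical */
static void foo_unmask_rx_irqs(struct uart_port *port);	/* hypothetical */

static void foo_throttle(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	foo_mask_rx_irqs(port);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void foo_unthrottle(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	foo_unmask_rx_irqs(port);
	spin_unlock_irqrestore(&port->lock, flags);
}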


@@ -378,8 +378,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
/* Enable tx dma mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
-ucon |= (dma_get_cache_alignment() >= 16) ?
-S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+ucon |= S3C64XX_UCON_TXBURST_1;
ucon |= S3C64XX_UCON_TXMODE_DMA;
wr_regl(port, S3C2410_UCON, ucon);
@@ -675,7 +674,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
S3C64XX_UCON_DMASUS_EN |
S3C64XX_UCON_TIMEOUT_EN |
S3C64XX_UCON_RXMODE_MASK);
-ucon |= S3C64XX_UCON_RXBURST_16 |
+ucon |= S3C64XX_UCON_RXBURST_1 |
0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_TIMEOUT_EN |


@@ -1912,11 +1912,6 @@ static int uart_proc_show(struct seq_file *m, void *v)
}
#endif
-static inline bool uart_console_enabled(struct uart_port *port)
-{
-return uart_console(port) && (port->cons->flags & CON_ENABLED);
-}
static void uart_port_spin_lock_init(struct uart_port *port)
{
spin_lock_init(&port->lock);


@@ -71,6 +71,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr3 |= USART_CR3_DEM;
over8 = *cr1 & USART_CR1_OVER8;
*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
if (over8)
rs485_deat_dedt = delay_ADE * baud * 8;
else


@@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
unsigned short *p = (unsigned short *) vc->vc_pos;
vc_uniscr_delete(vc, nr);
-scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
+scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
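scr_memcpyw() has memcpy semantics, and here source and destination overlap: the tail of the line is shifted left over itself, the one case memcpy() leaves undefined and memmove() defines. A userspace model of the fixed delete:

#include <string.h>

/* Delete nr cells at column x from a row of cols 16-bit cells. */
static void delete_cells(unsigned short *row, size_t cols, size_t x,
			 size_t nr, unsigned short erase)
{
	/* overlapping shift-left: must be memmove(), not memcpy() */
	memmove(row + x, row + x + nr, (cols - x - nr) * sizeof(*row));

	/* refill the vacated cells at the end of the row */
	for (size_t i = cols - nr; i < cols; i++)
		row[i] = erase;
}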


@@ -4193,7 +4193,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
}
evt->count = 0;
-evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
/* Unmask interrupt */
@@ -4206,6 +4205,9 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
+/* Keep the clearing of DWC3_EVENT_PENDING at the end */
+evt->flags &= ~DWC3_EVENT_PENDING;
return ret;
}


@@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
/* Belimo Automation devices */
{ USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) },
{ USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) },
/* ICP DAS I-756xU devices */
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },


@@ -1568,6 +1568,12 @@
#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
/*
* Belimo Automation
*/
#define BELIMO_ZTH_PID 0x8050
#define BELIMO_ZIP_PID 0xC811
/*
* Unjo AB
*/
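New FTDI-based hardware lands as a PID macro in ftdi_sio_ids.h plus a matching id-table entry in ftdi_sio.c, as the two hunks above show. The minimal pattern for any USB serial driver, with illustrative IDs:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VID		0x0403	/* FTDI_VID */
#define EXAMPLE_WIDGET_PID	0x1234	/* hypothetical product */

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_WIDGET_PID) },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);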


@@ -1718,6 +1718,7 @@ void typec_set_pwr_opmode(struct typec_port *port,
partner->usb_pd = 1;
sysfs_notify(&partner_dev->kobj, NULL,
"supports_usb_power_delivery");
kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE);
}
put_device(partner_dev);
}
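sysfs_notify() only wakes poll()/select() waiters on the attribute; userspace that listens for uevents (udev rules, libudev monitors) additionally needs the KOBJ_CHANGE event added above. Emitting both covers both kinds of listener:

#include <linux/device.h>

static void foo_notify_pd_change(struct device *dev)
{
	/* wake poll()ers blocked on the sysfs attribute */
	sysfs_notify(&dev->kobj, NULL, "supports_usb_power_delivery");
	/* and broadcast a change uevent for udev-style listeners */
	kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}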


@@ -1898,7 +1898,6 @@ static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
-struct mlx5_control_vq *cvq = &mvdev->cvq;
int err;
int i;
@@ -1908,16 +1907,6 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
goto err_vq;
}
-if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
-err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
-MLX5_CVQ_MAX_ENT, false,
-(struct vring_desc *)(uintptr_t)cvq->desc_addr,
-(struct vring_avail *)(uintptr_t)cvq->driver_addr,
-(struct vring_used *)(uintptr_t)cvq->device_addr);
-if (err)
-goto err_vq;
-}
return 0;
err_vq:
@@ -2184,6 +2173,21 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
ndev->mvdev.cvq.ready = false;
}
static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_control_vq *cvq = &mvdev->cvq;
int err = 0;
if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
MLX5_CVQ_MAX_ENT, false,
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
(struct vring_used *)(uintptr_t)cvq->device_addr);
return err;
}
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2194,6 +2198,11 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
err = setup_cvq_vring(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n");
goto err_setup;
}
err = setup_driver(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");


@@ -1466,16 +1466,12 @@ static char *vduse_devnode(struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
}
-static void vduse_mgmtdev_release(struct device *dev)
-{
-}
-static struct device vduse_mgmtdev = {
-.init_name = "vduse",
-.release = vduse_mgmtdev_release,
-};
struct vduse_mgmt_dev {
struct vdpa_mgmt_dev mgmt_dev;
struct device dev;
};
-static struct vdpa_mgmt_dev mgmt_dev;
static struct vduse_mgmt_dev *vduse_mgmt;
static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
{
@@ -1500,7 +1496,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
}
set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
vdev->vdpa.dma_dev = &vdev->vdpa.dev;
-vdev->vdpa.mdev = &mgmt_dev;
+vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
return 0;
}
@@ -1545,34 +1541,52 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
-static struct vdpa_mgmt_dev mgmt_dev = {
-.device = &vduse_mgmtdev,
-.id_table = id_table,
-.ops = &vdpa_dev_mgmtdev_ops,
-};
static void vduse_mgmtdev_release(struct device *dev)
{
struct vduse_mgmt_dev *mgmt_dev;
mgmt_dev = container_of(dev, struct vduse_mgmt_dev, dev);
kfree(mgmt_dev);
}
static int vduse_mgmtdev_init(void)
{
int ret;
-ret = device_register(&vduse_mgmtdev);
vduse_mgmt = kzalloc(sizeof(*vduse_mgmt), GFP_KERNEL);
if (!vduse_mgmt)
return -ENOMEM;
ret = dev_set_name(&vduse_mgmt->dev, "vduse");
if (ret) {
kfree(vduse_mgmt);
return ret;
}
vduse_mgmt->dev.release = vduse_mgmtdev_release;
ret = device_register(&vduse_mgmt->dev);
if (ret)
goto dev_reg_err;
vduse_mgmt->mgmt_dev.id_table = id_table;
vduse_mgmt->mgmt_dev.ops = &vdpa_dev_mgmtdev_ops;
vduse_mgmt->mgmt_dev.device = &vduse_mgmt->dev;
ret = vdpa_mgmtdev_register(&vduse_mgmt->mgmt_dev);
if (ret)
device_unregister(&vduse_mgmt->dev);
return ret;
-ret = vdpa_mgmtdev_register(&mgmt_dev);
-if (ret)
-goto err;
-return 0;
-err:
-device_unregister(&vduse_mgmtdev);
dev_reg_err:
put_device(&vduse_mgmt->dev);
return ret;
}
static void vduse_mgmtdev_exit(void)
{
-vdpa_mgmtdev_unregister(&mgmt_dev);
-device_unregister(&vduse_mgmtdev);
vdpa_mgmtdev_unregister(&vduse_mgmt->mgmt_dev);
device_unregister(&vduse_mgmt->dev);
}
static int vduse_init(void)
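The refactor follows the standard driver-model rule: a struct device that gets registered is refcounted and must live in its own allocation, freed from its ->release(), never in a static. The generic shape, with hypothetical foo names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo_mgmt {
	struct device dev;
	/* management state shares the allocation */
};

static void foo_mgmt_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_mgmt, dev));
}

static struct foo_mgmt *foo_mgmt_create(void)
{
	struct foo_mgmt *m;
	int ret;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	ret = dev_set_name(&m->dev, "foo");
	if (ret) {
		kfree(m);		/* not registered yet: plain kfree is fine */
		return ERR_PTR(ret);
	}

	m->dev.release = foo_mgmt_release;
	ret = device_register(&m->dev);
	if (ret) {
		put_device(&m->dev);	/* ->release() now does the freeing */
		return ERR_PTR(ret);
	}
	return m;
}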


@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/sysfb.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
@@ -1779,6 +1780,17 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
do_free = true;
}
/*
* If a driver asked to unregister a platform device registered by
* sysfb, then it can be assumed that this is a driver for a display
* that is set up by the system firmware and has a generic driver.
*
* Drivers for devices that don't have a generic driver will never
* ask for this, so let's assume that a real driver for the display
* was already probed and prevent sysfb from registering devices later.
*/
sysfb_disable();
mutex_lock(&registration_lock);
do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);


@@ -62,6 +62,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
@@ -543,6 +544,28 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
.get_shm_region = vm_get_shm_region,
};
#ifdef CONFIG_PM_SLEEP
static int virtio_mmio_freeze(struct device *dev)
{
struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
return virtio_device_freeze(&vm_dev->vdev);
}
static int virtio_mmio_restore(struct device *dev)
{
struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
if (vm_dev->version == 1)
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
return virtio_device_restore(&vm_dev->vdev);
}
static const struct dev_pm_ops virtio_mmio_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
};
#endif
static void virtio_mmio_release_dev(struct device *_d)
{
@@ -786,6 +809,9 @@ static struct platform_driver virtio_mmio_driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#ifdef CONFIG_PM_SLEEP
.pm = &virtio_mmio_pm_ops,
#endif
},
};
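SET_SYSTEM_SLEEP_PM_OPS() fills the suspend/resume, freeze/thaw and poweroff/restore slots with the same callback pair, and the whole block compiles away without CONFIG_PM_SLEEP, hence the #ifdef guards on both the functions and the .pm assignment. The same wiring for any platform driver, names hypothetical:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_freeze(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return foo_quiesce(priv);	/* hypothetical: stop DMA, save state */
}

static int foo_restore(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	foo_reprogram(priv);		/* hypothetical: redo init lost across S4 */
	return foo_resume(priv);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_freeze, foo_restore)
};
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
#ifdef CONFIG_PM_SLEEP
		.pm = &foo_pm_ops,
#endif
	},
};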


@@ -396,13 +396,15 @@ static void __unmap_grant_pages_done(int result,
unsigned int offset = data->unmap_ops - map->unmap_ops;
for (i = 0; i < data->count; i++) {
-WARN_ON(map->unmap_ops[offset+i].status);
+WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
+map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
-WARN_ON(map->kunmap_ops[offset+i].status);
+WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
+map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("kunmap handle=%u st=%d\n",
map->kunmap_ops[offset+i].handle,
map->kunmap_ops[offset+i].status);


@@ -1455,7 +1455,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
struct btrfs_fs_info *fs_info = state->fs_info;
int ret;
u64 length;
struct btrfs_bio *multi = NULL;
struct btrfs_io_context *multi = NULL;
struct btrfs_device *device;
length = len;


@@ -1266,7 +1266,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
return ret;
}
static int do_discard_extent(struct btrfs_bio_stripe *stripe, u64 *bytes)
static int do_discard_extent(struct btrfs_io_stripe *stripe, u64 *bytes)
{
struct btrfs_device *dev = stripe->dev;
struct btrfs_fs_info *fs_info = dev->fs_info;
@@ -1313,22 +1313,21 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 discarded_bytes = 0;
u64 end = bytenr + num_bytes;
u64 cur = bytenr;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
/*
* Avoid races with device replace and make sure our bbio has devices
* Avoid races with device replace and make sure our bioc has devices
* associated to its stripes that don't go away while we are discarding.
*/
btrfs_bio_counter_inc_blocked(fs_info);
while (cur < end) {
struct btrfs_bio_stripe *stripe;
struct btrfs_io_stripe *stripe;
int i;
num_bytes = end - cur;
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
&num_bytes, &bbio, 0);
&num_bytes, &bioc, 0);
/*
* Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
* -EOPNOTSUPP. For any such error, @num_bytes is not updated,
@@ -1337,8 +1336,8 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
if (ret < 0)
goto out;
stripe = bbio->stripes;
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
stripe = bioc->stripes;
for (i = 0; i < bioc->num_stripes; i++, stripe++) {
u64 bytes;
struct btrfs_device *device = stripe->dev;
@@ -1361,7 +1360,7 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
* And since there are two loops, explicitly
* go to out to avoid confusion.
*/
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
goto out;
}
@@ -1372,7 +1371,7 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
*/
ret = 0;
}
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
cur += num_bytes;
}
out:


@@ -2290,7 +2290,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
struct btrfs_device *dev;
u64 map_length = 0;
u64 sector;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
int ret;
ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
@@ -2304,7 +2304,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
map_length = length;
/*
* Avoid races with device replace and make sure our bbio has devices
* Avoid races with device replace and make sure our bioc has devices
* associated to its stripes that don't go away while we are doing the
* read repair operation.
*/
@@ -2317,28 +2317,28 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
* stripe's dev and sector.
*/
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
&map_length, &bbio, 0);
&map_length, &bioc, 0);
if (ret) {
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return -EIO;
}
ASSERT(bbio->mirror_num == 1);
ASSERT(bioc->mirror_num == 1);
} else {
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
&map_length, &bbio, mirror_num);
&map_length, &bioc, mirror_num);
if (ret) {
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return -EIO;
}
BUG_ON(mirror_num != bbio->mirror_num);
BUG_ON(mirror_num != bioc->mirror_num);
}
sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
bio->bi_iter.bi_sector = sector;
dev = bbio->stripes[bbio->mirror_num - 1].dev;
btrfs_put_bbio(bbio);
dev = bioc->stripes[bioc->mirror_num - 1].dev;
btrfs_put_bioc(bioc);
if (!dev || !dev->bdev ||
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
btrfs_bio_counter_dec(fs_info);


@@ -360,7 +360,7 @@ static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
int i;
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_bio_stripe *stripe = &map->stripes[i];
struct btrfs_io_stripe *stripe = &map->stripes[i];
struct btrfs_device *device = stripe->dev;
set_extent_bits_nowait(&device->alloc_state, stripe->physical,
@@ -375,7 +375,7 @@ static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
int i;
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_bio_stripe *stripe = &map->stripes[i];
struct btrfs_io_stripe *stripe = &map->stripes[i];
struct btrfs_device *device = stripe->dev;
__clear_extent_bit(&device->alloc_state, stripe->physical,


@@ -7957,7 +7957,19 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
em->block_start == EXTENT_MAP_INLINE) {
free_extent_map(em);
-ret = -ENOTBLK;
/*
 * If we are in a NOWAIT context, return -EAGAIN in order to fall
 * back to buffered IO. This is not only because we can block with
 * buffered IO (no support for NOWAIT semantics at the moment) but
 * also to avoid returning short reads to user space - this happens
 * if we were able to read some data from previous non-compressed
 * extents and then, when we fall back to buffered IO at
 * btrfs_file_read_iter() by calling filemap_read(), we fail to
 * fault in pages for the read buffer, in which case filemap_read()
 * returns a short read (the number of bytes previously read is > 0,
 * so it does not return -EFAULT).
 */
+ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
goto unlock_err;
}
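The whole fix is the error-code choice: -ENOTBLK is iomap's "retry this range with buffered IO" signal, but a NOWAIT caller must instead see -EAGAIN so it reissues the entire request in a context where blocking is allowed, rather than pairing a short direct read with a failed buffered continuation. A toy model of the decision:

#include <errno.h>
#include <stdbool.h>

/* Returns 0 if direct IO may proceed, else the errno to hand back. */
static int dio_begin_status(bool compressed_or_inline, bool nowait)
{
	if (!compressed_or_inline)
		return 0;
	/* NOWAIT: -EAGAIN makes the caller retry where blocking is OK;
	 * otherwise -ENOTBLK requests buffered fallback for the range. */
	return nowait ? -EAGAIN : -ENOTBLK;
}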


@@ -61,7 +61,7 @@ enum btrfs_rbio_ops {
struct btrfs_raid_bio {
struct btrfs_fs_info *fs_info;
struct btrfs_bio *bbio;
struct btrfs_io_context *bioc;
/* while we're doing rmw on a stripe
* we put it into a hash table so we can
@@ -271,7 +271,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
*/
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
u64 num = rbio->bbio->raid_map[0];
u64 num = rbio->bioc->raid_map[0];
/*
* we shift down quite a bit. We're using byte
@@ -559,8 +559,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
test_bit(RBIO_CACHE_BIT, &cur->flags))
return 0;
if (last->bbio->raid_map[0] !=
cur->bbio->raid_map[0])
if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
return 0;
/* we can't merge with different operations */
@@ -673,7 +672,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(cur, &h->hash_list, hash_list) {
if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
continue;
spin_lock(&cur->bio_list_lock);
@@ -838,7 +837,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
}
}
btrfs_put_bbio(rbio->bbio);
btrfs_put_bioc(rbio->bioc);
kfree(rbio);
}
@@ -906,7 +905,7 @@ static void raid_write_end_io(struct bio *bio)
/* OK, we have read all the stripes we need to. */
max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
0 : rbio->bbio->max_errors;
0 : rbio->bioc->max_errors;
if (atomic_read(&rbio->error) > max_errors)
err = BLK_STS_IOERR;
@@ -961,12 +960,12 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* this does not allocate any pages for rbio->pages.
*/
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
struct btrfs_bio *bbio,
struct btrfs_io_context *bioc,
u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
int num_pages = rbio_nr_pages(stripe_len, real_stripes);
int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
void *p;
@@ -987,7 +986,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
spin_lock_init(&rbio->bio_list_lock);
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
rbio->bioc = bioc;
rbio->fs_info = fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
@@ -1015,9 +1014,9 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef CONSUME_ALLOC
if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
nr_data = real_stripes - 1;
else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
nr_data = real_stripes - 2;
else
BUG();
@@ -1077,10 +1076,10 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
struct bio *last = bio_list->tail;
int ret;
struct bio *bio;
struct btrfs_bio_stripe *stripe;
struct btrfs_io_stripe *stripe;
u64 disk_start;
stripe = &rbio->bbio->stripes[stripe_nr];
stripe = &rbio->bioc->stripes[stripe_nr];
disk_start = stripe->physical + (page_index << PAGE_SHIFT);
/* if the device is missing, just fail this stripe */
@@ -1155,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
int i = 0;
start = bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->bbio->raid_map[0];
stripe_offset = start - rbio->bioc->raid_map[0];
page_index = stripe_offset >> PAGE_SHIFT;
if (bio_flagged(bio, BIO_CLONED))
@@ -1179,7 +1178,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
*/
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
struct btrfs_bio *bbio = rbio->bbio;
struct btrfs_io_context *bioc = rbio->bioc;
void **pointers = rbio->finish_pointers;
int nr_data = rbio->nr_data;
int stripe;
@@ -1284,11 +1283,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
}
}
if (likely(!bbio->num_tgtdevs))
if (likely(!bioc->num_tgtdevs))
goto write_data;
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
if (!bbio->tgtdev_map[stripe])
if (!bioc->tgtdev_map[stripe])
continue;
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
@@ -1302,7 +1301,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
}
ret = rbio_add_io_page(rbio, &bio_list, page,
rbio->bbio->tgtdev_map[stripe],
rbio->bioc->tgtdev_map[stripe],
pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
@@ -1339,12 +1338,12 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
{
u64 physical = bio->bi_iter.bi_sector;
int i;
struct btrfs_bio_stripe *stripe;
struct btrfs_io_stripe *stripe;
physical <<= 9;
for (i = 0; i < rbio->bbio->num_stripes; i++) {
stripe = &rbio->bbio->stripes[i];
for (i = 0; i < rbio->bioc->num_stripes; i++) {
stripe = &rbio->bioc->stripes[i];
if (in_range(physical, stripe->physical, rbio->stripe_len) &&
stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
return i;
@@ -1365,7 +1364,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
int i;
for (i = 0; i < rbio->nr_data; i++) {
u64 stripe_start = rbio->bbio->raid_map[i];
u64 stripe_start = rbio->bioc->raid_map[i];
if (in_range(logical, stripe_start, rbio->stripe_len))
return i;
@@ -1456,7 +1455,7 @@ static void raid_rmw_end_io(struct bio *bio)
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
goto cleanup;
/*
@@ -1538,8 +1537,8 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
}
/*
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
* The bioc may be freed once we submit the last bio. Make sure not to
* touch it after that.
*/
atomic_set(&rbio->stripes_pending, bios_to_read);
while ((bio = bio_list_pop(&bio_list))) {
@@ -1720,16 +1719,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
* our main entry point for writes from the rest of the FS.
*/
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len)
struct btrfs_io_context *bioc, u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
int ret;
rbio = alloc_rbio(fs_info, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bioc, stripe_len);
if (IS_ERR(rbio)) {
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
return PTR_ERR(rbio);
}
bio_list_add(&rbio->bio_list, bio);
@@ -1842,7 +1841,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
}
/* all raid6 handling here */
if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
/*
* single failure, rebuild from parity raid5
* style
@@ -1874,8 +1873,8 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
* here due to a crc mismatch and we can't give them the
* data they want
*/
if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->bbio->raid_map[faila] ==
if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->bioc->raid_map[faila] ==
RAID5_P_STRIPE) {
err = BLK_STS_IOERR;
goto cleanup;
@@ -1887,7 +1886,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
goto pstripe;
}
if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
raid6_datap_recov(rbio->real_stripes,
PAGE_SIZE, faila, pointers);
} else {
@@ -2006,7 +2005,7 @@ static void raid_recover_end_io(struct bio *bio)
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
rbio_orig_end_io(rbio, BLK_STS_IOERR);
else
__raid_recover_end_io(rbio);
@@ -2074,7 +2073,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* were up to date, or we might have no bios to read because
* the devices were gone.
*/
if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) {
__raid_recover_end_io(rbio);
return 0;
} else {
@@ -2083,8 +2082,8 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
}
/*
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
* The bioc may be freed once we submit the last bio. Make sure not to
* touch it after that.
*/
atomic_set(&rbio->stripes_pending, bios_to_read);
while ((bio = bio_list_pop(&bio_list))) {
@@ -2117,21 +2116,21 @@ cleanup:
* of the drive.
*/
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_io_context *bioc, u64 stripe_len,
int mirror_num, int generic_io)
{
struct btrfs_raid_bio *rbio;
int ret;
if (generic_io) {
ASSERT(bbio->mirror_num == mirror_num);
ASSERT(bioc->mirror_num == mirror_num);
btrfs_io_bio(bio)->mirror_num = mirror_num;
}
rbio = alloc_rbio(fs_info, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bioc, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
return PTR_ERR(rbio);
}
@@ -2142,11 +2141,11 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bioc has map_type %llu)",
__func__, bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type);
(u64)bio->bi_iter.bi_size, bioc->map_type);
if (generic_io)
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
kfree(rbio);
return -EIO;
}
@@ -2155,7 +2154,7 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
} else {
btrfs_get_bbio(bbio);
btrfs_get_bioc(bioc);
}
/*
@@ -2214,7 +2213,7 @@ static void read_rebuild_work(struct btrfs_work *work)
/*
* The following code is used to scrub/replace the parity stripe
*
* Caller must have already increased bio_counter for getting @bbio.
* Caller must have already increased bio_counter for getting @bioc.
*
* Note: We need make sure all the pages that add into the scrub/replace
* raid bio are correct and not be changed during the scrub/replace. That
@@ -2223,14 +2222,14 @@ static void read_rebuild_work(struct btrfs_work *work)
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_io_context *bioc, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
{
struct btrfs_raid_bio *rbio;
int i;
rbio = alloc_rbio(fs_info, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bioc, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
@@ -2242,12 +2241,12 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
/*
* After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
* After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
* to the end position, so this search can start from the first parity
* stripe.
*/
for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
if (bbio->stripes[i].dev == scrub_dev) {
if (bioc->stripes[i].dev == scrub_dev) {
rbio->scrubp = i;
break;
}
@@ -2260,7 +2259,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
/*
* We have already increased bio_counter when getting bbio, record it
* We have already increased bio_counter when getting bioc, record it
* so we can free it at rbio_orig_end_io().
*/
rbio->generic_bio_cnt = 1;
@@ -2275,10 +2274,10 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
int stripe_offset;
int index;
ASSERT(logical >= rbio->bbio->raid_map[0]);
ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
ASSERT(logical >= rbio->bioc->raid_map[0]);
ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
rbio->stripe_len * rbio->nr_data);
stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
index = stripe_offset >> PAGE_SHIFT;
rbio->bio_pages[index] = page;
}
@@ -2312,7 +2311,7 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check)
{
struct btrfs_bio *bbio = rbio->bbio;
struct btrfs_io_context *bioc = rbio->bioc;
void **pointers = rbio->finish_pointers;
unsigned long *pbitmap = rbio->finish_pbitmap;
int nr_data = rbio->nr_data;
@@ -2335,7 +2334,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
else
BUG();
if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
is_replace = 1;
bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
}
@@ -2435,7 +2434,7 @@ writeback:
page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
ret = rbio_add_io_page(rbio, &bio_list, page,
bbio->tgtdev_map[rbio->scrubp],
bioc->tgtdev_map[rbio->scrubp],
pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
@@ -2483,7 +2482,7 @@ static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
*/
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
goto cleanup;
if (rbio->faila >= 0 || rbio->failb >= 0) {
@@ -2504,7 +2503,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
* the data, so the capability of the repair is declined.
* (In the case of RAID5, we can not repair anything)
*/
if (dfail > rbio->bbio->max_errors - 1)
if (dfail > rbio->bioc->max_errors - 1)
goto cleanup;
/*
@@ -2625,8 +2624,8 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
}
/*
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
* The bioc may be freed once we submit the last bio. Make sure not to
* touch it after that.
*/
atomic_set(&rbio->stripes_pending, bios_to_read);
while ((bio = bio_list_pop(&bio_list))) {
@@ -2671,11 +2670,11 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length)
struct btrfs_io_context *bioc, u64 length)
{
struct btrfs_raid_bio *rbio;
rbio = alloc_rbio(fs_info, bbio, length);
rbio = alloc_rbio(fs_info, bioc, length);
if (IS_ERR(rbio))
return NULL;
@@ -2695,7 +2694,7 @@ raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
}
/*
* When we get bbio, we have already increased bio_counter, record it
* When we get bioc, we have already increased bio_counter, record it
* so we can free it at rbio_orig_end_io()
*/
rbio->generic_bio_cnt = 1;


@@ -31,24 +31,24 @@ struct btrfs_raid_bio;
struct btrfs_device;
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_io_context *bioc, u64 stripe_len,
int mirror_num, int generic_io);
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len);
struct btrfs_io_context *bioc, u64 stripe_len);
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
u64 logical);
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_io_context *bioc, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length);
struct btrfs_io_context *bioc, u64 length);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);


@@ -227,7 +227,7 @@ start_machine:
}
static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
struct btrfs_bio *bbio)
struct btrfs_io_context *bioc)
{
struct btrfs_fs_info *fs_info = dev->fs_info;
int ret;
@@ -275,11 +275,11 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
kref_init(&zone->refcnt);
zone->elems = 0;
zone->device = dev; /* our device always sits at index 0 */
for (i = 0; i < bbio->num_stripes; ++i) {
for (i = 0; i < bioc->num_stripes; ++i) {
/* bounds have already been checked */
zone->devs[i] = bbio->stripes[i].dev;
zone->devs[i] = bioc->stripes[i].dev;
}
zone->ndevs = bbio->num_stripes;
zone->ndevs = bioc->num_stripes;
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&dev->reada_zones,
@@ -309,7 +309,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
int ret;
struct reada_extent *re = NULL;
struct reada_extent *re_exist = NULL;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
u64 length;
@@ -345,28 +345,28 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
*/
length = fs_info->nodesize;
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&length, &bbio, 0);
if (ret || !bbio || length < fs_info->nodesize)
&length, &bioc, 0);
if (ret || !bioc || length < fs_info->nodesize)
goto error;
if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
if (bioc->num_stripes > BTRFS_MAX_MIRRORS) {
btrfs_err(fs_info,
"readahead: more than %d copies not supported",
BTRFS_MAX_MIRRORS);
goto error;
}
real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
for (nzones = 0; nzones < real_stripes; ++nzones) {
struct reada_zone *zone;
dev = bbio->stripes[nzones].dev;
dev = bioc->stripes[nzones].dev;
/* cannot read ahead on missing device. */
if (!dev->bdev)
continue;
zone = reada_find_zone(dev, logical, bbio);
zone = reada_find_zone(dev, logical, bioc);
if (!zone)
continue;
@@ -464,7 +464,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
if (!have_zone)
goto error;
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
return re;
error:
@@ -488,7 +488,7 @@ error:
kref_put(&zone->refcnt, reada_zone_release);
spin_unlock(&fs_info->reada_lock);
}
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
kfree(re);
return re_exist;
}


@@ -57,7 +57,7 @@ struct scrub_ctx;
struct scrub_recover {
refcount_t refs;
struct btrfs_bio *bbio;
struct btrfs_io_context *bioc;
u64 map_length;
};
@@ -254,7 +254,7 @@ static void scrub_put_ctx(struct scrub_ctx *sctx);
static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
{
return spage->recover &&
(spage->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
(spage->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
@@ -798,7 +798,7 @@ static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
{
if (refcount_dec_and_test(&recover->refs)) {
btrfs_bio_counter_dec(fs_info);
btrfs_put_bbio(recover->bbio);
btrfs_put_bioc(recover->bioc);
kfree(recover);
}
}
@@ -1027,8 +1027,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
sblock_other = sblocks_for_recheck + mirror_index;
} else {
struct scrub_recover *r = sblock_bad->pagev[0]->recover;
int max_allowed = r->bbio->num_stripes -
r->bbio->num_tgtdevs;
int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
if (mirror_index >= max_allowed)
break;
@@ -1218,14 +1217,14 @@ out:
return 0;
}
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
{
if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
return 2;
else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
return 3;
else
return (int)bbio->num_stripes;
return (int)bioc->num_stripes;
}
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
@@ -1269,7 +1268,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
u64 flags = original_sblock->pagev[0]->flags;
u64 have_csum = original_sblock->pagev[0]->have_csum;
struct scrub_recover *recover;
struct btrfs_bio *bbio;
struct btrfs_io_context *bioc;
u64 sublen;
u64 mapped_length;
u64 stripe_offset;
@@ -1288,7 +1287,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
while (length > 0) {
sublen = min_t(u64, length, fs_info->sectorsize);
mapped_length = sublen;
bbio = NULL;
bioc = NULL;
/*
* With a length of sectorsize, each returned stripe represents
@@ -1296,27 +1295,27 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
*/
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
logical, &mapped_length, &bbio);
if (ret || !bbio || mapped_length < sublen) {
btrfs_put_bbio(bbio);
logical, &mapped_length, &bioc);
if (ret || !bioc || mapped_length < sublen) {
btrfs_put_bioc(bioc);
btrfs_bio_counter_dec(fs_info);
return -EIO;
}
recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
if (!recover) {
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
btrfs_bio_counter_dec(fs_info);
return -ENOMEM;
}
refcount_set(&recover->refs, 1);
recover->bbio = bbio;
recover->bioc = bioc;
recover->map_length = mapped_length;
BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) {
@@ -1348,17 +1347,17 @@ leave_nomem:
sctx->fs_info->csum_size);
scrub_stripe_index_and_offset(logical,
bbio->map_type,
bbio->raid_map,
bioc->map_type,
bioc->raid_map,
mapped_length,
bbio->num_stripes -
bbio->num_tgtdevs,
bioc->num_stripes -
bioc->num_tgtdevs,
mirror_index,
&stripe_index,
&stripe_offset);
spage->physical = bbio->stripes[stripe_index].physical +
spage->physical = bioc->stripes[stripe_index].physical +
stripe_offset;
spage->dev = bbio->stripes[stripe_index].dev;
spage->dev = bioc->stripes[stripe_index].dev;
BUG_ON(page_index >= original_sblock->page_count);
spage->physical_for_dev_replace =
@@ -1401,7 +1400,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
bio->bi_end_io = scrub_bio_wait_endio;
mirror_num = spage->sblock->pagev[0]->mirror_num;
ret = raid56_parity_recover(fs_info, bio, spage->recover->bbio,
ret = raid56_parity_recover(fs_info, bio, spage->recover->bioc,
spage->recover->map_length,
mirror_num, 0);
if (ret)
@@ -2203,7 +2202,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 length = sblock->page_count * PAGE_SIZE;
u64 logical = sblock->pagev[0]->logical;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
struct bio *bio;
struct btrfs_raid_bio *rbio;
int ret;
@@ -2211,19 +2210,19 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&length, &bbio);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
&length, &bioc);
if (ret || !bioc || !bioc->raid_map)
goto bioc_out;
if (WARN_ON(!sctx->is_dev_replace ||
!(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
!(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
/*
* We shouldn't be scrubbing a missing device. Even for dev
* replace, we should only get here for RAID 5/6. We either
* managed to mount something with no mirrors remaining or
* there's a bug in scrub_remap_extent()/btrfs_map_block().
*/
goto bbio_out;
goto bioc_out;
}
bio = btrfs_io_bio_alloc(0);
@@ -2231,7 +2230,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
rbio = raid56_alloc_missing_rbio(fs_info, bio, bioc, length);
if (!rbio)
goto rbio_out;
@@ -2249,9 +2248,9 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
rbio_out:
bio_put(bio);
bbio_out:
bioc_out:
btrfs_bio_counter_dec(fs_info);
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
@@ -2826,7 +2825,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct bio *bio;
struct btrfs_raid_bio *rbio;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
u64 length;
int ret;
@@ -2838,16 +2837,16 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
&length, &bbio);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
&length, &bioc);
if (ret || !bioc || !bioc->raid_map)
goto bioc_out;
bio = btrfs_io_bio_alloc(0);
bio->bi_iter.bi_sector = sparity->logic_start >> 9;
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bioc,
length, sparity->scrub_dev,
sparity->dbitmap,
sparity->nsectors);
@@ -2860,9 +2859,9 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
rbio_out:
bio_put(bio);
bbio_out:
bioc_out:
btrfs_bio_counter_dec(fs_info);
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
sparity->nsectors);
spin_lock(&sctx->stat_lock);
@@ -2901,7 +2900,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
u64 flags;
int ret;
int slot;
@@ -3044,22 +3043,22 @@ again:
extent_len);
mapped_length = extent_len;
bbio = NULL;
bioc = NULL;
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
extent_logical, &mapped_length, &bbio,
extent_logical, &mapped_length, &bioc,
0);
if (!ret) {
if (!bbio || mapped_length < extent_len)
if (!bioc || mapped_length < extent_len)
ret = -EIO;
}
if (ret) {
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
goto out;
}
extent_physical = bbio->stripes[0].physical;
extent_mirror_num = bbio->mirror_num;
extent_dev = bbio->stripes[0].dev;
btrfs_put_bbio(bbio);
extent_physical = bioc->stripes[0].physical;
extent_mirror_num = bioc->mirror_num;
extent_dev = bioc->stripes[0].dev;
btrfs_put_bioc(bioc);
ret = btrfs_lookup_csums_range(csum_root,
extent_logical,
@@ -4311,20 +4310,20 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
int *extent_mirror_num)
{
u64 mapped_length;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
int ret;
mapped_length = extent_len;
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
&mapped_length, &bbio, 0);
if (ret || !bbio || mapped_length < extent_len ||
!bbio->stripes[0].dev->bdev) {
btrfs_put_bbio(bbio);
&mapped_length, &bioc, 0);
if (ret || !bioc || mapped_length < extent_len ||
!bioc->stripes[0].dev->bdev) {
btrfs_put_bioc(bioc);
return;
}
*extent_physical = bbio->stripes[0].physical;
*extent_mirror_num = bbio->mirror_num;
*extent_dev = bbio->stripes[0].dev;
btrfs_put_bbio(bbio);
*extent_physical = bioc->stripes[0].physical;
*extent_mirror_num = bioc->mirror_num;
*extent_dev = bioc->stripes[0].dev;
btrfs_put_bioc(bioc);
}


@@ -251,7 +251,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
struct btrfs_io_context **bioc_ret,
int mirror_num, int need_raid_map);
/*
@@ -5868,7 +5868,7 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
}
/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
{
int i;
int again = 1;
@@ -5877,52 +5877,53 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
again = 0;
for (i = 0; i < num_stripes - 1; i++) {
/* Swap if parity is on a smaller index */
if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
swap(bbio->stripes[i], bbio->stripes[i + 1]);
swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
swap(bioc->stripes[i], bioc->stripes[i + 1]);
swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
again = 1;
}
}
}
}
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
static struct btrfs_io_context *alloc_btrfs_io_context(int total_stripes,
int real_stripes)
{
struct btrfs_bio *bbio = kzalloc(
/* the size of the btrfs_bio */
sizeof(struct btrfs_bio) +
/* plus the variable array for the stripes */
sizeof(struct btrfs_bio_stripe) * (total_stripes) +
/* plus the variable array for the tgt dev */
struct btrfs_io_context *bioc = kzalloc(
/* The size of btrfs_io_context */
sizeof(struct btrfs_io_context) +
/* Plus the variable array for the stripes */
sizeof(struct btrfs_io_stripe) * (total_stripes) +
/* Plus the variable array for the tgt dev */
sizeof(int) * (real_stripes) +
/*
* plus the raid_map, which includes both the tgt dev
* and the stripes
* Plus the raid_map, which includes both the tgt dev
* and the stripes.
*/
sizeof(u64) * (total_stripes),
GFP_NOFS|__GFP_NOFAIL);
atomic_set(&bbio->error, 0);
refcount_set(&bbio->refs, 1);
atomic_set(&bioc->error, 0);
refcount_set(&bioc->refs, 1);
bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
return bbio;
return bioc;
}
void btrfs_get_bbio(struct btrfs_bio *bbio)
void btrfs_get_bioc(struct btrfs_io_context *bioc)
{
WARN_ON(!refcount_read(&bbio->refs));
refcount_inc(&bbio->refs);
WARN_ON(!refcount_read(&bioc->refs));
refcount_inc(&bioc->refs);
}
void btrfs_put_bbio(struct btrfs_bio *bbio)
void btrfs_put_bioc(struct btrfs_io_context *bioc)
{
if (!bbio)
if (!bioc)
return;
if (refcount_dec_and_test(&bbio->refs))
kfree(bbio);
if (refcount_dec_and_test(&bioc->refs))
kfree(bioc);
}
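alloc_btrfs_io_context() keeps the old single-allocation trick: one kzalloc() carries the fixed header plus three variable-length tails, with the tail pointers carved out by hand. The layout in isolation, with illustrative types:

#include <linux/slab.h>
#include <linux/types.h>

struct foo_stripe {
	u64 physical;
	u64 length;
};

struct foo_io_ctx {
	int num_stripes;
	int *tgtdev_map;		/* tail: real_stripes ints */
	u64 *raid_map;			/* tail: total_stripes u64s */
	struct foo_stripe stripes[];	/* tail: total_stripes entries */
};

static struct foo_io_ctx *foo_io_ctx_alloc(int total_stripes, int real_stripes)
{
	struct foo_io_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx) +
		      sizeof(struct foo_stripe) * total_stripes +
		      sizeof(int) * real_stripes +
		      sizeof(u64) * total_stripes,
		      GFP_NOFS | __GFP_NOFAIL);

	/* carve the tail arrays out of the single allocation */
	ctx->tgtdev_map = (int *)(ctx->stripes + total_stripes);
	ctx->raid_map = (u64 *)(ctx->tgtdev_map + real_stripes);
	return ctx;
}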
/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
@@ -5932,11 +5933,11 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
*/
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
u64 logical, u64 *length_ret,
struct btrfs_bio **bbio_ret)
struct btrfs_io_context **bioc_ret)
{
struct extent_map *em;
struct map_lookup *map;
struct btrfs_bio *bbio;
struct btrfs_io_context *bioc;
u64 length = *length_ret;
u64 offset;
u64 stripe_nr;
@@ -5955,8 +5956,8 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
int ret = 0;
int i;
/* discard always return a bbio */
ASSERT(bbio_ret);
/* Discard always returns a bioc. */
ASSERT(bioc_ret);
em = btrfs_get_chunk_map(fs_info, logical, length);
if (IS_ERR(em))
@@ -6019,26 +6020,25 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
&stripe_index);
}
bbio = alloc_btrfs_bio(num_stripes, 0);
if (!bbio) {
bioc = alloc_btrfs_io_context(num_stripes, 0);
if (!bioc) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < num_stripes; i++) {
bbio->stripes[i].physical =
bioc->stripes[i].physical =
map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
bbio->stripes[i].dev = map->stripes[stripe_index].dev;
bioc->stripes[i].dev = map->stripes[stripe_index].dev;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
bbio->stripes[i].length = stripes_per_dev *
bioc->stripes[i].length = stripes_per_dev *
map->stripe_len;
if (i / sub_stripes < remaining_stripes)
bbio->stripes[i].length +=
map->stripe_len;
bioc->stripes[i].length += map->stripe_len;
/*
* Special for the first stripe and
@@ -6049,19 +6049,17 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
* off end_off
*/
if (i < sub_stripes)
bbio->stripes[i].length -=
stripe_offset;
bioc->stripes[i].length -= stripe_offset;
if (stripe_index >= last_stripe &&
stripe_index <= (last_stripe +
sub_stripes - 1))
bbio->stripes[i].length -=
stripe_end_offset;
bioc->stripes[i].length -= stripe_end_offset;
if (i == sub_stripes - 1)
stripe_offset = 0;
} else {
bbio->stripes[i].length = length;
bioc->stripes[i].length = length;
}
stripe_index++;
@@ -6071,9 +6069,9 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
}
}
*bbio_ret = bbio;
bbio->map_type = map->type;
bbio->num_stripes = num_stripes;
*bioc_ret = bioc;
bioc->map_type = map->type;
bioc->num_stripes = num_stripes;
out:
free_extent_map(em);
return ret;
@@ -6097,7 +6095,7 @@ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
u64 srcdev_devid, int *mirror_num,
u64 *physical)
{
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
int num_stripes;
int index_srcdev = 0;
int found = 0;
@@ -6106,20 +6104,20 @@ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
int ret = 0;
ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
logical, &length, &bbio, 0, 0);
logical, &length, &bioc, 0, 0);
if (ret) {
ASSERT(bbio == NULL);
ASSERT(bioc == NULL);
return ret;
}
num_stripes = bbio->num_stripes;
num_stripes = bioc->num_stripes;
if (*mirror_num > num_stripes) {
/*
* BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
* that means that the requested area is not left of the left
* cursor
*/
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
return -EIO;
}
@@ -6129,7 +6127,7 @@ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
* pointer to the one of the target drive.
*/
for (i = 0; i < num_stripes; i++) {
if (bbio->stripes[i].dev->devid != srcdev_devid)
if (bioc->stripes[i].dev->devid != srcdev_devid)
continue;
/*
@@ -6137,15 +6135,15 @@ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
* mirror with the lowest physical address
*/
if (found &&
physical_of_found <= bbio->stripes[i].physical)
physical_of_found <= bioc->stripes[i].physical)
continue;
index_srcdev = i;
found = 1;
physical_of_found = bbio->stripes[i].physical;
physical_of_found = bioc->stripes[i].physical;
}
btrfs_put_bbio(bbio);
btrfs_put_bioc(bioc);
ASSERT(found);
if (!found)
@@ -6176,12 +6174,12 @@ static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
}
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
struct btrfs_bio **bbio_ret,
struct btrfs_io_context **bioc_ret,
struct btrfs_dev_replace *dev_replace,
u64 logical,
int *num_stripes_ret, int *max_errors_ret)
{
struct btrfs_bio *bbio = *bbio_ret;
struct btrfs_io_context *bioc = *bioc_ret;
u64 srcdev_devid = dev_replace->srcdev->devid;
int tgtdev_indexes = 0;
int num_stripes = *num_stripes_ret;
@@ -6211,17 +6209,17 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
*/
index_where_to_add = num_stripes;
for (i = 0; i < num_stripes; i++) {
if (bbio->stripes[i].dev->devid == srcdev_devid) {
if (bioc->stripes[i].dev->devid == srcdev_devid) {
/* write to new disk, too */
struct btrfs_bio_stripe *new =
bbio->stripes + index_where_to_add;
struct btrfs_bio_stripe *old =
bbio->stripes + i;
struct btrfs_io_stripe *new =
bioc->stripes + index_where_to_add;
struct btrfs_io_stripe *old =
bioc->stripes + i;
new->physical = old->physical;
new->length = old->length;
new->dev = dev_replace->tgtdev;
bbio->tgtdev_map[i] = index_where_to_add;
bioc->tgtdev_map[i] = index_where_to_add;
index_where_to_add++;
max_errors++;
tgtdev_indexes++;
@@ -6241,30 +6239,29 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
* full copy of the source drive.
*/
for (i = 0; i < num_stripes; i++) {
if (bbio->stripes[i].dev->devid == srcdev_devid) {
if (bioc->stripes[i].dev->devid == srcdev_devid) {
/*
* In case of DUP, in order to keep it simple,
* only add the mirror with the lowest physical
* address
*/
if (found &&
physical_of_found <=
bbio->stripes[i].physical)
physical_of_found <= bioc->stripes[i].physical)
continue;
index_srcdev = i;
found = 1;
physical_of_found = bbio->stripes[i].physical;
physical_of_found = bioc->stripes[i].physical;
}
}
if (found) {
struct btrfs_bio_stripe *tgtdev_stripe =
bbio->stripes + num_stripes;
struct btrfs_io_stripe *tgtdev_stripe =
bioc->stripes + num_stripes;
tgtdev_stripe->physical = physical_of_found;
tgtdev_stripe->length =
bbio->stripes[index_srcdev].length;
bioc->stripes[index_srcdev].length;
tgtdev_stripe->dev = dev_replace->tgtdev;
bbio->tgtdev_map[index_srcdev] = num_stripes;
bioc->tgtdev_map[index_srcdev] = num_stripes;
tgtdev_indexes++;
num_stripes++;
@@ -6273,8 +6270,8 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
*num_stripes_ret = num_stripes;
*max_errors_ret = max_errors;
bbio->num_tgtdevs = tgtdev_indexes;
*bbio_ret = bbio;
bioc->num_tgtdevs = tgtdev_indexes;
*bioc_ret = bioc;
}
static bool need_full_stripe(enum btrfs_map_op op)
@@ -6377,7 +6374,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
struct btrfs_io_context **bioc_ret,
int mirror_num, int need_raid_map)
{
struct extent_map *em;
@@ -6392,7 +6389,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
int num_stripes;
int max_errors = 0;
int tgtdev_indexes = 0;
struct btrfs_bio *bbio = NULL;
struct btrfs_io_context *bioc = NULL;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int dev_replace_is_ongoing = 0;
int num_alloc_stripes;
@@ -6401,7 +6398,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
u64 raid56_full_stripe_start = (u64)-1;
struct btrfs_io_geometry geom;
ASSERT(bbio_ret);
ASSERT(bioc_ret);
ASSERT(op != BTRFS_MAP_DISCARD);
em = btrfs_get_chunk_map(fs_info, logical, *length);
@@ -6545,20 +6542,20 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
tgtdev_indexes = num_stripes;
}
-bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
-if (!bbio) {
+bioc = alloc_btrfs_io_context(num_alloc_stripes, tgtdev_indexes);
+if (!bioc) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < num_stripes; i++) {
-bbio->stripes[i].physical = map->stripes[stripe_index].physical +
+bioc->stripes[i].physical = map->stripes[stripe_index].physical +
stripe_offset + stripe_nr * map->stripe_len;
-bbio->stripes[i].dev = map->stripes[stripe_index].dev;
+bioc->stripes[i].dev = map->stripes[stripe_index].dev;
stripe_index++;
}
-/* build raid_map */
+/* Build raid_map */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
(need_full_stripe(op) || mirror_num > 1)) {
u64 tmp;
@@ -6570,15 +6567,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
/* Fill in the logical address of each stripe */
tmp = stripe_nr * data_stripes;
for (i = 0; i < data_stripes; i++)
-bbio->raid_map[(i+rot) % num_stripes] =
+bioc->raid_map[(i + rot) % num_stripes] =
em->start + (tmp + i) * map->stripe_len;
-bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
if (map->type & BTRFS_BLOCK_GROUP_RAID6)
-bbio->raid_map[(i+rot+1) % num_stripes] =
+bioc->raid_map[(i + rot + 1) % num_stripes] =
RAID6_Q_STRIPE;
-sort_parity_stripes(bbio, num_stripes);
+sort_parity_stripes(bioc, num_stripes);
}
if (need_full_stripe(op))
@@ -6586,15 +6583,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
need_full_stripe(op)) {
-handle_ops_on_dev_replace(op, &bbio, dev_replace, logical,
+handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
&num_stripes, &max_errors);
}
-*bbio_ret = bbio;
-bbio->map_type = map->type;
-bbio->num_stripes = num_stripes;
-bbio->max_errors = max_errors;
-bbio->mirror_num = mirror_num;
+*bioc_ret = bioc;
+bioc->map_type = map->type;
+bioc->num_stripes = num_stripes;
+bioc->max_errors = max_errors;
+bioc->mirror_num = mirror_num;
/*
* this is the case that REQ_READ && dev_replace_is_ongoing &&
@@ -6603,9 +6600,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
*/
if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
WARN_ON(num_stripes > 1);
-bbio->stripes[0].dev = dev_replace->tgtdev;
-bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
-bbio->mirror_num = map->num_stripes + 1;
+bioc->stripes[0].dev = dev_replace->tgtdev;
+bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
+bioc->mirror_num = map->num_stripes + 1;
}
out:
if (dev_replace_is_ongoing) {
@@ -6619,40 +6616,40 @@ out:
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
-struct btrfs_bio **bbio_ret, int mirror_num)
+struct btrfs_io_context **bioc_ret, int mirror_num)
{
if (op == BTRFS_MAP_DISCARD)
return __btrfs_map_block_for_discard(fs_info, logical,
-length, bbio_ret);
+length, bioc_ret);
-return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
+return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
mirror_num, 0);
}
/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
-struct btrfs_bio **bbio_ret)
+struct btrfs_io_context **bioc_ret)
{
-return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
+return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
}
-static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
+static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
{
-bio->bi_private = bbio->private;
-bio->bi_end_io = bbio->end_io;
+bio->bi_private = bioc->private;
+bio->bi_end_io = bioc->end_io;
bio_endio(bio);
-btrfs_put_bbio(bbio);
+btrfs_put_bioc(bioc);
}
static void btrfs_end_bio(struct bio *bio)
{
-struct btrfs_bio *bbio = bio->bi_private;
+struct btrfs_io_context *bioc = bio->bi_private;
int is_orig_bio = 0;
if (bio->bi_status) {
-atomic_inc(&bbio->error);
+atomic_inc(&bioc->error);
if (bio->bi_status == BLK_STS_IOERR ||
bio->bi_status == BLK_STS_TARGET) {
struct btrfs_device *dev = btrfs_io_bio(bio)->device;
@@ -6670,22 +6667,22 @@ static void btrfs_end_bio(struct bio *bio)
}
}
-if (bio == bbio->orig_bio)
+if (bio == bioc->orig_bio)
is_orig_bio = 1;
-btrfs_bio_counter_dec(bbio->fs_info);
+btrfs_bio_counter_dec(bioc->fs_info);
-if (atomic_dec_and_test(&bbio->stripes_pending)) {
+if (atomic_dec_and_test(&bioc->stripes_pending)) {
if (!is_orig_bio) {
bio_put(bio);
-bio = bbio->orig_bio;
+bio = bioc->orig_bio;
}
-btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
+btrfs_io_bio(bio)->mirror_num = bioc->mirror_num;
/* only send an error to the higher layers if it is
* beyond the tolerance of the btrfs bio
*/
-if (atomic_read(&bbio->error) > bbio->max_errors) {
+if (atomic_read(&bioc->error) > bioc->max_errors) {
bio->bi_status = BLK_STS_IOERR;
} else {
/*
@@ -6695,18 +6692,18 @@ static void btrfs_end_bio(struct bio *bio)
bio->bi_status = BLK_STS_OK;
}
-btrfs_end_bbio(bbio, bio);
+btrfs_end_bioc(bioc, bio);
} else if (!is_orig_bio) {
bio_put(bio);
}
}
-static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
+static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
u64 physical, struct btrfs_device *dev)
{
-struct btrfs_fs_info *fs_info = bbio->fs_info;
+struct btrfs_fs_info *fs_info = bioc->fs_info;
-bio->bi_private = bbio;
+bio->bi_private = bioc;
btrfs_io_bio(bio)->device = dev;
bio->bi_end_io = btrfs_end_bio;
bio->bi_iter.bi_sector = physical >> 9;
@@ -6736,20 +6733,20 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
btrfsic_submit_bio(bio);
}
-static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
+static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
{
-atomic_inc(&bbio->error);
-if (atomic_dec_and_test(&bbio->stripes_pending)) {
+atomic_inc(&bioc->error);
+if (atomic_dec_and_test(&bioc->stripes_pending)) {
/* Should be the original bio. */
-WARN_ON(bio != bbio->orig_bio);
+WARN_ON(bio != bioc->orig_bio);
-btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
+btrfs_io_bio(bio)->mirror_num = bioc->mirror_num;
bio->bi_iter.bi_sector = logical >> 9;
-if (atomic_read(&bbio->error) > bbio->max_errors)
+if (atomic_read(&bioc->error) > bioc->max_errors)
bio->bi_status = BLK_STS_IOERR;
else
bio->bi_status = BLK_STS_OK;
-btrfs_end_bbio(bbio, bio);
+btrfs_end_bioc(bioc, bio);
}
}
@@ -6764,35 +6761,35 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int ret;
int dev_nr;
int total_devs;
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
length = bio->bi_iter.bi_size;
map_length = length;
btrfs_bio_counter_inc_blocked(fs_info);
ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
-&map_length, &bbio, mirror_num, 1);
+&map_length, &bioc, mirror_num, 1);
if (ret) {
btrfs_bio_counter_dec(fs_info);
return errno_to_blk_status(ret);
}
-total_devs = bbio->num_stripes;
-bbio->orig_bio = first_bio;
-bbio->private = first_bio->bi_private;
-bbio->end_io = first_bio->bi_end_io;
-bbio->fs_info = fs_info;
-atomic_set(&bbio->stripes_pending, bbio->num_stripes);
+total_devs = bioc->num_stripes;
+bioc->orig_bio = first_bio;
+bioc->private = first_bio->bi_private;
+bioc->end_io = first_bio->bi_end_io;
+bioc->fs_info = fs_info;
+atomic_set(&bioc->stripes_pending, bioc->num_stripes);
-if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
+if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
-ret = raid56_parity_write(fs_info, bio, bbio,
+ret = raid56_parity_write(fs_info, bio, bioc,
map_length);
} else {
-ret = raid56_parity_recover(fs_info, bio, bbio,
+ret = raid56_parity_recover(fs_info, bio, bioc,
map_length, mirror_num, 1);
}
@@ -6808,12 +6805,12 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
}
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
-dev = bbio->stripes[dev_nr].dev;
+dev = bioc->stripes[dev_nr].dev;
if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
&dev->dev_state) ||
(btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
-bbio_error(bbio, first_bio, logical);
+bioc_error(bioc, first_bio, logical);
continue;
}
@@ -6822,7 +6819,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
else
bio = first_bio;
-submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
+submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
}
btrfs_bio_counter_dec(fs_info);
return BLK_STS_OK;
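
Completion in this file is driven by the stripes_pending counter: btrfs_end_bio() decrements it once per cloned stripe bio, and only the final decrement ends the original bio, after weighing the accumulated error count against max_errors. A compilable userspace model of that scheme, using C11 atomics and invented names, is sketched below:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical userspace model of btrfs_io_context completion counting. */
struct io_context {
	atomic_int stripes_pending;
	atomic_int error;
	int max_errors;
};

/* Called once per stripe bio as it completes; 'failed' marks an I/O error. */
static void end_stripe(struct io_context *ioc, int failed)
{
	if (failed)
		atomic_fetch_add(&ioc->error, 1);

	/* Only the last completion finishes the original request. */
	if (atomic_fetch_sub(&ioc->stripes_pending, 1) == 1) {
		if (atomic_load(&ioc->error) > ioc->max_errors)
			printf("original bio fails with EIO\n");
		else
			printf("original bio completes OK\n");
	}
}

int main(void)
{
	/* Three stripe bios in flight, one failure tolerated. */
	struct io_context ioc = { 3, 0, 1 };

	end_stripe(&ioc, 1);	/* one mirror fails */
	end_stripe(&ioc, 0);
	end_stripe(&ioc, 0);	/* last decrement: within tolerance */
	return 0;
}
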

diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h

@@ -306,11 +306,11 @@ struct btrfs_fs_devices {
/*
* we need the mirror number and stripe index to be passed around
* the call chain while we are processing end_io (especially errors).
-* Really, what we need is a btrfs_bio structure that has this info
+* Really, what we need is a btrfs_io_context structure that has this info
* and is properly sized with its stripe array, but we're not there
* quite yet. We have our own btrfs bioset, and all of the bios
* we allocate are actually btrfs_io_bios. We'll cram as much of
-* struct btrfs_bio as we can into this over time.
+* struct btrfs_io_context as we can into this over time.
*/
struct btrfs_io_bio {
unsigned int mirror_num;
@@ -339,13 +339,29 @@ static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
}
}
-struct btrfs_bio_stripe {
+struct btrfs_io_stripe {
struct btrfs_device *dev;
u64 physical;
u64 length; /* only used for discard mappings */
};
-struct btrfs_bio {
+/*
+ * Context for IO submission for device stripe.
+ *
+ * - Track the unfinished mirrors for mirror based profiles
+ *   Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
+ *
+ * - Contain the logical -> physical mapping info
+ *   Used by submit_stripe_bio() for mapping logical bio
+ *   into physical device address.
+ *
+ * - Contain device replace info
+ *   Used by handle_ops_on_dev_replace() to copy logical bios
+ *   into the new device.
+ *
+ * - Contain RAID56 full stripe logical bytenrs
+ */
+struct btrfs_io_context {
refcount_t refs;
atomic_t stripes_pending;
struct btrfs_fs_info *fs_info;
@@ -365,7 +381,7 @@ struct btrfs_bio {
* so raid_map[0] is the start of our full stripe
*/
u64 *raid_map;
-struct btrfs_bio_stripe stripes[];
+struct btrfs_io_stripe stripes[];
};
struct btrfs_device_info {
@@ -400,11 +416,11 @@ struct map_lookup {
int num_stripes;
int sub_stripes;
int verified_stripes; /* For mount time dev extent verification */
-struct btrfs_bio_stripe stripes[];
+struct btrfs_io_stripe stripes[];
};
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
-(sizeof(struct btrfs_bio_stripe) * (n)))
+(sizeof(struct btrfs_io_stripe) * (n)))
struct btrfs_balance_args;
struct btrfs_balance_progress;
@@ -457,14 +473,14 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
}
}
-void btrfs_get_bbio(struct btrfs_bio *bbio);
-void btrfs_put_bbio(struct btrfs_bio *bbio);
+void btrfs_get_bioc(struct btrfs_io_context *bioc);
+void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
-struct btrfs_bio **bbio_ret, int mirror_num);
+struct btrfs_io_context **bioc_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
-struct btrfs_bio **bbio_ret);
+struct btrfs_io_context **bioc_ret);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
enum btrfs_map_op op, u64 logical,
struct btrfs_io_geometry *io_geom);
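
The header hunk also renames the reference-counting pair to btrfs_get_bioc()/btrfs_put_bioc(). A minimal sketch of that lifetime pattern, assuming the put side stays NULL-tolerant (the old read_zone_info() code below calls the put on a possibly-NULL pointer, so it must be):

#include <stdatomic.h>
#include <stdlib.h>

/* Minimal userspace sketch of the get/put lifetime (hypothetical names). */
struct io_context {
	atomic_int refs;
	/* stripes[], raid_map, ... elided */
};

static struct io_context *ioc_alloc(void)
{
	struct io_context *ioc = calloc(1, sizeof(*ioc));

	if (ioc)
		atomic_store(&ioc->refs, 1);	/* caller owns the first ref */
	return ioc;
}

static void ioc_get(struct io_context *ioc)
{
	atomic_fetch_add(&ioc->refs, 1);
}

static void ioc_put(struct io_context *ioc)
{
	if (!ioc)
		return;	/* put on NULL is a no-op */
	if (atomic_fetch_sub(&ioc->refs, 1) == 1)
		free(ioc);	/* last reference dropped */
}

int main(void)
{
	struct io_context *ioc = ioc_alloc();

	ioc_get(ioc);	/* e.g. handed to a completion handler */
	ioc_put(ioc);	/* completion side drops its ref */
	ioc_put(ioc);	/* original owner drops the last ref: freed */
	return 0;
}
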

diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c

@@ -1502,27 +1502,29 @@ int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 len
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
struct blk_zone *zone)
{
-struct btrfs_bio *bbio = NULL;
+struct btrfs_io_context *bioc = NULL;
u64 mapped_length = PAGE_SIZE;
unsigned int nofs_flag;
int nmirrors;
int i, ret;
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
-&mapped_length, &bbio);
-if (ret || !bbio || mapped_length < PAGE_SIZE) {
-btrfs_put_bbio(bbio);
-return -EIO;
+&mapped_length, &bioc);
+if (ret || !bioc || mapped_length < PAGE_SIZE) {
+ret = -EIO;
+goto out_put_bioc;
}
-if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
-return -EINVAL;
+if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ret = -EINVAL;
+goto out_put_bioc;
+}
nofs_flag = memalloc_nofs_save();
-nmirrors = (int)bbio->num_stripes;
+nmirrors = (int)bioc->num_stripes;
for (i = 0; i < nmirrors; i++) {
-u64 physical = bbio->stripes[i].physical;
-struct btrfs_device *dev = bbio->stripes[i].dev;
+u64 physical = bioc->stripes[i].physical;
+struct btrfs_device *dev = bioc->stripes[i].dev;
/* Missing device */
if (!dev->bdev)
@@ -1535,7 +1537,8 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
break;
}
memalloc_nofs_restore(nofs_flag);
-btrfs_put_bbio(bbio);
+out_put_bioc:
+btrfs_put_bioc(bioc);
return ret;
}
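
Beyond the rename, this hunk restructures read_zone_info() around a single out_put_bioc exit, which also fixes a latent leak: the old RAID56 branch returned -EINVAL without dropping the reference. A compilable model of the single-exit pattern (map_block() and the flag value are stubs invented for illustration):

#include <errno.h>
#include <stdlib.h>

struct io_context { int map_type; };	/* stub */
#define RAID56_MASK 0x1			/* stub flag */

static int map_block(struct io_context **out)	/* stub mapper */
{
	*out = calloc(1, sizeof(**out));
	return *out ? 0 : -ENOMEM;
}

static void ioc_put(struct io_context *ioc)	/* NULL-safe put */
{
	free(ioc);
}

static int read_zone_info_model(void)
{
	struct io_context *ioc = NULL;
	int ret = map_block(&ioc);

	if (ret || !ioc) {
		ret = -EIO;
		goto out_put;
	}
	if (ioc->map_type & RAID56_MASK) {
		ret = -EINVAL;
		goto out_put;	/* previously returned here and leaked */
	}
	/* ... query each mirror's zone here ... */
	ret = 0;
out_put:
	ioc_put(ioc);		/* every path drops the reference */
	return ret;
}

int main(void)
{
	return read_zone_info_model() ? EXIT_FAILURE : EXIT_SUCCESS;
}
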

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c

@@ -179,7 +179,7 @@ static int ceph_releasepage(struct page *page, gfp_t gfp)
static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
{
-struct inode *inode = rreq->mapping->host;
+struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_layout *lo = &ci->i_layout;
u32 blockoff;
@@ -196,7 +196,7 @@ static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
{
-struct inode *inode = subreq->rreq->mapping->host;
+struct inode *inode = subreq->rreq->inode;
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u64 objno, objoff;
@@ -242,7 +242,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
{
struct netfs_read_request *rreq = subreq->rreq;
-struct inode *inode = rreq->mapping->host;
+struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_request *req;
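
All three ceph hunks make the same substitution: read the inode directly from the netfs read request rather than dereferencing it through rreq->mapping->host. A toy model of the two access paths, with struct layouts invented for illustration rather than the real netfs types:

#include <assert.h>

/* Hypothetical stand-ins for the VFS/netfs structures involved. */
struct inode { int ino; };
struct address_space { struct inode *host; };
struct netfs_read_request {
	struct address_space *mapping;
	struct inode *inode;	/* recorded when the request is created */
};

int main(void)
{
	struct inode i = { 1 };
	struct address_space m = { &i };
	struct netfs_read_request rreq = { &m, &i };

	/* Old path: two dereferences through the mapping. */
	assert(rreq.mapping->host == &i);
	/* New path: the request already carries its inode. */
	assert(rreq.inode == &i);
	return 0;
}
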

diff --git a/fs/exec.c b/fs/exec.c

@@ -1301,7 +1301,7 @@ int begin_new_exec(struct linux_binprm * bprm)
bprm->mm = NULL;
#ifdef CONFIG_POSIX_TIMERS
-exit_itimers(me->signal);
+exit_itimers(me);
flush_itimer_signals();
#endif
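
This hunk adopts the new exit_itimers() signature from the "fix race between exit_itimers() and /proc/pid/timers" change in this merge: the function now takes the task itself so it can detach the whole POSIX timer list under sighand->siglock before destroying it. A userspace analogue of that detach-then-destroy pattern, with a pthread mutex standing in for siglock and hypothetical names throughout:

#include <pthread.h>
#include <stdlib.h>

/* Toy singly-linked timer list, standing in for signal->posix_timers. */
struct timer { struct timer *next; };

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static struct timer *posix_timers;	/* readers walk this under siglock */

static void exit_itimers_model(void)
{
	struct timer *head;

	/* Detach the whole list atomically with respect to readers ... */
	pthread_mutex_lock(&siglock);
	head = posix_timers;
	posix_timers = NULL;	/* concurrent readers now see it empty */
	pthread_mutex_unlock(&siglock);

	/* ... then tear it down without racing them. */
	while (head) {
		struct timer *t = head;

		head = head->next;
		free(t);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct timer *t = calloc(1, sizeof(*t));

		if (!t)
			break;
		t->next = posix_timers;
		posix_timers = t;
	}
	exit_itimers_model();
	return 0;
}
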

diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c

@@ -230,7 +230,7 @@ static int ksmbd_kthread_fn(void *p)
break;
}
ret = kernel_accept(iface->ksmbd_socket, &client_sk,
-O_NONBLOCK);
+SOCK_NONBLOCK);
mutex_unlock(&iface->sock_release_lock);
if (ret) {
if (ret == -EAGAIN)
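
The one-line ksmbd fix passes kernel_accept() a socket-type flag (SOCK_NONBLOCK) instead of a file-status flag (O_NONBLOCK). The two constants coincide on many architectures but not all, and the contract, like userspace accept4(2), is defined in terms of SOCK_* flags. A small userspace illustration:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/*
	 * SOCK_* flags and O_* file-status flags are separate namespaces;
	 * they often share values, but accept4() and kernel_accept()
	 * are specified to take SOCK_NONBLOCK.
	 */
	printf("SOCK_NONBLOCK = %#x, O_NONBLOCK = %#x\n",
	       (unsigned)SOCK_NONBLOCK, (unsigned)O_NONBLOCK);

	/* Correct usage mirrors the fixed kernel call:
	 *   accept4(listen_fd, NULL, NULL, SOCK_NONBLOCK);
	 */
	return 0;
}
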

[diff truncated: additional changed files are not shown]