Merge 5.15.36 into android14-5.15

Changes in 5.15.36
	fs: remove __sync_filesystem
	block: remove __sync_blockdev
	block: simplify the block device syncing code
	vfs: make sync_filesystem return errors from ->sync_fs
	xfs: return errors in xfs_fs_sync_fs
	dma-mapping: remove bogus test for pfn_valid from dma_map_resource
	arm64/mm: drop HAVE_ARCH_PFN_VALID
	etherdevice: Adjust ether_addr* prototypes to silence -Wstringop-overead
	mm: page_alloc: fix building error on -Werror=array-compare
	perf tools: Fix segfault accessing sample_id xyarray
	mm, kfence: support kmem_dump_obj() for KFENCE objects
	gfs2: assign rgrp glock before compute_bitstructs
	scsi: ufs: core: scsi_get_lba() error fix
	net/sched: cls_u32: fix netns refcount changes in u32_change()
	ALSA: usb-audio: Clear MIDI port active flag after draining
	ALSA: hda/realtek: Add quirk for Clevo NP70PNP
	ASoC: atmel: Remove system clock tree configuration for at91sam9g20ek
	ASoC: topology: Correct error handling in soc_tplg_dapm_widget_create()
	ASoC: rk817: Use devm_clk_get() in rk817_platform_probe
	ASoC: msm8916-wcd-digital: Check failure for devm_snd_soc_register_component
	ASoC: codecs: wcd934x: do not switch off SIDO Buck when codec is in use
	dmaengine: idxd: fix device cleanup on disable
	dmaengine: imx-sdma: Fix error checking in sdma_event_remap
	dmaengine: mediatek:Fix PM usage reference leak of mtk_uart_apdma_alloc_chan_resources
	dmaengine: dw-edma: Fix unaligned 64bit access
	spi: spi-mtk-nor: initialize spi controller after resume
	esp: limit skb_page_frag_refill use to a single page
	spi: cadence-quadspi: fix incorrect supports_op() return value
	igc: Fix infinite loop in release_swfw_sync
	igc: Fix BUG: scheduling while atomic
	igc: Fix suspending when PTM is active
	ALSA: hda/hdmi: fix warning about PCM count when used with SOF
	rxrpc: Restore removed timer deletion
	net/smc: Fix sock leak when release after smc_shutdown()
	net/packet: fix packet_sock xmit return value checking
	ip6_gre: Avoid updating tunnel->tun_hlen in __gre6_xmit()
	ip6_gre: Fix skb_under_panic in __gre6_xmit()
	net: restore alpha order to Ethernet devices in config
	net/sched: cls_u32: fix possible leak in u32_init_knode()
	l3mdev: l3mdev_master_upper_ifindex_by_index_rcu should be using netdev_master_upper_dev_get_rcu
	ipv6: make ip6_rt_gc_expire an atomic_t
	can: isotp: stop timeout monitoring when no first frame was sent
	net: dsa: hellcreek: Calculate checksums in tagger
	net: mscc: ocelot: fix broken IP multicast flooding
	netlink: reset network and mac headers in netlink_dump()
	drm/i915/display/psr: Unset enable_psr2_sel_fetch if other checks in intel_psr2_config_valid() fails
	net: stmmac: Use readl_poll_timeout_atomic() in atomic state
	dmaengine: idxd: add RO check for wq max_batch_size write
	dmaengine: idxd: add RO check for wq max_transfer_size write
	dmaengine: idxd: skip clearing device context when device is read-only
	selftests: mlxsw: vxlan_flooding: Prevent flooding of unwanted packets
	arm64: mm: fix p?d_leaf()
	ARM: vexpress/spc: Avoid negative array index when !SMP
	reset: renesas: Check return value of reset_control_deassert()
	reset: tegra-bpmp: Restore Handle errors in BPMP response
	platform/x86: samsung-laptop: Fix an unsigned comparison which can never be negative
	ALSA: usb-audio: Fix undefined behavior due to shift overflowing the constant
	drm/msm/disp: check the return value of kzalloc()
	arm64: dts: imx: Fix imx8*-var-som touchscreen property sizes
	vxlan: fix error return code in vxlan_fdb_append
	cifs: Check the IOCB_DIRECT flag, not O_DIRECT
	net: atlantic: Avoid out-of-bounds indexing
	mt76: Fix undefined behavior due to shift overflowing the constant
	brcmfmac: sdio: Fix undefined behavior due to shift overflowing the constant
	dpaa_eth: Fix missing of_node_put in dpaa_get_ts_info()
	drm/msm/mdp5: check the return of kzalloc()
	net: macb: Restart tx only if queue pointer is lagging
	scsi: iscsi: Release endpoint ID when its freed
	scsi: iscsi: Merge suspend fields
	scsi: iscsi: Fix NOP handling during conn recovery
	scsi: qedi: Fix failed disconnect handling
	stat: fix inconsistency between struct stat and struct compat_stat
	VFS: filename_create(): fix incorrect intent.
	nvme: add a quirk to disable namespace identifiers
	nvme-pci: disable namespace identifiers for the MAXIO MAP1002/1202
	nvme-pci: disable namespace identifiers for Qemu controllers
	EDAC/synopsys: Read the error count from the correct register
	mm/memory-failure.c: skip huge_zero_page in memory_failure()
	memcg: sync flush only if periodic flush is delayed
	mm, hugetlb: allow for "high" userspace addresses
	oom_kill.c: futex: delay the OOM reaper to allow time for proper futex cleanup
	mm/mmu_notifier.c: fix race in mmu_interval_notifier_remove()
	ata: pata_marvell: Check the 'bmdma_addr' beforing reading
	dma: at_xdmac: fix a missing check on list iterator
	dmaengine: imx-sdma: fix init of uart scripts
	net: atlantic: invert deep par in pm functions, preventing null derefs
	Input: omap4-keypad - fix pm_runtime_get_sync() error checking
	scsi: sr: Do not leak information in ioctl
	sched/pelt: Fix attach_entity_load_avg() corner case
	perf/core: Fix perf_mmap fail when CONFIG_PERF_USE_VMALLOC enabled
	drm/panel/raspberrypi-touchscreen: Avoid NULL deref if not initialised
	drm/panel/raspberrypi-touchscreen: Initialise the bridge in prepare
	KVM: PPC: Fix TCE handling for VFIO
	drm/vc4: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
	powerpc/perf: Fix power9 event alternatives
	powerpc/perf: Fix power10 event alternatives
	perf script: Always allow field 'data_src' for auxtrace
	perf report: Set PERF_SAMPLE_DATA_SRC bit for Arm SPE event
	xtensa: patch_text: Fixup last cpu should be master
	xtensa: fix a7 clobbering in coprocessor context load/store
	openvswitch: fix OOB access in reserve_sfa_size()
	gpio: Request interrupts after IRQ is initialized
	ASoC: soc-dapm: fix two incorrect uses of list iterator
	e1000e: Fix possible overflow in LTR decoding
	ARC: entry: fix syscall_trace_exit argument
	arm_pmu: Validate single/group leader events
	KVM: x86/pmu: Update AMD PMC sample period to fix guest NMI-watchdog
	KVM: x86: Pend KVM_REQ_APICV_UPDATE during vCPU creation to fix a race
	KVM: nVMX: Defer APICv updates while L2 is active until L1 is active
	KVM: SVM: Flush when freeing encrypted pages even on SME_COHERENT CPUs
	netfilter: conntrack: convert to refcount_t api
	netfilter: conntrack: avoid useless indirection during conntrack destruction
	ext4: fix fallocate to use file_modified to update permissions consistently
	ext4: fix symlink file size not match to file content
	ext4: fix use-after-free in ext4_search_dir
	ext4: limit length to bitmap_maxbytes - blocksize in punch_hole
	ext4, doc: fix incorrect h_reserved size
	ext4: fix overhead calculation to account for the reserved gdt blocks
	ext4: force overhead calculation if the s_overhead_cluster makes no sense
	netfilter: nft_ct: fix use after free when attaching zone template
	jbd2: fix a potential race while discarding reserved buffers after an abort
	spi: atmel-quadspi: Fix the buswidth adjustment between spi-mem and controller
	block/compat_ioctl: fix range check in BLKGETSIZE
	arm64: dts: qcom: add IPA qcom,qmp property
	Linux 5.15.36

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iba23a60bda8fd07f26fd7f9217f208c2e6ee26c2


@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
- Checksum of the extended attribute block.
* - 0x14
- \_\_u32
-- h\_reserved[2]
+- h\_reserved[3]
- Zero.
The checksum is calculated against the FS UUID, the 64-bit block number


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 35
+SUBLEVEL = 36
EXTRAVERSION =
NAME = Trick or Treat


@@ -196,6 +196,7 @@ tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
;POST Sys Call Ptrace Hook
+mov r0, sp ; pt_regs needed
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
; we'd done before calling post hook above


@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
}
cluster = topology_physical_package_id(cpu_dev->id);
-if (init_opp_table[cluster])
+if (cluster < 0 || init_opp_table[cluster])
continue;
if (ve_init_opp_table(cpu_dev))
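Note: a minimal userspace sketch of the bug class fixed above (stand-in names, not the driver's code). With CONFIG_SMP disabled, topology_physical_package_id() evaluates to -1, so the unguarded init_opp_table[cluster] read indexes before the array:

#include <stdio.h>

#define MAX_CLUSTERS 2

/* Stand-in for topology_physical_package_id(): -1 when the topology
 * is unknown (the !SMP case), a valid cluster id otherwise. */
static int package_id(int cpu, int smp)
{
	return smp ? cpu / 2 : -1;
}

int main(void)
{
	int init_opp_table[MAX_CLUSTERS] = { 0 };
	int cluster = package_id(0, /* smp = */ 0);

	/* The patched check refuses negative ids before indexing;
	 * the old code would have read init_opp_table[-1]. */
	if (cluster < 0 || init_opp_table[cluster])
		printf("skipping cluster %d\n", cluster);
	else
		init_opp_table[cluster] = 1;
	return 0;
}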


@@ -89,12 +89,12 @@
pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <125>;
-touchscreen-size-x = /bits/ 16 <4008>;
+touchscreen-size-x = <4008>;
ti,y-min = /bits/ 16 <282>;
-touchscreen-size-y = /bits/ 16 <3864>;
+touchscreen-size-y = <3864>;
ti,x-plate-ohms = /bits/ 16 <180>;
-touchscreen-max-pressure = /bits/ 16 <255>;
-touchscreen-average-samples = /bits/ 16 <10>;
+touchscreen-max-pressure = <255>;
+touchscreen-average-samples = <10>;
ti,debounce-tol = /bits/ 16 <3>;
ti,debounce-rep = /bits/ 16 <1>;
ti,settle-delay-usec = /bits/ 16 <150>;


@@ -70,12 +70,12 @@
pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <125>;
-touchscreen-size-x = /bits/ 16 <4008>;
+touchscreen-size-x = <4008>;
ti,y-min = /bits/ 16 <282>;
-touchscreen-size-y = /bits/ 16 <3864>;
+touchscreen-size-y = <3864>;
ti,x-plate-ohms = /bits/ 16 <180>;
-touchscreen-max-pressure = /bits/ 16 <255>;
-touchscreen-average-samples = /bits/ 16 <10>;
+touchscreen-max-pressure = <255>;
+touchscreen-average-samples = <10>;
ti,debounce-tol = /bits/ 16 <3>;
ti,debounce-rep = /bits/ 16 <1>;
ti,settle-delay-usec = /bits/ 16 <150>;


@@ -1460,6 +1460,8 @@
"imem",
"config";
+qcom,qmp = <&aoss_qmp>;
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;
qcom,smem-state-names = "ipa-clock-enabled-valid",


@@ -615,6 +615,8 @@
interconnect-names = "memory",
"config";
+qcom,qmp = <&aoss_qmp>;
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;
qcom,smem-state-names = "ipa-clock-enabled-valid",


@@ -736,6 +736,8 @@
interconnect-names = "memory",
"config";
+qcom,qmp = <&aoss_qmp>;
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;
qcom,smem-state-names = "ipa-clock-enabled-valid",


@@ -544,7 +544,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
-#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
@@ -634,7 +634,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
-#define pud_leaf(pud) pud_sect(pud)
+#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)


@@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
tbl[idx % TCES_PER_PAGE] = tce;
}
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
-unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+struct iommu_table *tbl, unsigned long entry)
{
+unsigned long i;
+unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+for (i = 0; i < subpages; ++i) {
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
-iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+}
}
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
break;
}
+iommu_tce_kill(tbl, io_entry, subpages);
return ret;
}
@@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
break;
}
+iommu_tce_kill(tbl, io_entry, subpages);
return ret;
}
@@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
-iommu_tce_kill(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
-kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
goto unlock_exit;
}
}
@@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
if (get_user(tce, tces + i)) {
ret = H_TOO_HARD;
-goto invalidate_exit;
+goto unlock_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
-goto invalidate_exit;
+goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
-kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
-entry);
-goto invalidate_exit;
+kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+entry + i);
+goto unlock_exit;
}
}
kvmppc_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
-list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-iommu_tce_kill(stit->tbl, entry, npages);
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
-goto invalidate_exit;
+return ret;
WARN_ON_ONCE(1);
-kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
-list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);


@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
-unsigned long entry)
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
+struct iommu_table *tbl, unsigned long entry)
{
+unsigned long i;
+unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+for (i = 0; i < subpages; ++i) {
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
-iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
+iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
+}
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
break;
}
+iommu_tce_kill_rm(tbl, io_entry, subpages);
return ret;
}
@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
break;
}
+iommu_tce_kill_rm(tbl, io_entry, subpages);
return ret;
}
@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
-iommu_tce_kill_rm(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
-kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
return ret;
}
}
@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
ua = 0;
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
-goto invalidate_exit;
+goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
-kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
-entry);
-goto invalidate_exit;
+kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
+entry + i);
+goto unlock_exit;
}
}
kvmppc_rm_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
-list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-iommu_tce_kill_rm(stit->tbl, entry, npages);
unlock_exit:
if (!prereg)
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
-goto invalidate_exit;
+return ret;
WARN_ON_ONCE_RM(1);
-kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
-list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
return ret;
}


@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
+{ PM_CYC_ALT, PM_CYC },
{ PM_INST_CMPL_ALT, PM_INST_CMPL },
-{ PM_CYC_ALT, PM_CYC },
};
static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])


@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
+{ PM_BR_2PATH, PM_BR_2PATH_ALT },
{ PM_INST_DISP, PM_INST_DISP_ALT },
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
-{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
{ PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
-{ PM_BR_2PATH, PM_BR_2PATH_ALT },
+{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])


@@ -28,15 +28,13 @@ typedef u16 compat_ipc_pid_t;
typedef __kernel_fsid_t compat_fsid_t;
struct compat_stat {
-compat_dev_t st_dev;
-u16 __pad1;
+u32 st_dev;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
-compat_dev_t st_rdev;
-u16 __pad2;
+u32 st_rdev;
u32 st_size;
u32 st_blksize;
u32 st_blocks;


@@ -141,6 +141,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
return sample_period;
}
+static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+if (!pmc->perf_event || pmc->is_paused)
+return;
+perf_event_period(pmc->perf_event,
+get_sample_period(pmc, pmc->counter));
+}
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);


@@ -256,6 +256,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
if (pmc) {
pmc->counter += data - pmc_read_counter(pmc);
+pmc_update_sample_period(pmc);
return 0;
}
/* MSR_EVNTSELn */


@@ -1990,11 +1990,14 @@ static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
unsigned long len)
{
/*
-* If hardware enforced cache coherency for encrypted mappings of the
-* same physical page is supported, nothing to do.
+* If CPU enforced cache coherency for encrypted mappings of the
+* same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
+* flush is still needed in order to work properly with DMA devices.
*/
-if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
+if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
+clflush_cache_range(va, PAGE_SIZE);
return;
+}
/*
* If the VM Page Flush MSR is supported, use it to flush the page


@@ -4601,6 +4601,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
}
+if (vmx->nested.update_vmcs01_apicv_status) {
+vmx->nested.update_vmcs01_apicv_status = false;
+kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+}
if ((vm_exit_reason != -1) &&
(enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
vmx->nested.need_vmcs12_to_shadow_sync = true;


@@ -439,15 +439,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
-if (pmc->perf_event && !pmc->is_paused)
-perf_event_period(pmc->perf_event,
-get_sample_period(pmc, data));
+pmc_update_sample_period(pmc);
return 0;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc->counter += data - pmc_read_counter(pmc);
-if (pmc->perf_event && !pmc->is_paused)
-perf_event_period(pmc->perf_event,
-get_sample_period(pmc, data));
+pmc_update_sample_period(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)


@@ -4098,6 +4098,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+if (is_guest_mode(vcpu)) {
+vmx->nested.update_vmcs01_apicv_status = true;
+return;
+}
pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
if (cpu_has_secondary_exec_ctrls()) {
if (kvm_vcpu_apicv_active(vcpu))


@@ -164,6 +164,7 @@ struct nested_vmx {
bool change_vmcs01_virtual_apic_mode;
bool reload_vmcs01_apic_access_page;
bool update_vmcs01_cpu_dirty_logging;
+bool update_vmcs01_apicv_status;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to


@@ -10813,8 +10813,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
if (r < 0)
goto fail_mmu_destroy;
-if (kvm_apicv_activated(vcpu->kvm))
-vcpu->arch.apicv_active = true;
+/*
+* Defer evaluating inhibits until the vCPU is first run, as
+* this vCPU will not get notified of any changes until this
+* vCPU is visible to other vCPUs (marked online and added to
+* the set of vCPUs). Opportunistically mark APICv active as
+* VMX in particularly is highly unlikely to have inhibits.
+* Ignore the current per-VM APICv state so that vCPU creation
+* is guaranteed to run with a deterministic value, the request
+* will ensure the vCPU gets the correct state before VM-Entry.
+*/
+if (enable_apicv) {
+vcpu->arch.apicv_active = true;
+kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+}
} else
static_branch_inc(&kvm_has_noapic_vcpu);


@@ -29,7 +29,7 @@
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lsave_cp_regs_cp##x: \
-xchal_cp##x##_store a2 a4 a5 a6 a7; \
+xchal_cp##x##_store a2 a3 a4 a5 a6; \
jx a0; \
.endif
@@ -46,7 +46,7 @@
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lload_cp_regs_cp##x: \
-xchal_cp##x##_load a2 a4 a5 a6 a7; \
+xchal_cp##x##_load a2 a3 a4 a5 a6; \
jx a0; \
.endif


@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
{
struct patch *patch = data;
-if (atomic_inc_return(&patch->cpu_count) == 1) {
+if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
local_patch_text(patch->addr, patch->data, patch->sz);
atomic_inc(&patch->cpu_count);
} else {
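Note: the patching rendezvous above is easy to model in userspace. A simplified pthread sketch (a stand-in for stop_machine(), assuming a fixed CPU count and seq-cst atomics): the last CPU to check in performs the patch while the others spin, so no CPU can execute the text mid-rewrite; the old "== 1" test let the first arrival patch while later CPUs were still running.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int cpu_count;
static int patched;

static void *stopped_cpu(void *arg)
{
	(void)arg;
	if (atomic_fetch_add(&cpu_count, 1) + 1 == NCPUS) {
		patched = 1;			/* local_patch_text() */
		atomic_fetch_add(&cpu_count, 1);
	} else {
		while (atomic_load(&cpu_count) <= NCPUS)
			;			/* wait for the patcher */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (int i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, stopped_cpu, NULL);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	printf("patched=%d\n", patched);
	return 0;
}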


@@ -184,14 +184,13 @@ int sb_min_blocksize(struct super_block *sb, int size)
EXPORT_SYMBOL(sb_min_blocksize);
-int __sync_blockdev(struct block_device *bdev, int wait)
+int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
-if (!wait)
-return filemap_flush(bdev->bd_inode->i_mapping);
-return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+return filemap_flush(bdev->bd_inode->i_mapping);
}
+EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
/*
* Write out and wait upon all the dirty data associated with a block
@@ -199,7 +198,9 @@ int __sync_blockdev(struct block_device *bdev, int wait)
*/
int sync_blockdev(struct block_device *bdev)
{
-return __sync_blockdev(bdev, 1);
+if (!bdev)
+return 0;
+return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
@@ -1016,7 +1017,7 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty)
}
EXPORT_SYMBOL(__invalidate_device);
-void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+void sync_bdevs(bool wait)
{
struct inode *inode, *old_inode = NULL;
@@ -1047,8 +1048,19 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
bdev = I_BDEV(inode);
mutex_lock(&bdev->bd_disk->open_mutex);
-if (bdev->bd_openers)
-func(bdev, arg);
+if (!bdev->bd_openers) {
+; /* skip */
+} else if (wait) {
+/*
+* We keep the error status of individual mapping so
+* that applications can catch the writeback error using
+* fsync(2). See filemap_fdatawait_keep_errors() for
+* details.
+*/
+filemap_fdatawait_keep_errors(inode->i_mapping);
+} else {
+filemap_fdatawrite(inode->i_mapping);
+}
mutex_unlock(&bdev->bd_disk->open_mutex);
spin_lock(&blockdev_superblock->s_inode_list_lock);


@@ -645,7 +645,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
-if ((size >> 9) > ~0UL)
+if ((size >> 9) > ~(compat_ulong_t)0)
return -EFBIG;
return compat_put_ulong(argp, size >> 9);
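Note: a standalone sketch of why the old bound was dead code on 64-bit kernels (types modeled with stdint, not the kernel's). There, ~0UL is 2^64-1, so no sector count ever exceeded it and 32-bit callers silently got a truncated size; comparing against the 32-bit maximum restores the intended -EFBIG.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_ulong_t;

int main(void)
{
	uint64_t size = 3ULL << 40;	/* a 3 TiB block device */
	uint64_t sectors = size >> 9;	/* does not fit in 32 bits */

	/* Old guard: on an LP64 kernel ~0UL is UINT64_MAX, never exceeded. */
	printf("old guard fires: %d\n", sectors > ~0UL);

	/* Fixed guard: compare against the compat (32-bit) maximum. */
	printf("new guard fires: %d\n",
	       sectors > (uint64_t)~(compat_ulong_t)0);
	return 0;
}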


@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
switch(ap->port_no)
{
case 0:
+if (!ap->ioaddr.bmdma_addr)
+return ATA_CBL_PATA_UNK;
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;


@@ -1450,7 +1450,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
-struct at_xdmac_desc *desc, *_desc;
+struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
int residue, retry;
@@ -1565,12 +1565,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
* microblock.
*/
descs_list = &desc->descs_list;
-list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+desc = iter;
break;
+}
}
residue += cur_ubc << dwidth;
dma_set_residue(txstate, residue);


@@ -415,8 +415,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
#ifdef CONFIG_64BIT
-SET_CH_64(dw, chan->dir, chan->id, llp.reg,
-chunk->ll_region.paddr);
+/* llp is not aligned on 64bit -> keep 32bit accesses */
+SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+lower_32_bits(chunk->ll_region.paddr));
+SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+upper_32_bits(chunk->ll_region.paddr));
#else /* CONFIG_64BIT */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));


@@ -406,7 +406,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
lockdep_assert_held(&wq->wq_lock);
idxd_wq_disable_cleanup(wq);
-wq->size = 0;
wq->group = NULL;
}
@@ -723,14 +722,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
-idxd_wq_device_reset_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
+idxd_wq_device_reset_cleanup(wq);
}
}
void idxd_device_clear_state(struct idxd_device *idxd)
{
+if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+return;
idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd);
idxd_device_wqs_clear_state(idxd);


@@ -842,6 +842,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
u64 xfer_size;
int rc;
+if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -876,6 +879,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
u64 batch_size;
int rc;
+if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;


@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
s32 per_2_firi_addr;
s32 mcu_2_firi_addr;
s32 uart_2_per_addr;
-s32 uart_2_mcu_ram_addr;
+s32 uart_2_mcu_addr;
s32 per_2_app_addr;
s32 mcu_2_app_addr;
s32 per_2_per_addr;
s32 uartsh_2_per_addr;
-s32 uartsh_2_mcu_ram_addr;
+s32 uartsh_2_mcu_addr;
s32 per_2_shp_addr;
s32 mcu_2_shp_addr;
s32 ata_2_mcu_addr;
@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
-s32 uart_2_mcu_addr;
-s32 uartsh_2_mcu_addr;
+s32 uart_2_mcu_rom_addr;
+s32 uartsh_2_mcu_rom_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */
@@ -1780,17 +1780,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i];
/*
-* get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
-* they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
-* to be compatible with legacy freescale/nxp sdma firmware, and they
-* are located in the bottom part of sdma_script_start_addrs which are
-* beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+* For compatibility with NXP internal legacy kernel before 4.19 which
+* is based on uart ram script and mainline kernel based on uart rom
+* script, both uart ram/rom scripts are present in newer sdma
+* firmware. Use the rom versions if they are present (V3 or newer).
*/
-if (addr->uart_2_mcu_addr)
-sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
-if (addr->uartsh_2_mcu_addr)
-sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
+if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
+if (addr->uart_2_mcu_rom_addr)
+sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
+if (addr->uartsh_2_mcu_rom_addr)
+sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
+}
}
static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1869,7 +1869,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
u32 reg, val, shift, num_map, i;
int ret = 0;
-if (IS_ERR(np) || IS_ERR(gpr_np))
+if (IS_ERR(np) || !gpr_np)
goto out;
event_remap = of_find_property(np, propname, NULL);
@@ -1917,7 +1917,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
}
out:
-if (!IS_ERR(gpr_np))
+if (gpr_np)
of_node_put(gpr_np);
return ret;
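Note: the IS_ERR()/NULL mixup fixed above is a common pitfall, and a compact model (IS_ERR simplified from the kernel's ERR_PTR scheme) shows why the old check passed a failed lookup through: of_parse_phandle() returns NULL on failure, and NULL is not in the top-of-address-space error range IS_ERR() tests for.

#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified model of the kernel helper: an "error pointer" lives in
 * the last 4095 values of the address space. */
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *gpr_np = NULL;	/* lookup failed: NULL, not ERR_PTR(-E...) */

	/* IS_ERR(NULL) is false, so the old check missed the failure;
	 * the fix tests !gpr_np instead. */
	printf("IS_ERR(NULL)=%d  !gpr_np=%d\n", IS_ERR(gpr_np), !gpr_np);
	return 0;
}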


@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
unsigned int status;
int ret;
-ret = pm_runtime_get_sync(mtkd->ddev.dev);
+ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
if (ret < 0) {
pm_runtime_put_noidle(chan->device->dev);
return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
ret = readx_poll_timeout(readl, c->base + VFF_EN,
status, !status, 10, 100);
if (ret)
-return ret;
+goto err_pm;
ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
if (ret < 0) {
dev_err(chan->device->dev, "Can't request dma IRQ\n");
-return -EINVAL;
+ret = -EINVAL;
+goto err_pm;
}
if (mtkd->support_33bits)
mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+err_pm:
+pm_runtime_put_noidle(mtkd->ddev.dev);
return ret;
}


@@ -163,6 +163,11 @@
#define ECC_STAT_CECNT_SHIFT 8
#define ECC_STAT_BITNUM_MASK 0x7F
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT 16
+#define ECC_ERRCNT_CECNT_MASK 0xFFFF
/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST 0x20200
#define DDR_QOSUE_MASK 0x4
@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
base = priv->baseaddr;
p = &priv->stat;
+regval = readl(base + ECC_ERRCNT_OFST);
+p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+if (!p->ce_cnt)
+goto ue_err;
regval = readl(base + ECC_STAT_OFST);
if (!regval)
return 1;
-p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
-p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
-if (!p->ce_cnt)
-goto ue_err;
p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
regval = readl(base + ECC_CEADDR0_OFST);


@@ -1560,8 +1560,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gpiochip_set_irq_hooks(gc);
-acpi_gpiochip_request_interrupts(gc);
/*
* Using barrier() here to prevent compiler from reordering
* gc->irq.initialized before initialization of above
@@ -1571,6 +1569,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gc->irq.initialized = true;
+acpi_gpiochip_request_interrupts(gc);
return 0;
}


@@ -936,6 +936,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+/* Wa_16011303918:adl-p */
+if (crtc_state->vrr.enable &&
+IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+drm_dbg_kms(&dev_priv->drm,
+"PSR2 not enabled, not compatible with HW stepping + VRR\n");
+return false;
+}
+if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+drm_dbg_kms(&dev_priv->drm,
+"PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+return false;
+}
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -949,12 +963,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
-return false;
+goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
-return false;
+goto unsupported;
}
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -963,25 +977,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
-return false;
-}
-if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
-drm_dbg_kms(&dev_priv->drm,
-"PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
-return false;
-}
-/* Wa_16011303918:adl-p */
-if (crtc_state->vrr.enable &&
-IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
-drm_dbg_kms(&dev_priv->drm,
-"PSR2 not enabled, not compatible with HW stepping + VRR\n");
-return false;
+goto unsupported;
}
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
+unsupported:
+crtc_state->enable_psr2_sel_fetch = false;
+return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,


@@ -90,7 +90,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_mdp5_plane_state(plane->state));
plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+if (!mdp5_state)
+return;
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
mdp5_state->base.zpos = STAGE_BASE;


@@ -169,6 +169,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
va_list va;
new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+if (!new_blk)
+return;
va_start(va, fmt);


@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
-dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
return 0;
}
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
+return 0;
+}
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+struct rpi_touchscreen *ts = panel_to_ts(panel);
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
-.prepare = rpi_touchscreen_noop,
+.prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};


@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
unsigned long phy_clock;
int ret;
-ret = pm_runtime_get_sync(dev);
+ret = pm_runtime_resume_and_get(dev);
if (ret) {
DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;


@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* revision register.
*/
error = pm_runtime_get_sync(dev);
-if (error) {
+if (error < 0) {
dev_err(dev, "pm_runtime_get_sync() failed\n");
pm_runtime_put_noidle(dev);
return error;


@@ -34,15 +34,6 @@ source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -56,6 +47,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -82,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -125,8 +123,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -138,10 +137,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -161,6 +160,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -168,10 +168,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"


@@ -480,8 +480,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
-for (i = 0U, aq_vec = self->aq_vec[0];
-self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+for (i = 0U; self->aq_vecs > i; ++i) {
+aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -511,8 +511,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
-for (i = 0U, aq_vec = self->aq_vec[0];
-self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+for (i = 0U; self->aq_vecs > i; ++i) {
+aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));


@@ -444,22 +444,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
-return aq_suspend_common(dev, false);
+return aq_suspend_common(dev, true);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
-return aq_suspend_common(dev, true);
+return aq_suspend_common(dev, false);
}
static int aq_pm_thaw(struct device *dev)
{
-return atl_resume_common(dev, false);
+return atl_resume_common(dev, true);
}
static int aq_pm_resume_restore(struct device *dev)
{
-return atl_resume_common(dev, true);
+return atl_resume_common(dev, false);
}
static const struct dev_pm_ops aq_pm_ops = {


@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
-for (i = 0U, ring = self->ring[0];
-self->tx_rings > i; ++i, ring = self->ring[i]) {
+for (i = 0U; self->tx_rings > i; ++i) {
+ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
-for (i = 0U, ring = self->ring[0];
-self->tx_rings > i; ++i, ring = self->ring[i]) {
+for (i = 0U; self->tx_rings > i; ++i) {
+ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
-for (i = 0U, ring = self->ring[0];
-self->tx_rings > i; ++i, ring = self->ring[i]) {
+for (i = 0U; self->tx_rings > i; ++i) {
+ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
-for (i = 0U, ring = self->ring[0];
-self->tx_rings > i; ++i, ring = self->ring[i]) {
+for (i = 0U; self->tx_rings > i; ++i) {
+ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
-for (i = 0U, ring = self->ring[0];
-self->tx_rings > i; ++i, ring = self->ring[i]) {
+for (i = 0U; self->tx_rings > i; ++i) {
+ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
-for (i = 0U, ring = self->ring[0];
-self->tx_rings > i; ++i, ring = self->ring[i]) {
+for (i = 0U; self->tx_rings > i; ++i) {
+ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
if (i < self->rx_rings)
aq_ring_free(&ring[AQ_VEC_RX_ID]);


@@ -1689,6 +1689,7 @@ static void macb_tx_restart(struct macb_queue *queue)
unsigned int head = queue->tx_head;
unsigned int tail = queue->tx_tail;
struct macb *bp = queue->bp;
+unsigned int head_idx, tbqp;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1696,6 +1697,13 @@ static void macb_tx_restart(struct macb_queue *queue)
if (head == tail)
return;
+tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+if (tbqp == head_idx)
+return;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}


@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
-if (fman_node)
+if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+of_node_put(fman_node);
+}
-if (ptp_node)
+if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+of_node_put(ptp_node);
+}
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);


@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
-u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
-u16 lat_enc_d = 0; /* latency decoded */
+u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
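Note: the widening above matters because decoded LTR latencies outgrow 16 bits. A rough sketch of the arithmetic (encoding assumed per the PCIe LTR format: a 10-bit value scaled by 2^(5*scale)):

#include <stdint.h>
#include <stdio.h>

static uint32_t ltr_decode(uint16_t enc)
{
	uint32_t value = enc & 0x3ff;		/* 10-bit latency value */
	uint32_t scale = (enc >> 10) & 0x7;	/* multiplier exponent */

	return value << (5 * scale);
}

int main(void)
{
	uint16_t enc = (2 << 10) | 1000;	/* scale=2, value=1000 */
	uint32_t decoded = ltr_decode(enc);

	/* 1000 << 10 = 1024000; stored in a u16 it truncates to 40960,
	 * which is the overflow the patch avoids. */
	printf("decoded=%u truncated=%u\n", (unsigned)decoded,
	       (unsigned)(uint16_t)decoded);
	return 0;
}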


@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
-while (igc_get_hw_semaphore_i225(hw))
-; /* Empty */
+/* Releasing the resource requires first getting the HW semaphore.
+* If we fail to get the semaphore, there is nothing we can do,
+* except log an error and quit. We are not allowed to hang here
+* indefinitely, as it may cause denial of service or system crash.
+*/
+if (igc_get_hw_semaphore_i225(hw)) {
+hw_dbg("Failed to release SW_FW_SYNC.\n");
+return;
+}
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;


@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-usleep_range(500, 1000);
+udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-usleep_range(500, 1000);
+udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;


@@ -996,6 +996,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+struct igc_hw *hw = &adapter->hw;
+u32 ctrl;
+ctrl = rd32(IGC_PTM_CTRL);
+ctrl &= ~IGC_PTM_CTRL_EN;
+wr32(IGC_PTM_CTRL, ctrl);
+}
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1013,8 +1024,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
-if (pci_device_is_present(adapter->pdev))
+if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
+igc_ptm_stop(adapter);
+}
}
/**


@@ -1932,6 +1932,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,


@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
-return readl_poll_timeout(ioaddr + PTP_TCR, value,
+return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
!(value & PTP_TCR_TSINIT),
-10000, 100000);
+10, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)


@@ -711,11 +711,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
-return -ENOBUFS;
+return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
-return -ENOBUFS;
+return -ENOMEM;
}
rd->remote_ip = *ip;


@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
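Note: the cast above avoids shifting into the sign bit of a signed int, which is undefined behavior. A minimal reproduction of the pattern (macro name copied from the hunk, values illustrative):

#include <stdio.h>

#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))

int main(void)
{
	int chip = 0x8000, pmu = 0x11;

	/* Without the cast, 0x8000 << 16 overflows signed 32-bit int and
	 * -fsanitize=undefined flags it; as unsigned it is well defined. */
	unsigned int key = SDIOD_DRVSTR_KEY(chip, pmu);

	printf("key=0x%x\n", key);
	return 0;
}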


@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
-mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);


@@ -1354,6 +1354,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+return NVME_NIDT_EUI64_LEN;
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
return NVME_NIDT_EUI64_LEN;
case NVME_NIDT_NGUID:
@@ -1362,6 +1364,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+return NVME_NIDT_NGUID_LEN;
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
return NVME_NIDT_NGUID_LEN;
case NVME_NIDT_UUID:
@@ -1370,6 +1374,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+return NVME_NIDT_UUID_LEN;
uuid_copy(&ids->uuid, data + sizeof(*cur));
return NVME_NIDT_UUID_LEN;
case NVME_NIDT_CSI:
@@ -1466,12 +1472,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
+if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+dev_info(ctrl->device,
+"Ignoring bogus Namespace Identifiers\n");
+} else {
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0) &&
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+}
return 0;


@@ -144,6 +144,11 @@ enum nvme_quirks {
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+/*
+* Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+*/
+NVME_QUIRK_BOGUS_NID = (1 << 18),
};
/*


@@ -3314,7 +3314,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
-NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+NVME_QUIRK_DISABLE_WRITE_ZEROES |
+NVME_QUIRK_BOGUS_NID, },
+{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
@@ -3352,6 +3355,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+{ PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
+.driver_data = NVME_QUIRK_BOGUS_NID, },
+{ PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
+.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),


@@ -398,6 +398,9 @@ validate_group(struct perf_event *event)
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
+if (event == leader)
+return 0;
for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
@@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event)
local64_set(&hwc->period_left, hwc->sample_period);
}
-if (event->group_leader != event) {
-if (validate_group(event) != 0)
-return -EINVAL;
-}
-return 0;
+return validate_group(event);
}
static int armpmu_event_init(struct perf_event *event)


@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
if (value > samsung->kbd_led.max_brightness)
value = samsung->kbd_led.max_brightness;
-else if (value < 0)
-value = 0;
samsung->kbd_led_wk = value;
queue_work(samsung->led_workqueue, &samsung->kbd_led_work);


@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get reset\n");
-reset_control_deassert(priv->rstc);
+error = reset_control_deassert(priv->rstc);
+if (error)
+return error;
priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
priv->rcdev.of_reset_n_cells = 1;


@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
-return tegra_bpmp_transfer(bpmp, &msg);
+err = tegra_bpmp_transfer(bpmp, &msg);
+if (err)
+return err;
+if (msg.rx.ret)
+return -EINVAL;
+return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,


@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
-if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "


@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
-set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
* on what transcribed in TCP layer, different targets behave


@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
-if (unlikely(!conn || conn->suspend_rx)) {
+if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
-"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+"csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
-conn ? conn->suspend_rx : 0xFF);
+conn ? conn->flags : 0xFF);
return;
}


@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task;
itt_t itt;
-if (session->state == ISCSI_STATE_TERMINATE)
+if (session->state == ISCSI_STATE_TERMINATE ||
+!test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
-set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
return true;
}
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* Do this after dropping the extra ref because if this was a requeue
* it's removed from that list and cleanup_queued_task would miss it.
*/
-if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
/*
* Save the task and ref in case we weren't cleaning up this
* task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
-if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
-if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
-set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
flush_workqueue(ihost->workq);
}
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
iscsi_conn_queue_work(conn);
}
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
@@ -3312,6 +3315,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
@@ -3324,8 +3329,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
/*
* Unblock xmitworker(), Login Phase will pass through.
*/
clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);

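Note: the libiscsi hunks above fold the old per-direction suspend_rx/suspend_tx words into a single atomic conn->flags word queried with test_bit()/set_bit()/clear_bit(), and gate __iscsi_conn_send_pdu() on a new ISCSI_CONN_FLAG_BOUND bit. A minimal userspace sketch of the one-flags-word pattern, with C11 atomics standing in for the kernel's bitops (all names below are illustrative, not from this diff):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Bit numbers sharing one flags word, as the ISCSI_CONN_FLAG_* bits do. */
    enum { FLAG_SUSPEND_TX, FLAG_SUSPEND_RX, FLAG_BOUND };

    struct conn { atomic_ulong flags; };

    static void conn_set_bit(struct conn *c, int bit)
    {
        atomic_fetch_or(&c->flags, 1UL << bit);
    }

    static void conn_clear_bit(struct conn *c, int bit)
    {
        atomic_fetch_and(&c->flags, ~(1UL << bit));
    }

    static int conn_test_bit(struct conn *c, int bit)
    {
        return (atomic_load(&c->flags) >> bit) & 1;
    }

    int main(void)
    {
        struct conn c;

        atomic_init(&c.flags, 0);
        conn_set_bit(&c, FLAG_SUSPEND_TX);   /* suspend the tx path */
        if (conn_test_bit(&c, FLAG_SUSPEND_TX))
            puts("tx suspended");
        conn_clear_bit(&c, FLAG_SUSPEND_TX); /* resume it */
        return 0;
    }

A single word also gives one value to print when debugging, which is what the cxgbi log-message change above reflects.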

@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
 */
 conn->last_recv = jiffies;
-if (unlikely(conn->suspend_rx)) {
+if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
 *status = ISCSI_TCP_SUSPENDED;
 return 0;


@@ -859,6 +859,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
 return qedi_iscsi_send_ioreq(task);
 }
+static void qedi_offload_work(struct work_struct *work)
+{
+struct qedi_endpoint *qedi_ep =
+container_of(work, struct qedi_endpoint, offload_work);
+struct qedi_ctx *qedi;
+int wait_delay = 5 * HZ;
+int ret;
+qedi = qedi_ep->qedi;
+ret = qedi_iscsi_offload_conn(qedi_ep);
+if (ret) {
+QEDI_ERR(&qedi->dbg_ctx,
+"offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+qedi_ep->iscsi_cid, qedi_ep, ret);
+qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+return;
+}
+ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+(qedi_ep->state ==
+EP_STATE_OFLDCONN_COMPL),
+wait_delay);
+if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+QEDI_ERR(&qedi->dbg_ctx,
+"Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+qedi_ep->iscsi_cid, qedi_ep);
+}
+}
 static struct iscsi_endpoint *
 qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 int non_blocking)
@@ -907,6 +938,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 }
 qedi_ep = ep->dd_data;
 memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
 qedi_ep->state = EP_STATE_IDLE;
 qedi_ep->iscsi_cid = (u32)-1;
 qedi_ep->qedi = qedi;
@@ -1055,12 +1087,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
 qedi_ep = ep->dd_data;
 qedi = qedi_ep->qedi;
+flush_work(&qedi_ep->offload_work);
 if (qedi_ep->state == EP_STATE_OFLDCONN_START)
 goto ep_exit_recover;
-if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
-flush_work(&qedi_ep->offload_work);
 if (qedi_ep->conn) {
 qedi_conn = qedi_ep->conn;
 abrt_conn = qedi_conn->abrt_conn;
@@ -1234,37 +1265,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
 return rc;
 }
-static void qedi_offload_work(struct work_struct *work)
-{
-struct qedi_endpoint *qedi_ep =
-container_of(work, struct qedi_endpoint, offload_work);
-struct qedi_ctx *qedi;
-int wait_delay = 5 * HZ;
-int ret;
-qedi = qedi_ep->qedi;
-ret = qedi_iscsi_offload_conn(qedi_ep);
-if (ret) {
-QEDI_ERR(&qedi->dbg_ctx,
-"offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
-qedi_ep->iscsi_cid, qedi_ep, ret);
-qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-return;
-}
-ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
-(qedi_ep->state ==
-EP_STATE_OFLDCONN_COMPL),
-wait_delay);
-if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
-qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-QEDI_ERR(&qedi->dbg_ctx,
-"Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
-qedi_ep->iscsi_cid, qedi_ep);
-}
-}
 static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 {
 struct qedi_ctx *qedi;
@@ -1380,7 +1380,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 qedi_ep->dst_addr, qedi_ep->dst_port);
 }
-INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
 queue_work(qedi->offload_thread, &qedi_ep->offload_work);
 ret = 0;


@@ -86,6 +86,9 @@ struct iscsi_internal {
 struct transport_container session_cont;
 };
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
 static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
 static struct workqueue_struct *iscsi_eh_timer_workq;
@@ -169,6 +172,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
 static void iscsi_endpoint_release(struct device *dev)
 {
 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+mutex_lock(&iscsi_ep_idr_mutex);
+idr_remove(&iscsi_ep_idr, ep->id);
+mutex_unlock(&iscsi_ep_idr_mutex);
 kfree(ep);
 }
@@ -181,7 +189,7 @@ static ssize_t
 show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
 {
 struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+return sysfs_emit(buf, "%d\n", ep->id);
 }
 static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -194,48 +202,32 @@ static struct attribute_group iscsi_endpoint_group = {
 .attrs = iscsi_endpoint_attrs,
 };
-#define ISCSI_MAX_EPID -1
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
-struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-const uint64_t *epid = data;
-return *epid == ep->id;
-}
 struct iscsi_endpoint *
 iscsi_create_endpoint(int dd_size)
 {
-struct device *dev;
 struct iscsi_endpoint *ep;
-uint64_t id;
-int err;
-for (id = 1; id < ISCSI_MAX_EPID; id++) {
-dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
-iscsi_match_epid);
-if (!dev)
-break;
-else
-put_device(dev);
-}
-if (id == ISCSI_MAX_EPID) {
-printk(KERN_ERR "Too many connections. Max supported %u\n",
-ISCSI_MAX_EPID - 1);
-return NULL;
-}
+int err, id;
 ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
 if (!ep)
 return NULL;
+mutex_lock(&iscsi_ep_idr_mutex);
+id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+if (id < 0) {
+mutex_unlock(&iscsi_ep_idr_mutex);
+printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+id);
+goto free_ep;
+}
+mutex_unlock(&iscsi_ep_idr_mutex);
 ep->id = id;
 ep->dev.class = &iscsi_endpoint_class;
-dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+dev_set_name(&ep->dev, "ep-%d", id);
 err = device_register(&ep->dev);
 if (err)
-goto free_ep;
+goto free_id;
 err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
 if (err)
@@ -249,6 +241,10 @@ unregister_dev:
 device_unregister(&ep->dev);
 return NULL;
+free_id:
+mutex_lock(&iscsi_ep_idr_mutex);
+idr_remove(&iscsi_ep_idr, id);
+mutex_unlock(&iscsi_ep_idr_mutex);
 free_ep:
 kfree(ep);
 return NULL;
@@ -276,14 +272,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
 */
 struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
 {
-struct device *dev;
+struct iscsi_endpoint *ep;
-dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
-iscsi_match_epid);
-if (!dev)
-return NULL;
+mutex_lock(&iscsi_ep_idr_mutex);
+ep = idr_find(&iscsi_ep_idr, handle);
+if (!ep)
+goto unlock;
-return iscsi_dev_to_endpoint(dev);
+get_device(&ep->dev);
+unlock:
+mutex_unlock(&iscsi_ep_idr_mutex);
+return ep;
 }
 EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);

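Note: the hunks above replace the O(n) class_find_device() scan for a free endpoint ID with an IDR guarded by iscsi_ep_idr_mutex: idr_alloc() hands out the smallest free ID, idr_find() resolves a handle, and idr_remove() in iscsi_endpoint_release() returns the ID. A rough userspace analogue of that allocate-smallest-free-ID-and-map-it pattern (a fixed table instead of a real IDR; names invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_IDS 64

    /* id -> object map; NULL means the slot (and the ID) is free. */
    static void *id_table[MAX_IDS];

    /* Return the smallest free ID bound to obj, or -1 if the table is full. */
    static int id_alloc(void *obj)
    {
        for (int id = 0; id < MAX_IDS; id++) {
            if (!id_table[id]) {
                id_table[id] = obj;
                return id;
            }
        }
        return -1;
    }

    static void *id_find(int id)
    {
        return (id >= 0 && id < MAX_IDS) ? id_table[id] : NULL;
    }

    static void id_remove(int id)
    {
        if (id >= 0 && id < MAX_IDS)
            id_table[id] = NULL;
    }

    int main(void)
    {
        int ep = 42;            /* stand-in for a struct iscsi_endpoint */
        int id = id_alloc(&ep);

        printf("allocated id %d, lookup %s\n", id,
               id_find(id) == &ep ? "ok" : "broken");
        id_remove(id);          /* mirror of idr_remove() on release */
        return 0;
    }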

@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
 int result;
 unsigned char *buffer;
-buffer = kmalloc(32, GFP_KERNEL);
+buffer = kzalloc(32, GFP_KERNEL);
 if (!buffer)
 return -ENOMEM;
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
 cgc.data_direction = DMA_FROM_DEVICE;
 result = sr_do_ioctl(cd, &cgc);
+if (result)
+goto err;
 tochdr->cdth_trk0 = buffer[2];
 tochdr->cdth_trk1 = buffer[3];
+err:
 kfree(buffer);
 return result;
 }
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
 int result;
 unsigned char *buffer;
-buffer = kmalloc(32, GFP_KERNEL);
+buffer = kzalloc(32, GFP_KERNEL);
 if (!buffer)
 return -ENOMEM;
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
 cgc.data_direction = DMA_FROM_DEVICE;
 result = sr_do_ioctl(cd, &cgc);
+if (result)
+goto err;
 tocentry->cdte_ctrl = buffer[5] & 0xf;
 tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
 tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
 + buffer[10]) << 8) + buffer[11];
+err:
 kfree(buffer);
 return result;
 }
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
 {
 Scsi_CD *cd = cdi->handle;
 struct packet_command cgc;
-char *buffer = kmalloc(32, GFP_KERNEL);
+char *buffer = kzalloc(32, GFP_KERNEL);
 int result;
 if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
 cgc.data_direction = DMA_FROM_DEVICE;
 cgc.timeout = IOCTL_TIMEOUT;
 result = sr_do_ioctl(cd, &cgc);
+if (result)
+goto err;
 memcpy(mcn->medium_catalog_number, buffer + 9, 13);
 mcn->medium_catalog_number[13] = 0;
+err:
 kfree(buffer);
 return result;
 }

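Note: the hunks above switch the 32-byte command buffers from kmalloc() to kzalloc() and add an err: label so a failed sr_do_ioctl() is never followed by parsing, which previously let uninitialized heap bytes be interpreted and copied out. The same zero-then-single-exit shape in plain C (calloc() standing in for kzalloc(); everything below is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Pretend device command; returns nonzero on failure, may leave buf untouched. */
    static int do_ioctl(unsigned char *buf, size_t len)
    {
        (void)buf; (void)len;
        return -1;              /* simulate a failing command */
    }

    static int read_header(unsigned char out[2])
    {
        unsigned char *buffer = calloc(1, 32);  /* zeroed, like kzalloc() */
        int result;

        if (!buffer)
            return -12;         /* -ENOMEM */

        result = do_ioctl(buffer, 32);
        if (result)
            goto err;           /* don't parse bytes the device never wrote */

        out[0] = buffer[2];
        out[1] = buffer[3];
    err:
        free(buffer);           /* one exit path frees in every case */
        return result;
    }

    int main(void)
    {
        unsigned char hdr[2] = { 0, 0 };

        printf("read_header returned %d\n", read_header(hdr));
        return 0;
    }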

@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
 static bool atmel_qspi_supports_op(struct spi_mem *mem,
 const struct spi_mem_op *op)
 {
+if (!spi_mem_default_supports_op(mem, op))
+return false;
 if (atmel_qspi_find_mode(op) < 0)
 return false;


@@ -1226,9 +1226,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
 all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
 !op->data.dtr;
-/* Mixed DTR modes not supported. */
-if (!(all_true || all_false))
+if (all_true) {
+/* Right now we only support 8-8-8 DTR mode. */
+if (op->cmd.nbytes && op->cmd.buswidth != 8)
+return false;
+if (op->addr.nbytes && op->addr.buswidth != 8)
+return false;
+if (op->data.nbytes && op->data.buswidth != 8)
+return false;
+} else if (all_false) {
+/* Only 1-1-X ops are supported without DTR */
+if (op->cmd.nbytes && op->cmd.buswidth > 1)
+return false;
+if (op->addr.nbytes && op->addr.buswidth > 1)
+return false;
+} else {
+/* Mixed DTR modes are not supported. */
 return false;
+}
 if (all_true)
 return spi_mem_dtr_supports_op(mem, op);


@@ -909,7 +909,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
 static int __maybe_unused mtk_nor_resume(struct device *dev)
 {
-return pm_runtime_force_resume(dev);
+struct spi_controller *ctlr = dev_get_drvdata(dev);
+struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+int ret;
+ret = pm_runtime_force_resume(dev);
+if (ret)
+return ret;
+mtk_nor_init(sp);
+return 0;
 }
 static const struct dev_pm_ops mtk_nor_pm_ops = {


@@ -936,7 +936,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 ssize_t rc;
 struct inode *inode = file_inode(iocb->ki_filp);
-if (iocb->ki_filp->f_flags & O_DIRECT)
+if (iocb->ki_flags & IOCB_DIRECT)
 return cifs_user_readv(iocb, iter);
 rc = cifs_revalidate_mapping(inode);


@@ -2267,6 +2267,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
 * Structure of a directory entry
 */
 #define EXT4_NAME_LEN 255
+/*
+ * Base length of the ext4 directory entry excluding the name length
+ */
+#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
 struct ext4_dir_entry {
 __le32 inode; /* Inode number */
@@ -3027,7 +3031,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
 extern int ext4_truncate(struct inode *);
 extern int ext4_break_layouts(struct inode *);
-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
 extern void ext4_set_inode_flags(struct inode *, bool init);
 extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);


@@ -4504,9 +4504,9 @@ retry:
 return ret > 0 ? ret2 : ret;
 }
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
 static long ext4_zero_range(struct file *file, loff_t offset,
 loff_t len, int mode)
@@ -4578,6 +4578,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 /* Wait all existing dio workers, newcomers will block on i_mutex */
 inode_dio_wait(inode);
+ret = file_modified(file);
+if (ret)
+goto out_mutex;
 /* Preallocate the range including the unaligned edges */
 if (partial_begin || partial_end) {
 ret = ext4_alloc_file_blocks(file,
@@ -4696,7 +4700,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 ext4_fc_start_update(inode);
 if (mode & FALLOC_FL_PUNCH_HOLE) {
-ret = ext4_punch_hole(inode, offset, len);
+ret = ext4_punch_hole(file, offset, len);
 goto exit;
 }
@@ -4705,12 +4709,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 goto exit;
 if (mode & FALLOC_FL_COLLAPSE_RANGE) {
-ret = ext4_collapse_range(inode, offset, len);
+ret = ext4_collapse_range(file, offset, len);
 goto exit;
 }
 if (mode & FALLOC_FL_INSERT_RANGE) {
-ret = ext4_insert_range(inode, offset, len);
+ret = ext4_insert_range(file, offset, len);
 goto exit;
 }
@@ -4746,6 +4750,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 /* Wait all existing dio workers, newcomers will block on i_mutex */
 inode_dio_wait(inode);
+ret = file_modified(file);
+if (ret)
+goto out;
 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
 if (ret)
 goto out;
@@ -5248,8 +5256,9 @@ out:
 * This implements the fallocate's collapse range functionality for ext4
 * Returns: 0 and non-zero on error.
 */
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
 {
+struct inode *inode = file_inode(file);
 struct super_block *sb = inode->i_sb;
 struct address_space *mapping = inode->i_mapping;
 ext4_lblk_t punch_start, punch_stop;
@@ -5301,6 +5310,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 /* Wait for existing dio to complete */
 inode_dio_wait(inode);
+ret = file_modified(file);
+if (ret)
+goto out_mutex;
 /*
 * Prevent page faults from reinstantiating pages we have released from
 * page cache.
@@ -5394,8 +5407,9 @@ out_mutex:
 * by len bytes.
 * Returns 0 on success, error otherwise.
 */
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
 {
+struct inode *inode = file_inode(file);
 struct super_block *sb = inode->i_sb;
 struct address_space *mapping = inode->i_mapping;
 handle_t *handle;
@@ -5452,6 +5466,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 /* Wait for existing dio to complete */
 inode_dio_wait(inode);
+ret = file_modified(file);
+if (ret)
+goto out_mutex;
 /*
 * Prevent page faults from reinstantiating pages we have released from
 * page cache.


@@ -3970,12 +3970,14 @@ int ext4_break_layouts(struct inode *inode)
 * Returns: 0 on success or negative on failure
 */
-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 {
+struct inode *inode = file_inode(file);
 struct super_block *sb = inode->i_sb;
 ext4_lblk_t first_block, stop_block;
 struct address_space *mapping = inode->i_mapping;
-loff_t first_block_offset, last_block_offset;
+loff_t first_block_offset, last_block_offset, max_length;
+struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 handle_t *handle;
 unsigned int credits;
 int ret = 0, ret2 = 0;
@@ -4018,6 +4020,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 offset;
 }
+/*
+ * For punch hole the length + offset needs to be within one block
+ * before last range. Adjust the length if it goes beyond that limit.
+ */
+max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+if (offset + length > max_length)
+length = max_length - offset;
 if (offset & (sb->s_blocksize - 1) ||
 (offset + length) & (sb->s_blocksize - 1)) {
 /*
@@ -4033,6 +4043,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 /* Wait all existing dio workers, newcomers will block on i_mutex */
 inode_dio_wait(inode);
+ret = file_modified(file);
+if (ret)
+goto out_mutex;
 /*
 * Prevent page faults from reinstantiating pages we have released from
 * page cache.

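Note: the ext4_punch_hole() hunks above clamp the request so offset + length stays within s_bitmap_maxbytes minus one block before any work is done. Stripped of the surrounding locking, the clamp is just this arithmetic (a standalone sketch; the constants are invented):

    #include <stdio.h>

    typedef long long loff_t;

    /* Keep offset + length within max_bytes - block_size. */
    static loff_t clamp_punch_len(loff_t offset, loff_t length,
                                  loff_t max_bytes, loff_t block_size)
    {
        loff_t max_length = max_bytes - block_size;

        if (offset + length > max_length)
            length = max_length - offset;
        return length;
    }

    int main(void)
    {
        /* e.g. a 1 MiB limit, 4 KiB blocks, a request running past the end */
        loff_t len = clamp_punch_len(1040384, 65536, 1048576, 4096);

        printf("clamped length: %lld\n", len);  /* prints 4096 */
        return 0;
    }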

@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 de = (struct ext4_dir_entry_2 *)search_buf;
 dlimit = search_buf + buf_size;
-while ((char *) de < dlimit) {
+while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
 /* this code is executed quadratically often */
 /* do minimal checking `by hand' */
-if ((char *) de + de->name_len <= dlimit &&
+if (de->name + de->name_len <= dlimit &&
 ext4_match(dir, fname, de)) {
 /* found a match - just to be sure, do
 * a full check */

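Note: the ext4_search_dir() hunk above stops the walk EXT4_BASE_DIR_LEN short of the buffer end and measures the name bound from de->name rather than the record start, so a truncated final record can no longer be read past dlimit. A small userspace sketch of bounds-checking a walk over variable-length records (record layout and names invented for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Fixed header followed by a name of name_len bytes. */
    struct dirent_rec {
        unsigned short rec_len;     /* total record size, >= header */
        unsigned char name_len;
        char name[];                /* not NUL-terminated */
    };

    #define BASE_LEN sizeof(struct dirent_rec)

    static int find_name(char *buf, size_t buf_size, const char *want)
    {
        char *p = buf, *limit = buf + buf_size;

        /* Stop while a full header still fits, like dlimit - EXT4_BASE_DIR_LEN. */
        while (p < limit - BASE_LEN) {
            struct dirent_rec *de = (struct dirent_rec *)p;

            /* The name must end inside the buffer before it is compared. */
            if (de->name + de->name_len <= limit &&
                de->name_len == strlen(want) &&
                !memcmp(de->name, want, de->name_len))
                return 1;
            if (de->rec_len < BASE_LEN)
                return 0;           /* corrupt record, stop walking */
            p += de->rec_len;
        }
        return 0;
    }

    int main(void)
    {
        char buf[32] = { 0 };
        struct dirent_rec *de = (struct dirent_rec *)buf;

        de->rec_len = 32;
        de->name_len = 3;
        memcpy(de->name, "foo", 3);
        printf("found: %d\n", find_name(buf, sizeof(buf), "foo"));
        return 0;
    }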

@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
 continue;
 }
 clear_buffer_async_write(bh);
-if (bio->bi_status)
+if (bio->bi_status) {
 set_buffer_write_io_error(bh);
+buffer_io_error(bh);
+}
 } while ((bh = bh->b_this_page) != head);
 spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
 if (!under_io) {


@@ -3696,9 +3696,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
 ext4_fsblk_t first_block, last_block, b;
 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
 int s, j, count = 0;
+int has_super = ext4_bg_has_super(sb, grp);
 if (!ext4_has_feature_bigalloc(sb))
-return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+return (has_super + ext4_bg_num_gdb(sb, grp) +
+(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
 sbi->s_itb_per_group + 2);
 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
@@ -4785,9 +4787,18 @@ no_journal:
 * Get the # of file system overhead blocks from the
 * superblock if present.
 */
-if (es->s_overhead_clusters)
 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
-else {
+/* ignore the precalculated value if it is ridiculous */
+if (sbi->s_overhead > ext4_blocks_count(es))
+sbi->s_overhead = 0;
+/*
+ * If the bigalloc feature is not enabled recalculating the
+ * overhead doesn't take long, so we might as well just redo
+ * it to make sure we are using the correct value.
+ */
+if (!ext4_has_feature_bigalloc(sb))
+sbi->s_overhead = 0;
+if (sbi->s_overhead == 0) {
 err = ext4_calculate_overhead(sb);
 if (err)
 goto failed_mount_wq;


@@ -923,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 spin_lock_init(&rgd->rd_rsspin);
 mutex_init(&rgd->rd_mutex);
-error = compute_bitstructs(rgd);
-if (error)
-goto fail;
 error = gfs2_glock_get(sdp, rgd->rd_addr,
 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
 if (error)
 goto fail;
+error = compute_bitstructs(rgd);
+if (error)
+goto fail_glock;
 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
 if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -945,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 }
 error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
 gfs2_glock_put(rgd->rd_gl);
 fail:


@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
 info.flags = 0;
 info.length = len;
 info.low_limit = current->mm->mmap_base;
-info.high_limit = TASK_SIZE;
+info.high_limit = arch_get_mmap_end(addr);
 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 info.align_offset = 0;
 return vm_unmapped_area(&info);
@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 info.length = len;
 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-info.high_limit = current->mm->mmap_base;
+info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 info.align_offset = 0;
 addr = vm_unmapped_area(&info);
@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
 VM_BUG_ON(addr != -ENOMEM);
 info.flags = 0;
 info.low_limit = current->mm->mmap_base;
-info.high_limit = TASK_SIZE;
+info.high_limit = arch_get_mmap_end(addr);
 addr = vm_unmapped_area(&info);
 }
@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 struct mm_struct *mm = current->mm;
 struct vm_area_struct *vma;
 struct hstate *h = hstate_file(file);
+const unsigned long mmap_end = arch_get_mmap_end(addr);
 if (len & ~huge_page_mask(h))
 return -EINVAL;
@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 if (addr) {
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
-if (TASK_SIZE - len >= addr &&
+if (mmap_end - len >= addr &&
 (!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }

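Note: the hugetlbfs hunks above stop hard-coding TASK_SIZE and route both limits through arch_get_mmap_end()/arch_get_mmap_base(), which the sched/mm.h hunk later in this diff defaults with #ifndef so only architectures that care need to override them. The override-or-default macro idiom in isolation (values invented):

    #include <stdio.h>

    /* An arch header could define these first; otherwise the defaults apply,
     * mirroring what sched/mm.h does for arch_get_mmap_end()/arch_get_mmap_base(). */
    #ifndef arch_get_mmap_end
    #define arch_get_mmap_end(addr) (0x7fffffffUL)  /* stand-in for TASK_SIZE */
    #endif

    #ifndef arch_get_mmap_base
    #define arch_get_mmap_base(addr, base) (base)
    #endif

    int main(void)
    {
        unsigned long base = 0x10000000UL;

        printf("end=%#lx base=%#lx\n",
               arch_get_mmap_end(0), arch_get_mmap_base(0, base));
        return 0;
    }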

@@ -23,22 +23,11 @@ struct pipe_inode_info;
 #ifdef CONFIG_BLOCK
 extern void __init bdev_cache_init(void);
-extern int __sync_blockdev(struct block_device *bdev, int wait);
-void iterate_bdevs(void (*)(struct block_device *, void *), void *);
 void emergency_thaw_bdev(struct super_block *sb);
 #else
 static inline void bdev_cache_init(void)
 {
 }
-static inline int __sync_blockdev(struct block_device *bdev, int wait)
-{
-return 0;
-}
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *),
-void *arg)
-{
-}
 static inline int emergency_thaw_bdev(struct super_block *sb)
 {
 return 0;


@@ -501,7 +501,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 }
 spin_unlock(&commit_transaction->t_handle_lock);
 commit_transaction->t_state = T_SWITCH;
-write_unlock(&journal->j_state_lock);
 J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
 journal->j_max_transaction_buffers);
@@ -521,6 +520,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 * has reserved. This is consistent with the existing behaviour
 * that multiple jbd2_journal_get_write_access() calls to the same
 * buffer are perfectly permissible.
+ * We use journal->j_state_lock here to serialize processing of
+ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
 */
 while (commit_transaction->t_reserved_list) {
 jh = commit_transaction->t_reserved_list;
@@ -540,6 +541,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 jbd2_journal_refile_buffer(journal, jh);
 }
+write_unlock(&journal->j_state_lock);
 /*
 * Now try to drop any written-back buffers from the journal's
 * checkpoint lists. We do this *before* commit because it potentially


@@ -3625,18 +3625,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
 {
 struct dentry *dentry = ERR_PTR(-EEXIST);
 struct qstr last;
+bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
+unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
+unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
 int type;
 int err2;
 int error;
-bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
-/*
-* Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
-* other flags passed in are ignored!
-*/
-lookup_flags &= LOOKUP_REVAL;
-error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
 if (error)
 return ERR_PTR(error);
@@ -3650,11 +3646,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
 /* don't fail immediately if it's r/o, at least try to report other errors */
 err2 = mnt_want_write(path->mnt);
 /*
-* Do the final lookup.
+* Do the final lookup. Suppress 'create' if there is a trailing
+* '/', and a directory wasn't requested.
 */
-lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+if (last.name[last.len] && !want_dir)
+create_flags = 0;
 inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
-dentry = __lookup_hash(&last, path->dentry, lookup_flags);
+dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
 if (IS_ERR(dentry))
 goto unlock;
@@ -3668,7 +3666,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
 * all is fine. Let's be bastards - you had / on the end, you've
 * been asking for (non-existent) directory. -ENOENT for you.
 */
-if (unlikely(!is_dir && last.name[last.len])) {
+if (unlikely(!create_flags)) {
 error = -ENOENT;
 goto fail;
 }


@@ -334,9 +334,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
 # define choose_32_64(a,b) b
 #endif
-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
 #ifndef INIT_STRUCT_STAT_PADDING
 # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
 #endif
@@ -345,7 +342,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 {
 struct stat tmp;
-if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
+if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
 return -EOVERFLOW;
+if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
+return -EOVERFLOW;
 #if BITS_PER_LONG == 32
 if (stat->size > MAX_NON_LFS)
@@ -353,7 +352,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 #endif
 INIT_STRUCT_STAT_PADDING(tmp);
-tmp.st_dev = encode_dev(stat->dev);
+tmp.st_dev = new_encode_dev(stat->dev);
 tmp.st_ino = stat->ino;
 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
 return -EOVERFLOW;
@@ -363,7 +362,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 return -EOVERFLOW;
 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-tmp.st_rdev = encode_dev(stat->rdev);
+tmp.st_rdev = new_encode_dev(stat->rdev);
 tmp.st_size = stat->size;
 tmp.st_atime = stat->atime.tv_sec;
 tmp.st_mtime = stat->mtime.tv_sec;
@@ -644,11 +643,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 {
 struct compat_stat tmp;
-if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
 return -EOVERFLOW;
+if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
+return -EOVERFLOW;
 memset(&tmp, 0, sizeof(tmp));
-tmp.st_dev = old_encode_dev(stat->dev);
+tmp.st_dev = new_encode_dev(stat->dev);
 tmp.st_ino = stat->ino;
 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
 return -EOVERFLOW;
@@ -658,7 +659,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 return -EOVERFLOW;
 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-tmp.st_rdev = old_encode_dev(stat->rdev);
+tmp.st_rdev = new_encode_dev(stat->rdev);
 if ((u64) stat->size > MAX_NON_LFS)
 return -EOVERFLOW;
 tmp.st_size = stat->size;


@@ -3,6 +3,7 @@
 * High-level sync()-related operations
 */
+#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -21,25 +22,6 @@
 #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
 SYNC_FILE_RANGE_WAIT_AFTER)
-/*
-* Do the filesystem syncing work. For simple filesystems
-* writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
-* submit IO for these buffers via __sync_blockdev(). This also speeds up the
-* wait == 1 case since in that case write_inode() functions do
-* sync_dirty_buffer() and thus effectively write one block at a time.
-*/
-static int __sync_filesystem(struct super_block *sb, int wait)
-{
-if (wait)
-sync_inodes_sb(sb);
-else
-writeback_inodes_sb(sb, WB_REASON_SYNC);
-if (sb->s_op->sync_fs)
-sb->s_op->sync_fs(sb, wait);
-return __sync_blockdev(sb->s_bdev, wait);
-}
 /*
 * Write out and wait upon all dirty data associated with this
 * superblock. Filesystem data as well as the underlying block
@@ -47,7 +29,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 */
 int sync_filesystem(struct super_block *sb)
 {
-int ret;
+int ret = 0;
 /*
 * We need to be protected against the filesystem going from
@@ -61,10 +43,31 @@ int sync_filesystem(struct super_block *sb)
 if (sb_rdonly(sb))
 return 0;
-ret = __sync_filesystem(sb, 0);
-if (ret < 0)
+/*
+ * Do the filesystem syncing work. For simple filesystems
+ * writeback_inodes_sb(sb) just dirties buffers with inodes so we have
+ * to submit I/O for these buffers via sync_blockdev(). This also
+ * speeds up the wait == 1 case since in that case write_inode()
+ * methods call sync_dirty_buffer() and thus effectively write one block
+ * at a time.
+ */
+writeback_inodes_sb(sb, WB_REASON_SYNC);
+if (sb->s_op->sync_fs) {
+ret = sb->s_op->sync_fs(sb, 0);
+if (ret)
 return ret;
-return __sync_filesystem(sb, 1);
+}
+ret = sync_blockdev_nowait(sb->s_bdev);
+if (ret)
+return ret;
+sync_inodes_sb(sb);
+if (sb->s_op->sync_fs) {
+ret = sb->s_op->sync_fs(sb, 1);
+if (ret)
+return ret;
+}
+return sync_blockdev(sb->s_bdev);
 }
 EXPORT_SYMBOL_NS(sync_filesystem, ANDROID_GKI_VFS_EXPORT_ONLY);
@@ -81,21 +84,6 @@ static void sync_fs_one_sb(struct super_block *sb, void *arg)
 sb->s_op->sync_fs(sb, *(int *)arg);
 }
-static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
-{
-filemap_fdatawrite(bdev->bd_inode->i_mapping);
-}
-static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
-{
-/*
-* We keep the error status of individual mapping so that
-* applications can catch the writeback error using fsync(2).
-* See filemap_fdatawait_keep_errors() for details.
-*/
-filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
-}
 /*
 * Sync everything. We start by waking flusher threads so that most of
 * writeback runs on all devices in parallel. Then we sync all inodes reliably
@@ -114,8 +102,8 @@ void ksys_sync(void)
 iterate_supers(sync_inodes_one_sb, NULL);
 iterate_supers(sync_fs_one_sb, &nowait);
 iterate_supers(sync_fs_one_sb, &wait);
-iterate_bdevs(fdatawrite_one_bdev, NULL);
-iterate_bdevs(fdatawait_one_bdev, NULL);
+sync_bdevs(false);
+sync_bdevs(true);
 if (unlikely(laptop_mode))
 laptop_sync_completion();
 }
@@ -136,10 +124,10 @@ static void do_sync_work(struct work_struct *work)
 */
 iterate_supers(sync_inodes_one_sb, &nowait);
 iterate_supers(sync_fs_one_sb, &nowait);
-iterate_bdevs(fdatawrite_one_bdev, NULL);
+sync_bdevs(false);
 iterate_supers(sync_inodes_one_sb, &nowait);
 iterate_supers(sync_fs_one_sb, &nowait);
-iterate_bdevs(fdatawrite_one_bdev, NULL);
+sync_bdevs(false);
 printk("Emergency Sync complete\n");
 kfree(work);
 }

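Note: the sync_filesystem() rework above inlines the old __sync_filesystem() helper as explicit stages (async writeback, ->sync_fs(0), a non-waiting blockdev flush, then the waiting inode, ->sync_fs(1) and blockdev passes), returning the first error instead of discarding it. The control flow reduces to this shape (a standalone sketch with stubbed stages; names invented):

    #include <stdio.h>

    /* Stubs for the stages; int-returning ones yield 0 or a negative errno. */
    static void writeback_start(void) { }
    static int  sync_fs(int wait)     { (void)wait; return 0; }
    static int  blockdev_nowait(void) { return 0; }
    static void sync_inodes(void)     { }
    static int  blockdev_wait(void)   { return -5; }   /* simulate -EIO */

    /* The first failing stage wins; later stages are skipped. */
    static int sync_filesystem_sketch(void)
    {
        int ret;

        writeback_start();          /* async pass, no error to collect */
        ret = sync_fs(0);
        if (ret)
            return ret;
        ret = blockdev_nowait();
        if (ret)
            return ret;
        sync_inodes();              /* waiting pass over inodes */
        ret = sync_fs(1);
        if (ret)
            return ret;
        return blockdev_wait();     /* final, waited blockdev flush */
    }

    int main(void)
    {
        printf("sync returned %d\n", sync_filesystem_sketch());
        return 0;
    }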

@@ -729,6 +729,7 @@ xfs_fs_sync_fs(
 int wait)
 {
 struct xfs_mount *mp = XFS_M(sb);
+int error;
 trace_xfs_fs_sync_fs(mp, __return_address);
@@ -738,7 +739,10 @@ xfs_fs_sync_fs(
 if (!wait)
 return 0;
-xfs_log_force(mp, XFS_LOG_SYNC);
+error = xfs_log_force(mp, XFS_LOG_SYNC);
+if (error)
+return error;
 if (laptop_mode) {
 /*
 * The disk must be active because we're syncing.


@@ -2012,6 +2012,8 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
 #ifdef CONFIG_BLOCK
 void invalidate_bdev(struct block_device *bdev);
 int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
 #else
 static inline void invalidate_bdev(struct block_device *bdev)
 {
@@ -2020,6 +2022,13 @@ static inline int sync_blockdev(struct block_device *bdev)
 {
 return 0;
 }
+static inline int sync_blockdev_nowait(struct block_device *bdev)
+{
+return 0;
+}
+static inline void sync_bdevs(bool wait)
+{
+}
 #endif
 int fsync_bdev(struct block_device *bdev);


@@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
 #endif
 }
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
 #ifdef __BIG_ENDIAN
@@ -364,8 +364,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
 */
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
-const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);


@@ -202,6 +202,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
 */
 bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
+#endif
 #else /* CONFIG_KFENCE */
 static inline bool is_kfence_address(const void *addr) { return false; }
@@ -219,6 +235,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
 return false;
 }
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+{
+return false;
+}
+#endif
 #endif
 #endif /* _LINUX_KFENCE_H */


@@ -1026,6 +1026,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 }
 void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_delayed(void);
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 int val);
@@ -1458,6 +1459,10 @@ static inline void mem_cgroup_flush_stats(void)
 {
 }
+static inline void mem_cgroup_flush_stats_delayed(void)
+{
+}
 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
 enum node_stat_item idx, int val)
 {


@@ -2,7 +2,7 @@
 #ifndef _NF_CONNTRACK_COMMON_H
 #define _NF_CONNTRACK_COMMON_H
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <uapi/linux/netfilter/nf_conntrack_common.h>
 struct ip_conntrack_stat {
@@ -25,19 +25,21 @@ struct ip_conntrack_stat {
 #define NFCT_PTRMASK ~(NFCT_INFOMASK)
 struct nf_conntrack {
-atomic_t use;
+refcount_t use;
 };
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 /* like nf_ct_put, but without module dependency on nf_conntrack */
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
-if (nfct && atomic_dec_and_test(&nfct->use))
+if (nfct && refcount_dec_and_test(&nfct->use))
 nf_conntrack_destroy(nfct);
 }
 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
 {
 if (nfct)
-atomic_inc(&nfct->use);
+refcount_inc(&nfct->use);
 }
 #endif /* _NF_CONNTRACK_COMMON_H */

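Note: the conntrack hunks here and below move struct nf_conntrack from atomic_t to refcount_t, whose saturating semantics trap over- and underflow, and make nf_ct_put() destroy the object once the count hits zero. The dec-and-test ownership pattern in userspace C11 (without refcount_t's saturation checks; names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_uint use;    /* reference count, like nf_conntrack.use */
    };

    static struct obj *obj_new(void)
    {
        struct obj *o = malloc(sizeof(*o));

        if (o)
            atomic_init(&o->use, 1);    /* creator holds one reference */
        return o;
    }

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->use, 1);
    }

    /* Last dropper destroys: the dec-and-test pattern behind nf_ct_put(). */
    static void obj_put(struct obj *o)
    {
        if (o && atomic_fetch_sub(&o->use, 1) == 1) {
            puts("last reference gone, destroying");
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = obj_new();

        if (!o)
            return 1;
        obj_get(o);     /* a second holder */
        obj_put(o);     /* still alive */
        obj_put(o);     /* destroyed here */
        return 0;
    }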

@@ -1446,6 +1446,7 @@ struct task_struct {
 int pagefault_disabled;
 #ifdef CONFIG_MMU
 struct task_struct *oom_reaper_list;
+struct timer_list oom_reaper_timer;
 #endif
 #ifdef CONFIG_VMAP_STACK
 struct vm_struct *stack_vm_area;


@@ -106,6 +106,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 #endif /* CONFIG_MEMCG */
 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr) (TASK_SIZE)
+#endif
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
 struct rlimit *rlim_stack);
 extern unsigned long


@@ -4,8 +4,6 @@
 #include <linux/skbuff.h>
-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
 struct ip_esp_hdr;
 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)


@@ -76,6 +76,8 @@ struct nf_conn {
 * Hint, SKB address this struct and refcnt via skb->_nfct and
 * helpers nf_conntrack_get() and nf_conntrack_put().
 * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
+ * except that the latter uses internal indirection and does not
+ * result in a conntrack module dependency.
 * beware nf_ct_get() is different and don't inc refcnt.
 */
 struct nf_conntrack ct_general;
@@ -169,11 +171,13 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
 return (struct nf_conn *)(nfct & NFCT_PTRMASK);
 }
+void nf_ct_destroy(struct nf_conntrack *nfct);
 /* decrement reference count on a conntrack */
 static inline void nf_ct_put(struct nf_conn *ct)
 {
-WARN_ON(!ct);
-nf_conntrack_put(&ct->ct_general);
+if (ct && refcount_dec_and_test(&ct->ct_general.use))
+nf_ct_destroy(&ct->ct_general);
 }
 /* Protocol module loading */


@@ -77,7 +77,7 @@ struct netns_ipv6 {
 struct list_head fib6_walkers;
 rwlock_t fib6_walker_lock;
 spinlock_t fib6_gc_lock;
-unsigned int ip6_rt_gc_expire;
+atomic_t ip6_rt_gc_expire;
 unsigned long ip6_rt_last_gc;
 unsigned char flowlabel_has_excl;
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
