Merge tag 'v5.15-rc6' into android-mainline

Linux 5.15-rc6

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7d0ce5237a312f6345ddb6dd5faff0c428d8030f
Greg Kroah-Hartman
2021-10-18 11:08:34 +02:00
100 changed files with 708 additions and 374 deletions

View File

@@ -18,7 +18,7 @@ types can be added after the security issue of corresponding device driver
 is clarified or fixed in the future.
 Create/Destroy VDUSE devices
-------------------------
+----------------------------
 VDUSE devices are created as follows:

View File

@@ -7343,10 +7343,11 @@ F: include/uapi/linux/fpga-dfl.h
 FPGA MANAGER FRAMEWORK
 M:	Moritz Fischer <mdf@kernel.org>
+M:	Wu Hao <hao.wu@intel.com>
+M:	Xu Yilun <yilun.xu@intel.com>
 R:	Tom Rix <trix@redhat.com>
 L:	linux-fpga@vger.kernel.org
 S:	Maintained
-W:	http://www.rocketboards.org
 Q:	http://patchwork.kernel.org/project/linux-fpga/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
 F:	Documentation/devicetree/bindings/fpga/

View File

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Opossums on Parade
 # *DOCUMENTATION*

View File

@@ -197,7 +197,6 @@ CONFIG_PCI_EPF_TEST=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_OMAP_OCP2SCP=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y

View File

@@ -46,7 +46,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y

View File

@@ -40,7 +40,6 @@ CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR_HOST=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y

View File

@@ -112,7 +112,6 @@ config ARCH_OMAP2PLUS
 	select PM_GENERIC_DOMAINS
 	select PM_GENERIC_DOMAINS_OF
 	select RESET_CONTROLLER
-	select SIMPLE_PM_BUS
 	select SOC_BUS
 	select TI_SYSC
 	select OMAP_IRQCHIP

View File

@@ -245,7 +245,6 @@ CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_HISILICON_LPC=y
-CONFIG_SIMPLE_PM_BUS=y
 CONFIG_FSL_MC_BUS=y
 CONFIG_TEGRA_ACONNECT=m
 CONFIG_GNSS=m

View File

@@ -255,13 +255,16 @@ kvm_novcpu_exit:
  * r3 contains the SRR1 wakeup value, SRR1 is trashed.
  */
 _GLOBAL(idle_kvm_start_guest)
-	ld	r4,PACAEMERGSP(r13)
 	mfcr	r5
 	mflr	r0
-	std	r1,0(r4)
-	std	r5,8(r4)
-	std	r0,16(r4)
-	subi	r1,r4,STACK_FRAME_OVERHEAD
+	std	r5, 8(r1)	// Save CR in caller's frame
+	std	r0, 16(r1)	// Save LR in caller's frame
+	// Create frame on emergency stack
+	ld	r4, PACAEMERGSP(r13)
+	stdu	r1, -SWITCH_FRAME_SIZE(r4)
+	// Switch to new frame on emergency stack
+	mr	r1, r4
+	std	r3, 32(r1)	// Save SRR1 wakeup value
 	SAVE_NVGPRS(r1)
 	/*
@@ -313,6 +316,10 @@ kvm_unsplit_wakeup:
 kvm_secondary_got_guest:
+	// About to go to guest, clear saved SRR1
+	li	r0, 0
+	std	r0, 32(r1)
 	/* Set HSTATE_DSCR(r13) to something sensible */
 	ld	r6, PACA_DSCR_DEFAULT(r13)
 	std	r6, HSTATE_DSCR(r13)
@@ -392,13 +399,12 @@ kvm_no_guest:
 	mfspr	r4, SPRN_LPCR
 	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
 	mtspr	SPRN_LPCR, r4
-	/* set up r3 for return */
-	mfspr	r3,SPRN_SRR1
+	// Return SRR1 wakeup value, or 0 if we went into the guest
+	ld	r3, 32(r1)
 	REST_NVGPRS(r1)
-	addi	r1, r1, STACK_FRAME_OVERHEAD
-	ld	r0, 16(r1)
-	ld	r5, 8(r1)
-	ld	r1, 0(r1)
+	ld	r1, 0(r1)	// Switch back to caller stack
+	ld	r0, 16(r1)	// Reload LR
+	ld	r5, 8(r1)	// Reload CR
 	mtlr	r0
 	mtcr	r5
 	blr

View File

@@ -945,7 +945,8 @@ static int xive_get_irqchip_state(struct irq_data *data,
 		 * interrupt to be inactive in that case.
 		 */
 		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
-			(xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
+			(xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
+			 !irqd_irq_disabled(data)));
 		return 0;
 	default:
 		return -EINVAL;

View File

@@ -1525,7 +1525,6 @@ config AMD_MEM_ENCRYPT
 config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
 	bool "Activate AMD Secure Memory Encryption (SME) by default"
-	default y
 	depends on AMD_MEM_ENCRYPT
 	help
 	  Say yes to have system memory encrypted by default if running on

View File

@@ -68,6 +68,7 @@ static bool test_intel(int idx, void *data)
 	case INTEL_FAM6_BROADWELL_D:
 	case INTEL_FAM6_BROADWELL_G:
 	case INTEL_FAM6_BROADWELL_X:
+	case INTEL_FAM6_SAPPHIRERAPIDS_X:
 	case INTEL_FAM6_ATOM_SILVERMONT:
 	case INTEL_FAM6_ATOM_SILVERMONT_D:

View File

@@ -385,7 +385,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 			return -EINVAL;
 	} else {
 		/* Mask invalid bits out for historical reasons (broken hardware). */
-		fpu->state.fxsave.mxcsr &= ~mxcsr_feature_mask;
+		fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
 	}
 	/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */

View File

@@ -666,6 +666,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
 	bfqg_and_blkg_put(bfqq_group(bfqq));
+	if (entity->parent &&
+	    entity->parent->last_bfqq_created == bfqq)
+		entity->parent->last_bfqq_created = NULL;
+	else if (bfqd->last_bfqq_created == bfqq)
+		bfqd->last_bfqq_created = NULL;
 	entity->parent = bfqg->my_entity;
 	entity->sched_data = &bfqg->sched_data;
 	/* pin down bfqg and its associated blkg */

View File

@@ -49,7 +49,6 @@
#include "blk-mq.h" #include "blk-mq.h"
#include "blk-mq-sched.h" #include "blk-mq-sched.h"
#include "blk-pm.h" #include "blk-pm.h"
#include "blk-rq-qos.h"
struct dentry *blk_debugfs_root; struct dentry *blk_debugfs_root;
@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 	/*
 	 * When queue DYING flag is set, we need to block new req
 	 * entering queue, so we call blk_freeze_queue_start() to
 	 * prevent I/O from crossing blk_queue_enter().
 	 */
 	blk_freeze_queue_start(q);
 	if (queue_is_mq(q))
 		blk_mq_wake_waiters(q);
 	/* Make blk_queue_enter() reexamine the DYING flag. */
 	wake_up_all(&q->mq_freeze_wq);
 }
+void blk_set_queue_dying(struct request_queue *q)
+{
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
+}
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 /**
@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
-	rq_qos_exit(q);
 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
-	/* for synchronous bio-based driver finish in-flight integrity i/o */
-	blk_flush_integrity();
 	blk_sync_queue(q);
 	if (queue_is_mq(q))
 		blk_mq_exit_queue(q);
@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+	rcu_read_unlock();
+	return true;
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,40 +445,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
 	const bool pm = flags & BLK_MQ_REQ_PM;
-	while (true) {
-		bool success = false;
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-		if (success)
-			return 0;
+	while (!blk_try_enter_queue(q, pm)) {
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
 		 */
 		smp_rmb();
 		wait_event(q->mq_freeze_wq,
 			   (!q->mq_freeze_depth &&
 			    blk_pm_resume_queue(pm, q)) ||
@@ -466,23 +464,43 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
+	return 0;
 }
 static inline int bio_queue_enter(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-	bool nowait = bio->bi_opf & REQ_NOWAIT;
-	int ret;
-	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-	if (unlikely(ret)) {
-		if (nowait && !blk_queue_dying(q))
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
+	struct request_queue *q = disk->queue;
+	while (!blk_try_enter_queue(q, false)) {
+		if (bio->bi_opf & REQ_NOWAIT) {
+			if (test_bit(GD_DEAD, &disk->state))
+				goto dead;
 			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
+			return -EBUSY;
+		}
+		/*
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
+		 */
+		smp_rmb();
+		wait_event(q->mq_freeze_wq,
+			   (!q->mq_freeze_depth &&
+			    blk_pm_resume_queue(false, q)) ||
+			   test_bit(GD_DEAD, &disk->state));
+		if (test_bit(GD_DEAD, &disk->state))
+			goto dead;
 	}
-	return ret;
+	return 0;
+dead:
+	bio_io_error(bio);
+	return -ENODEV;
 }
void blk_queue_exit(struct request_queue *q) void blk_queue_exit(struct request_queue *q)
@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	blk_qc_t ret = BLK_QC_T_NONE;
-	if (blk_crypto_bio_prep(&bio)) {
-		if (!disk->fops->submit_bio)
-			return blk_mq_submit_bio(bio);
+	if (unlikely(bio_queue_enter(bio) != 0))
+		return BLK_QC_T_NONE;
+	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+		goto queue_exit;
+	if (disk->fops->submit_bio) {
 		ret = disk->fops->submit_bio(bio);
+		goto queue_exit;
 	}
+	return blk_mq_submit_bio(bio);
+queue_exit:
 	blk_queue_exit(disk->queue);
 	return ret;
 }
@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 		struct bio_list lower, same;
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
 		/*
 		 * Create a fresh bio_list for all subordinate requests.
 		 */
@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 {
 	struct bio_list bio_list[2] = { };
-	blk_qc_t ret = BLK_QC_T_NONE;
+	blk_qc_t ret;
 	current->bio_list = bio_list;
 	do {
-		struct gendisk *disk = bio->bi_bdev->bd_disk;
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-		if (!blk_crypto_bio_prep(&bio)) {
-			blk_queue_exit(disk->queue);
-			ret = BLK_QC_T_NONE;
-			continue;
-		}
-		ret = blk_mq_submit_bio(bio);
+		ret = __submit_bio(bio);
 	} while ((bio = bio_list_pop(&bio_list[0])));
 	current->bio_list = NULL;
@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
  */
 blk_qc_t submit_bio_noacct(struct bio *bio)
 {
-	if (!submit_bio_checks(bio))
-		return BLK_QC_T_NONE;
 	/*
 	 * We only want one ->submit_bio to be active at a time, else stack
 	 * usage with stacked devices could be a problem. Use current->bio_list

View File

@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
 	mutex_lock(&q->mq_freeze_lock);
+	if (force_atomic)
+		q->q_usage_counter.data->force_atomic = true;
 	q->mq_freeze_depth--;
 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
 	if (!q->mq_freeze_depth) {
@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 	}
 	mutex_unlock(&q->mq_freeze_lock);
 }
+void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+	__blk_mq_unfreeze_queue(q, false);
+}
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 /*

View File

@@ -51,6 +51,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+void blk_queue_start_drain(struct request_queue *q);
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,

View File

@@ -26,6 +26,7 @@
 #include <linux/badblocks.h>
 #include "blk.h"
+#include "blk-rq-qos.h"
 static struct kobject *block_depr;
@@ -559,6 +560,8 @@ EXPORT_SYMBOL(device_add_disk);
  */
 void del_gendisk(struct gendisk *disk)
 {
+	struct request_queue *q = disk->queue;
 	might_sleep();
 	if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
@@ -575,8 +578,27 @@ void del_gendisk(struct gendisk *disk)
 	fsync_bdev(disk->part0);
 	__invalidate_device(disk->part0, true);
+	/*
+	 * Fail any new I/O.
+	 */
+	set_bit(GD_DEAD, &disk->state);
 	set_capacity(disk, 0);
+	/*
+	 * Prevent new I/O from crossing bio_queue_enter().
+	 */
+	blk_queue_start_drain(q);
+	blk_mq_freeze_queue_wait(q);
+	rq_qos_exit(q);
+	blk_sync_queue(q);
+	blk_flush_integrity();
+	/*
+	 * Allow using passthrough request again after the queue is torn down.
+	 */
+	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+	__blk_mq_unfreeze_queue(q, true);
 	if (!(disk->flags & GENHD_FL_HIDDEN)) {
 		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -1056,6 +1078,7 @@ static void disk_release(struct device *dev)
 	struct gendisk *disk = dev_to_disk(dev);
 	might_sleep();
+	WARN_ON_ONCE(disk_live(disk));
 	disk_release_events(disk);
 	kfree(disk->random);

View File

@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
 struct kyber_queue_data {
 	struct request_queue *q;
+	dev_t dev;
 	/*
 	 * Each scheduling domain has a limited number of in-flight requests
@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
 	}
 	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
-	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+	trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
 			    kyber_latency_type_names[type], percentile,
 			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
 	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
 	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
 		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
-		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+		trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
 				   depth);
 	}
 }
@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		goto err;
 	kqd->q = q;
+	kqd->dev = disk_devt(q->disk);
 	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
 					    GFP_KERNEL | __GFP_ZERO);
@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
 			list_del_init(&rq->queuelist);
 			return rq;
 		} else {
-			trace_kyber_throttled(kqd->q,
+			trace_kyber_throttled(kqd->dev,
 					      kyber_domain_names[khd->cur_domain]);
 		}
 	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
 			list_del_init(&rq->queuelist);
 			return rq;
 		} else {
-			trace_kyber_throttled(kqd->q,
+			trace_kyber_throttled(kqd->dev,
 					      kyber_domain_names[khd->cur_domain]);
 		}
 	}

View File

@@ -440,10 +440,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
 	hpriv->phy_regulator = devm_regulator_get(dev, "phy");
 	if (IS_ERR(hpriv->phy_regulator)) {
 		rc = PTR_ERR(hpriv->phy_regulator);
-		if (rc == -EPROBE_DEFER)
-			goto err_out;
-		rc = 0;
-		hpriv->phy_regulator = NULL;
+		goto err_out;
 	}
 	if (flags & AHCI_PLATFORM_GET_RESETS) {

View File

@@ -352,7 +352,8 @@ static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
 		iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 	if (unlikely(slop)) {
-		__le32 pad;
+		__le32 pad = 0;
 		if (rw == READ) {
 			pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
 			memcpy(buf + buflen - slop, &pad, slop);
@@ -742,7 +743,8 @@ static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
 		ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
 	if (unlikely(slop)) {
-		__le32 pad;
+		__le32 pad = 0;
 		if (rw == WRITE) {
 			memcpy(&pad, buf + buflen - slop, slop);
 			iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);

View File

@@ -687,7 +687,8 @@ struct device_link *device_link_add(struct device *consumer,
 {
 	struct device_link *link;
-	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
+	if (!consumer || !supplier || consumer == supplier ||
+	    flags & ~DL_ADD_VALID_FLAGS ||
 	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
 	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
 	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||

View File

@@ -373,10 +373,22 @@ static int brd_alloc(int i)
 	struct gendisk *disk;
 	char buf[DISK_NAME_LEN];
+	mutex_lock(&brd_devices_mutex);
+	list_for_each_entry(brd, &brd_devices, brd_list) {
+		if (brd->brd_number == i) {
+			mutex_unlock(&brd_devices_mutex);
+			return -EEXIST;
+		}
+	}
 	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
-	if (!brd)
+	if (!brd) {
+		mutex_unlock(&brd_devices_mutex);
 		return -ENOMEM;
+	}
 	brd->brd_number = i;
+	list_add_tail(&brd->brd_list, &brd_devices);
+	mutex_unlock(&brd_devices_mutex);
 	spin_lock_init(&brd->brd_lock);
 	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
@@ -411,37 +423,30 @@ static int brd_alloc(int i)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	add_disk(disk);
-	list_add_tail(&brd->brd_list, &brd_devices);
 	return 0;
 out_free_dev:
+	mutex_lock(&brd_devices_mutex);
+	list_del(&brd->brd_list);
+	mutex_unlock(&brd_devices_mutex);
 	kfree(brd);
 	return -ENOMEM;
 }
 static void brd_probe(dev_t dev)
 {
-	int i = MINOR(dev) / max_part;
-	struct brd_device *brd;
-	mutex_lock(&brd_devices_mutex);
-	list_for_each_entry(brd, &brd_devices, brd_list) {
-		if (brd->brd_number == i)
-			goto out_unlock;
-	}
-	brd_alloc(i);
-out_unlock:
-	mutex_unlock(&brd_devices_mutex);
+	brd_alloc(MINOR(dev) / max_part);
 }
 static void brd_del_one(struct brd_device *brd)
 {
-	list_del(&brd->brd_list);
 	del_gendisk(brd->brd_disk);
 	blk_cleanup_disk(brd->brd_disk);
 	brd_free_pages(brd);
+	mutex_lock(&brd_devices_mutex);
+	list_del(&brd->brd_list);
+	mutex_unlock(&brd_devices_mutex);
 	kfree(brd);
 }
@@ -491,25 +496,21 @@ static int __init brd_init(void)
 	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-	mutex_lock(&brd_devices_mutex);
 	for (i = 0; i < rd_nr; i++) {
 		err = brd_alloc(i);
 		if (err)
 			goto out_free;
 	}
-	mutex_unlock(&brd_devices_mutex);
 	pr_info("brd: module loaded\n");
 	return 0;
 out_free:
+	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 	debugfs_remove_recursive(brd_debugfs_dir);
 	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
 		brd_del_one(brd);
-	mutex_unlock(&brd_devices_mutex);
-	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 	pr_info("brd: module NOT loaded !!!\n");
 	return err;
@@ -519,13 +520,12 @@ static void __exit brd_exit(void)
 {
 	struct brd_device *brd, *next;
+	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 	debugfs_remove_recursive(brd_debugfs_dir);
 	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
 		brd_del_one(brd);
-	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 	pr_info("brd: module unloaded\n");
 }

View File

@@ -71,8 +71,10 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
 	int opt_mask = 0;
 	int token;
 	int ret = -EINVAL;
-	int i, dest_port, nr_poll_queues;
+	int nr_poll_queues = 0;
+	int dest_port = 0;
 	int p_cnt = 0;
+	int i;
 	options = kstrdup(buf, GFP_KERNEL);
 	if (!options)

View File

@@ -691,28 +691,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
-static int virtblk_validate(struct virtio_device *vdev)
-{
-	u32 blk_size;
-	if (!vdev->config->get) {
-		dev_err(&vdev->dev, "%s failure: config access disabled\n",
-			__func__);
-		return -EINVAL;
-	}
-	if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
-		return 0;
-	blk_size = virtio_cread32(vdev,
-			offsetof(struct virtio_blk_config, blk_size));
-	if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
-		__virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
-	return 0;
-}
 static int virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
@@ -724,6 +702,12 @@ static int virtblk_probe(struct virtio_device *vdev)
 	u8 physical_block_exp, alignment_offset;
 	unsigned int queue_depth;
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
 	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
 			     GFP_KERNEL);
 	if (err < 0)
@@ -838,14 +822,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	else
 		blk_size = queue_logical_block_size(q);
-	if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
-		dev_err(&vdev->dev,
-			"block size is changed unexpectedly, now is %u\n",
-			blk_size);
-		err = -EINVAL;
-		goto out_cleanup_disk;
-	}
 	/* Use topology information if available */
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
 				   struct virtio_blk_config, physical_block_exp,
@@ -1011,7 +987,6 @@ static struct virtio_driver virtio_blk = {
 	.driver.name = KBUILD_MODNAME,
 	.driver.owner = THIS_MODULE,
 	.id_table = id_table,
-	.validate = virtblk_validate,
 	.probe = virtblk_probe,
 	.remove = virtblk_remove,
 	.config_changed = virtblk_config_changed,

View File

@@ -152,18 +152,6 @@ config QCOM_EBI2
 	  Interface 2, which can be used to connect things like NAND Flash,
 	  SRAM, ethernet adapters, FPGAs and LCD displays.
-config SIMPLE_PM_BUS
-	tristate "Simple Power-Managed Bus Driver"
-	depends on OF && PM
-	help
-	  Driver for transparent busses that don't need a real driver, but
-	  where the bus controller is part of a PM domain, or under the control
-	  of a functional clock, and thus relies on runtime PM for managing
-	  this PM domain and/or clock.
-	  An example of such a bus controller is the Renesas Bus State
-	  Controller (BSC, sometimes called "LBSC within Bus Bridge", or
-	  "External Bus Interface") as found on several Renesas ARM SoCs.
 config SUN50I_DE2_BUS
 	bool "Allwinner A64 DE2 Bus Driver"
 	default ARM64

View File

@@ -27,7 +27,7 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
 obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
 obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
 obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
-obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_OF) += simple-pm-bus.o
 obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
 obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
 obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o

View File

@@ -13,11 +13,36 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 static int simple_pm_bus_probe(struct platform_device *pdev)
 {
-	const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
-	struct device_node *np = pdev->dev.of_node;
+	const struct device *dev = &pdev->dev;
+	const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
+	struct device_node *np = dev->of_node;
+	const struct of_device_id *match;
+	/*
+	 * Allow user to use driver_override to bind this driver to a
+	 * transparent bus device which has a different compatible string
+	 * that's not listed in simple_pm_bus_of_match. We don't want to do any
+	 * of the simple-pm-bus tasks for these devices, so return early.
+	 */
+	if (pdev->driver_override)
+		return 0;
+	match = of_match_device(dev->driver->of_match_table, dev);
+	/*
+	 * These are transparent bus devices (not simple-pm-bus matches) that
+	 * have their child nodes populated automatically. So, don't need to
+	 * do anything more. We only match with the device if this driver is
+	 * the most specific match because we don't want to incorrectly bind to
+	 * a device that has a more specific driver.
+	 */
+	if (match && match->data) {
+		if (of_property_match_string(np, "compatible", match->compatible) == 0)
+			return 0;
+		else
+			return -ENODEV;
+	}
 	dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -31,14 +56,25 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
 static int simple_pm_bus_remove(struct platform_device *pdev)
 {
+	const void *data = of_device_get_match_data(&pdev->dev);
+	if (pdev->driver_override || data)
+		return 0;
 	dev_dbg(&pdev->dev, "%s\n", __func__);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
+#define ONLY_BUS	((void *) 1) /* Match if the device is only a bus. */
 static const struct of_device_id simple_pm_bus_of_match[] = {
 	{ .compatible = "simple-pm-bus", },
+	{ .compatible = "simple-bus", .data = ONLY_BUS },
+	{ .compatible = "simple-mfd", .data = ONLY_BUS },
+	{ .compatible = "isa", .data = ONLY_BUS },
+	{ .compatible = "arm,amba-bus", .data = ONLY_BUS },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);

View File

@@ -564,6 +564,7 @@ config SM_GCC_6125
 config SM_GCC_6350
 	tristate "SM6350 Global Clock Controller"
+	select QCOM_GDSC
 	help
 	  Support for the global clock controller on SM6350 devices.
 	  Say Y if you want to use peripheral devices such as UART,

View File

@@ -3242,7 +3242,7 @@ static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
 };
 static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
-	.gdscr = 0x7d060,
+	.gdscr = 0x7d07c,
 	.pd = {
 		.name = "hlos1_vote_turing_mmu_tbu0",
 	},

View File

@@ -186,6 +186,8 @@ static struct rzg2l_reset r9a07g044_resets[] = {
 static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
 	MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
+	MOD_CLK_BASE + R9A07G044_IA55_CLK,
+	MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
 };
 const struct rzg2l_cpg_info r9a07g044_cpg_info = {

View File

@@ -391,7 +391,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
 	value = readl(priv->base + CLK_MON_R(clock->off));
-	return !(value & bitmask);
+	return value & bitmask;
 }
 static const struct clk_ops rzg2l_mod_clock_ops = {

View File

@@ -165,13 +165,6 @@ static const struct clk_parent_data mpu_mux[] = {
.name = "boot_clk", }, .name = "boot_clk", },
}; };
static const struct clk_parent_data s2f_usr0_mux[] = {
{ .fw_name = "f2s-free-clk",
.name = "f2s-free-clk", },
{ .fw_name = "boot_clk",
.name = "boot_clk", },
};
static const struct clk_parent_data emac_mux[] = { static const struct clk_parent_data emac_mux[] = {
{ .fw_name = "emaca_free_clk", { .fw_name = "emaca_free_clk",
.name = "emaca_free_clk", }, .name = "emaca_free_clk", },
@@ -312,8 +305,6 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = {
 	  4, 0x44, 28, 1, 0, 0, 0},
 	{ AGILEX_CS_TIMER_CLK, "cs_timer_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 0, 0x24,
 	  5, 0, 0, 0, 0x30, 1, 0},
-	{ AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
-	  6, 0, 0, 0, 0, 0, 0},
 	{ AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
 	  0, 0, 0, 0, 0x94, 26, 0},
 	{ AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,

View File

@@ -178,7 +178,7 @@ static void axp_mc_check(struct mem_ctl_info *mci)
"details unavailable (multiple errors)"); "details unavailable (multiple errors)");
if (cnt_dbe) if (cnt_dbe)
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
cnt_sbe, /* error count */ cnt_dbe, /* error count */
0, 0, 0, /* pfn, offset, syndrome */ 0, 0, 0, /* pfn, offset, syndrome */
-1, -1, -1, /* top, mid, low layer */ -1, -1, -1, /* top, mid, low layer */
mci->ctl_name, mci->ctl_name,

View File

@@ -25,8 +25,6 @@
 #include <acpi/ghes.h>
 #include <ras/ras_event.h>
-static char rcd_decode_str[CPER_REC_LEN];
 /*
  * CPER record ID need to be unique even after reboot, because record
  * ID is used as index for ERST storage, while CPER records from
@@ -312,6 +310,7 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
 			      struct cper_mem_err_compact *cmem)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
+	char rcd_decode_str[CPER_REC_LEN];
 	if (cper_mem_err_location(cmem, rcd_decode_str))
 		trace_seq_printf(p, "%s", rcd_decode_str);
@@ -326,6 +325,7 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
 			   int len)
 {
 	struct cper_mem_err_compact cmem;
+	char rcd_decode_str[CPER_REC_LEN];
 	/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
 	if (len == sizeof(struct cper_sec_mem_err_old) &&

View File

@@ -271,7 +271,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
 		return status;
 	}
-	efi_info("Exiting boot services and installing virtual address map...\n");
+	efi_info("Exiting boot services...\n");
 	map.map = &memory_map;
 	status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);

View File

@@ -414,7 +414,7 @@ static void virt_efi_reset_system(int reset_type,
 				   unsigned long data_size,
 				   efi_char16_t *data)
 {
-	if (down_interruptible(&efi_runtime_lock)) {
+	if (down_trylock(&efi_runtime_lock)) {
 		pr_warn("failed to invoke the reset_system() runtime service:\n"
 			"could not get exclusive access to the firmware\n");
 		return;

View File

@@ -192,12 +192,19 @@ static const struct of_device_id ice40_fpga_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
+static const struct spi_device_id ice40_fpga_spi_ids[] = {
+	{ .name = "ice40-fpga-mgr", },
+	{},
+};
+MODULE_DEVICE_TABLE(spi, ice40_fpga_spi_ids);
 static struct spi_driver ice40_fpga_driver = {
 	.probe = ice40_fpga_probe,
 	.driver = {
 		.name = "ice40spi",
 		.of_match_table = of_match_ptr(ice40_fpga_of_match),
 	},
+	.id_table = ice40_fpga_spi_ids,
 };
 module_spi_driver(ice40_fpga_driver);

View File

@@ -738,7 +738,7 @@ static irqreturn_t fxls8962af_interrupt(int irq, void *p)
 	if (reg & FXLS8962AF_INT_STATUS_SRC_BUF) {
 		ret = fxls8962af_fifo_flush(indio_dev);
-		if (ret)
+		if (ret < 0)
 			return IRQ_NONE;
 		return IRQ_HANDLED;

View File

@@ -293,6 +293,7 @@ static const struct ad_sigma_delta_info ad7192_sigma_delta_info = {
 	.has_registers = true,
 	.addr_shift = 3,
 	.read_mask = BIT(6),
+	.irq_flags = IRQF_TRIGGER_FALLING,
 };
 static const struct ad_sd_calib_data ad7192_calib_arr[8] = {

View File

@@ -203,7 +203,7 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
 	.set_mode = ad7780_set_mode,
 	.postprocess_sample = ad7780_postprocess_sample,
 	.has_registers = false,
-	.irq_flags = IRQF_TRIGGER_LOW,
+	.irq_flags = IRQF_TRIGGER_FALLING,
 };
 #define _AD7780_CHANNEL(_bits, _wordsize, _mask_all) \

View File

@@ -206,7 +206,7 @@ static const struct ad_sigma_delta_info ad7793_sigma_delta_info = {
 	.has_registers = true,
 	.addr_shift = 3,
 	.read_mask = BIT(6),
-	.irq_flags = IRQF_TRIGGER_LOW,
+	.irq_flags = IRQF_TRIGGER_FALLING,
 };
 static const struct ad_sd_calib_data ad7793_calib_arr[6] = {

View File

@@ -183,6 +183,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 	data = iio_priv(indio_dev);
 	data->dev = &pdev->dev;
+	platform_set_drvdata(pdev, indio_dev);
 	data->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(data->base))

View File

@@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
 			.sign = 'u',				\
 			.realbits = depth,			\
 			.storagebits = 16,			\
-			.shift = 2,				\
+			.shift = (depth == 10) ? 2 : 0,		\
 			.endianness = IIO_BE,			\
 		},						\
 	}
@@ -142,7 +142,6 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
 	MAX1027_V_CHAN(11, depth)
 #define MAX1X31_CHANNELS(depth)		\
-	MAX1X27_CHANNELS(depth),	\
 	MAX1X29_CHANNELS(depth),	\
 	MAX1027_V_CHAN(12, depth),	\
 	MAX1027_V_CHAN(13, depth),	\

View File

@@ -82,6 +82,10 @@ static const struct iio_chan_spec mt6577_auxadc_iio_channels[] = {
 	MT6577_AUXADC_CHANNEL(15),
 };
+/* For Voltage calculation */
+#define VOLTAGE_FULL_RANGE 1500		/* VA voltage */
+#define AUXADC_PRECISE 4096		/* 12 bits */
 static int mt_auxadc_get_cali_data(int rawdata, bool enable_cali)
 {
 	return rawdata;
@@ -191,6 +195,10 @@ static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
 		}
 		if (adc_dev->dev_comp->sample_data_cali)
 			*val = mt_auxadc_get_cali_data(*val, true);
+		/* Convert adc raw data to voltage: 0 - 1500 mV */
+		*val = *val * VOLTAGE_FULL_RANGE / AUXADC_PRECISE;
 		return IIO_VAL_INT;
 	default:

View File

@@ -401,7 +401,7 @@ static int rzg2l_adc_hw_init(struct rzg2l_adc *adc)
 exit_hw_init:
 	clk_disable_unprepare(adc->pclk);
-	return 0;
+	return ret;
 }
 static void rzg2l_adc_pm_runtime_disable(void *data)
@@ -570,8 +570,10 @@ static int __maybe_unused rzg2l_adc_pm_runtime_resume(struct device *dev)
 		return ret;
 	ret = clk_prepare_enable(adc->adclk);
-	if (ret)
+	if (ret) {
+		clk_disable_unprepare(adc->pclk);
 		return ret;
+	}
 	rzg2l_adc_pwr(adc, true);

View File

@@ -171,7 +171,13 @@ static int adc128_probe(struct spi_device *spi)
 	mutex_init(&adc->lock);
 	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto err_disable_regulator;
+	return 0;
+err_disable_regulator:
+	regulator_disable(adc->reg);
 	return ret;
 }

View File

@@ -137,7 +137,7 @@ static int ssp_print_mcu_debug(char *data_frame, int *data_index,
 	if (length > received_len - *data_index || length <= 0) {
 		ssp_dbg("[SSP]: MSG From MCU-invalid debug length(%d/%d)\n",
 			length, received_len);
-		return length ? length : -EPROTO;
+		return -EPROTO;
 	}
 	ssp_dbg("[SSP]: MSG From MCU - %s\n", &data_frame[*data_index]);
@@ -273,6 +273,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 	for (idx = 0; idx < len;) {
 		switch (dataframe[idx++]) {
 		case SSP_MSG2AP_INST_BYPASS_DATA:
+			if (idx >= len)
+				return -EPROTO;
 			sd = dataframe[idx++];
 			if (sd < 0 || sd >= SSP_SENSOR_MAX) {
 				dev_err(SSP_DEV,
@@ -282,10 +284,13 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 			if (indio_devs[sd]) {
 				spd = iio_priv(indio_devs[sd]);
-				if (spd->process_data)
+				if (spd->process_data) {
+					if (idx >= len)
+						return -EPROTO;
 					spd->process_data(indio_devs[sd],
 							  &dataframe[idx],
 							  data->timestamp);
+				}
 			} else {
 				dev_err(SSP_DEV, "no client for frame\n");
 			}
@@ -293,6 +298,8 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len)
 			idx += ssp_offset_map[sd];
 			break;
 		case SSP_MSG2AP_INST_DEBUG_DATA:
+			if (idx >= len)
+				return -EPROTO;
 			sd = ssp_print_mcu_debug(dataframe, &idx, len);
 			if (sd) {
 				dev_err(SSP_DEV,

View File

@@ -350,6 +350,7 @@ static int dac5571_probe(struct i2c_client *client,
 		data->dac5571_pwrdwn = dac5571_pwrdwn_quad;
 		break;
 	default:
+		ret = -EINVAL;
 		goto err;
 	}

View File

@@ -353,10 +353,11 @@ static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
 	if (dec > st->info->max_dec)
 		dec = st->info->max_dec;
-	ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+	ret = __adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
 	if (ret)
 		goto error;
+	adis_dev_unlock(&st->adis);
 	/*
 	 * If decimation is used, then gyro and accel data will have meaningful
 	 * bits on the LSB registers. This info is used on the trigger handler.

View File

@@ -144,6 +144,7 @@ struct adis16480_chip_info {
 	unsigned int max_dec_rate;
 	const unsigned int *filter_freqs;
 	bool has_pps_clk_mode;
+	bool has_sleep_cnt;
 	const struct adis_data adis_data;
 };
@@ -939,6 +940,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.temp_scale = 5650, /* 5.65 milli degree Celsius */
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
+		.has_sleep_cnt = true,
 		.filter_freqs = adis16480_def_filter_freqs,
 		.adis_data = ADIS16480_DATA(16375, &adis16485_timeouts, 0),
 	},
@@ -952,6 +954,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.temp_scale = 5650, /* 5.65 milli degree Celsius */
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
+		.has_sleep_cnt = true,
 		.filter_freqs = adis16480_def_filter_freqs,
 		.adis_data = ADIS16480_DATA(16480, &adis16480_timeouts, 0),
 	},
@@ -965,6 +968,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.temp_scale = 5650, /* 5.65 milli degree Celsius */
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
+		.has_sleep_cnt = true,
 		.filter_freqs = adis16480_def_filter_freqs,
 		.adis_data = ADIS16480_DATA(16485, &adis16485_timeouts, 0),
 	},
@@ -978,6 +982,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.temp_scale = 5650, /* 5.65 milli degree Celsius */
 		.int_clk = 2460000,
 		.max_dec_rate = 2048,
+		.has_sleep_cnt = true,
 		.filter_freqs = adis16480_def_filter_freqs,
 		.adis_data = ADIS16480_DATA(16488, &adis16485_timeouts, 0),
 	},
@@ -1425,9 +1430,12 @@ static int adis16480_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
-	ret = devm_add_action_or_reset(&spi->dev, adis16480_stop, indio_dev);
-	if (ret)
-		return ret;
+	if (st->chip_info->has_sleep_cnt) {
+		ret = devm_add_action_or_reset(&spi->dev, adis16480_stop,
+					       indio_dev);
+		if (ret)
+			return ret;
+	}
 	ret = adis16480_config_irq_pin(spi->dev.of_node, st);
 	if (ret)

View File

@@ -276,6 +276,8 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
 		ret = wait_event_timeout(opt->result_ready_queue,
 				opt->result_ready,
 				msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+		if (ret == 0)
+			return -ETIMEDOUT;
 	} else {
 		/* Sleep for result ready time */
 		timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
@@ -312,9 +314,7 @@ err:
 	/* Disallow IRQ to access the device while lock is active */
 	opt->ok_to_ignore_lock = false;
-	if (ret == 0)
-		return -ETIMEDOUT;
-	else if (ret < 0)
+	if (ret < 0)
 		return ret;
 	if (opt->use_irq) {

View File

@@ -334,6 +334,7 @@ static const struct xpad_device {
 	{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
 	{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
 	{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+	{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
 	{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
 	{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
 	{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -451,6 +452,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x24c6),		/* PowerA Controllers */
 	XPAD_XBOXONE_VENDOR(0x2e24),		/* Hyperkin Duke X-Box One pad */
 	XPAD_XBOX360_VENDOR(0x2f24),		/* GameSir Controllers */
+	XPAD_XBOX360_VENDOR(0x3285),		/* Nacon GC-100 */
 	{ }
 };

View File

@@ -3,6 +3,7 @@
 // Driver for the IMX SNVS ON/OFF Power Key
 // Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
+static void imx_snvs_pwrkey_disable_clk(void *data)
+{
+	clk_disable_unprepare(data);
+}
 static void imx_snvs_pwrkey_act(void *pdata)
 {
 	struct pwrkey_drv_data *pd = pdata;
@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
 	struct pwrkey_drv_data *pdata;
 	struct input_dev *input;
 	struct device_node *np;
+	struct clk *clk;
 	int error;
 	u32 vid;
@@ -134,6 +141,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
 		dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
 	}
+	clk = devm_clk_get_optional(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
+		return PTR_ERR(clk);
+	}
+	error = clk_prepare_enable(clk);
+	if (error) {
+		dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
+			ERR_PTR(error));
+		return error;
+	}
+	error = devm_add_action_or_reset(&pdev->dev,
+					 imx_snvs_pwrkey_disable_clk, clk);
+	if (error) {
+		dev_err(&pdev->dev,
+			"Failed to register clock cleanup handler (%pe)\n",
+			ERR_PTR(error));
+		return error;
+	}
 	pdata->wakeup = of_property_read_bool(np, "wakeup-source");
 	pdata->irq = platform_get_irq(pdev, 0);

View File

@@ -80,12 +80,12 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x", data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
input_abs_get_min(input, axis_x), input_abs_get_min(input, axis_x),
&minimum) | &minimum);
touchscreen_get_prop_u32(dev, "touchscreen-size-x", data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-x",
input_abs_get_max(input, input_abs_get_max(input,
axis_x) + 1, axis_x) + 1,
&maximum) | &maximum);
touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x", data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
input_abs_get_fuzz(input, axis_x), input_abs_get_fuzz(input, axis_x),
&fuzz); &fuzz);
if (data_present) if (data_present)
@@ -93,12 +93,12 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y", data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
input_abs_get_min(input, axis_y), input_abs_get_min(input, axis_y),
&minimum) | &minimum);
touchscreen_get_prop_u32(dev, "touchscreen-size-y", data_present |= touchscreen_get_prop_u32(dev, "touchscreen-size-y",
input_abs_get_max(input, input_abs_get_max(input,
axis_y) + 1, axis_y) + 1,
&maximum) | &maximum);
touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y", data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
input_abs_get_fuzz(input, axis_y), input_abs_get_fuzz(input, axis_y),
&fuzz); &fuzz);
if (data_present) if (data_present)
@@ -108,8 +108,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
 		data_present = touchscreen_get_prop_u32(dev,
 						"touchscreen-max-pressure",
 						input_abs_get_max(input, axis),
-						&maximum) |
-			       touchscreen_get_prop_u32(dev,
+						&maximum);
+		data_present |= touchscreen_get_prop_u32(dev,
 						"touchscreen-fuzz-pressure",
 						input_abs_get_fuzz(input, axis),
 						&fuzz);


@@ -71,6 +71,7 @@ static int grts_cb(const void *data, void *private)
unsigned int z2 = touch_info[st->ch_map[GRTS_CH_Z2]];
unsigned int Rt;
+if (likely(x && z1)) {
Rt = z2;
Rt -= z1;
Rt *= st->x_plate_ohms;
@@ -79,12 +80,14 @@ static int grts_cb(const void *data, void *private)
Rt /= z1;
Rt = DIV_ROUND_CLOSEST(Rt, 256);
/*
-* On increased pressure the resistance (Rt) is decreasing
+* On increased pressure the resistance (Rt) is
-* so, convert values to make it looks as real pressure.
+* decreasing so, convert values to make it looks as
+* real pressure.
*/
if (Rt < GRTS_DEFAULT_PRESSURE_MAX)
press = GRTS_DEFAULT_PRESSURE_MAX - Rt;
}
+}
if ((!x && !y) || (st->pressure && (press < st->pressure_min))) {
/* report end of touch */


@@ -224,6 +224,7 @@ config HI6421V600_IRQ
tristate "HiSilicon Hi6421v600 IRQ and powerkey" tristate "HiSilicon Hi6421v600 IRQ and powerkey"
depends on OF depends on OF
depends on SPMI depends on SPMI
depends on HAS_IOMEM
select MFD_CORE select MFD_CORE
select REGMAP_SPMI select REGMAP_SPMI
help help


@@ -47,7 +47,7 @@ static inline bool needs_unaligned_copy(const void *ptr)
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
-return ((ptr - NULL) & 3) != 0;
+return ((uintptr_t)ptr & 3) != 0;
#endif
}


@@ -366,6 +366,13 @@ static const struct of_device_id at25_of_match[] = {
}; };
MODULE_DEVICE_TABLE(of, at25_of_match); MODULE_DEVICE_TABLE(of, at25_of_match);
static const struct spi_device_id at25_spi_ids[] = {
{ .name = "at25",},
{ .name = "fm25",},
{ }
};
MODULE_DEVICE_TABLE(spi, at25_spi_ids);
static int at25_probe(struct spi_device *spi) static int at25_probe(struct spi_device *spi)
{ {
struct at25_data *at25 = NULL; struct at25_data *at25 = NULL;
@@ -491,6 +498,7 @@ static struct spi_driver at25_driver = {
.dev_groups = sernum_groups, .dev_groups = sernum_groups,
}, },
.probe = at25_probe, .probe = at25_probe,
.id_table = at25_spi_ids,
}; };
module_spi_driver(at25_driver); module_spi_driver(at25_driver);
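
The id tables added in this hunk are what let the SPI core match and autoload the module when a device is instantiated by plain name rather than through the OF table. A generic sketch of the same arrangement follows; "vendor,example-eeprom" and all example_* identifiers are placeholders invented for the illustration, and example_probe() is assumed to exist elsewhere.

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-eeprom" },
        { }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static const struct spi_device_id example_spi_ids[] = {
        { .name = "example-eeprom" },   /* matches the modalias reported by the SPI core */
        { }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static struct spi_driver example_driver = {
        .driver = {
                .name           = "example-eeprom",
                .of_match_table = example_of_match,
        },
        .probe          = example_probe,        /* assumed elsewhere */
        .id_table       = example_spi_ids,
};
module_spi_driver(example_driver);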


@@ -406,6 +406,23 @@ static const struct of_device_id eeprom_93xx46_of_table[] = {
}; };
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
{ .name = "eeprom-93xx46",
.driver_data = (kernel_ulong_t)&at93c46_data, },
{ .name = "at93c46",
.driver_data = (kernel_ulong_t)&at93c46_data, },
{ .name = "at93c46d",
.driver_data = (kernel_ulong_t)&atmel_at93c46d_data, },
{ .name = "at93c56",
.driver_data = (kernel_ulong_t)&at93c56_data, },
{ .name = "at93c66",
.driver_data = (kernel_ulong_t)&at93c66_data, },
{ .name = "93lc46b",
.driver_data = (kernel_ulong_t)&microchip_93lc46b_data, },
{}
};
MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
static int eeprom_93xx46_probe_dt(struct spi_device *spi) static int eeprom_93xx46_probe_dt(struct spi_device *spi)
{ {
const struct of_device_id *of_id = const struct of_device_id *of_id =
@@ -555,6 +572,7 @@ static struct spi_driver eeprom_93xx46_driver = {
}, },
.probe = eeprom_93xx46_probe, .probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove, .remove = eeprom_93xx46_remove,
.id_table = eeprom_93xx46_spi_ids,
}; };
module_spi_driver(eeprom_93xx46_driver); module_spi_driver(eeprom_93xx46_driver);


@@ -814,10 +814,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
rpra[i].pv = (u64) ctx->args[i].ptr; rpra[i].pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys; pages[i].addr = ctx->maps[i]->phys;
mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr); vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma) if (vma)
pages[i].addr += ctx->args[i].ptr - pages[i].addr += ctx->args[i].ptr -
vma->vm_start; vma->vm_start;
mmap_read_unlock(current->mm);
pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT; pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >> pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>


@@ -539,6 +539,7 @@ static int gehc_achc_probe(struct spi_device *spi)
static const struct spi_device_id gehc_achc_id[] = { static const struct spi_device_id gehc_achc_id[] = {
{ "ge,achc", 0 }, { "ge,achc", 0 },
{ "achc", 0 },
{ } { }
}; };
MODULE_DEVICE_TABLE(spi, gehc_achc_id); MODULE_DEVICE_TABLE(spi, gehc_achc_id);


@@ -2649,11 +2649,18 @@ put_ctx:
free_seq_arr: free_seq_arr:
kfree(cs_seq_arr); kfree(cs_seq_arr);
/* update output args */
memset(args, 0, sizeof(*args));
if (rc) if (rc)
return rc; return rc;
if (mcs_data.wait_status == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for Multi-CS\n");
return -EINTR;
}
/* update output args */
memset(args, 0, sizeof(*args));
if (mcs_data.completion_bitmap) { if (mcs_data.completion_bitmap) {
args->out.status = HL_WAIT_CS_STATUS_COMPLETED; args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
args->out.cs_completion_map = mcs_data.completion_bitmap; args->out.cs_completion_map = mcs_data.completion_bitmap;
@@ -2667,8 +2674,6 @@ free_seq_arr:
/* update if some CS was gone */ /* update if some CS was gone */
if (mcs_data.timestamp) if (mcs_data.timestamp)
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
} else if (mcs_data.wait_status == -ERESTARTSYS) {
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
} else { } else {
args->out.status = HL_WAIT_CS_STATUS_BUSY; args->out.status = HL_WAIT_CS_STATUS_BUSY;
} }
@@ -2688,16 +2693,17 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
&status, &timestamp); &status, &timestamp);
memset(args, 0, sizeof(*args));
if (rc) {
if (rc == -ERESTARTSYS) { if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for CS handle %llu\n", "user process got signal while waiting for CS handle %llu\n",
seq); seq);
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED; return -EINTR;
rc = -EINTR; }
} else if (rc == -ETIMEDOUT) {
memset(args, 0, sizeof(*args));
if (rc) {
if (rc == -ETIMEDOUT) {
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"CS %llu has timed-out while user process is waiting for it\n", "CS %llu has timed-out while user process is waiting for it\n",
seq); seq);
@@ -2823,7 +2829,6 @@ wait_again:
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for interrupt ID %d\n", "user process got signal while waiting for interrupt ID %d\n",
interrupt->interrupt_id); interrupt->interrupt_id);
*status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR; rc = -EINTR;
} else { } else {
*status = CS_WAIT_STATUS_BUSY; *status = CS_WAIT_STATUS_BUSY;
@@ -2878,8 +2883,6 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
args->in.interrupt_timeout_us, args->in.addr, args->in.interrupt_timeout_us, args->in.addr,
args->in.target, interrupt_offset, &status); args->in.target, interrupt_offset, &status);
memset(args, 0, sizeof(*args));
if (rc) { if (rc) {
if (rc != -EINTR) if (rc != -EINTR)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
@@ -2888,6 +2891,8 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return rc; return rc;
} }
memset(args, 0, sizeof(*args));
switch (status) { switch (status) {
case CS_WAIT_STATUS_COMPLETED: case CS_WAIT_STATUS_COMPLETED:
args->out.status = HL_WAIT_CS_STATUS_COMPLETED; args->out.status = HL_WAIT_CS_STATUS_COMPLETED;


@@ -1298,7 +1298,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_STARTING) {
-if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
return 0;
}
@@ -1381,7 +1382,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_DR_SETUP) {
-if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
return 0;
}
@@ -1448,7 +1450,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
-if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
return 0;
}
@@ -1490,7 +1493,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
-if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
return 0;
}


@@ -92,6 +92,7 @@
#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */ #define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
#define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */ #define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */


@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},


@@ -3550,10 +3550,15 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
return 0;
}
+static void nvme_cdev_rel(struct device *dev)
+{
+ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{
cdev_device_del(cdev, cdev_device);
-ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
+put_device(cdev_device);
}
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -3566,14 +3571,14 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
cdev_device->class = nvme_ns_chr_class;
+cdev_device->release = nvme_cdev_rel;
device_initialize(cdev_device);
cdev_init(cdev, fops);
cdev->owner = owner;
ret = cdev_device_add(cdev, cdev_device);
-if (ret) {
+if (ret)
put_device(cdev_device);
-ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
-}
return ret;
}
@@ -3605,11 +3610,9 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
ns->ctrl->instance, ns->head->instance);
if (ret)
return ret;
-ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
ns->ctrl->ops->module);
-if (ret)
-kfree_const(ns->cdev_device.kobj.name);
-return ret;
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,


@@ -431,8 +431,6 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
return ret;
ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
&nvme_ns_head_chr_fops, THIS_MODULE);
-if (ret)
-kfree_const(head->cdev_device.kobj.name);
return ret;
}


@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
iod->aborted = 1;
cmd.abort.opcode = nvme_admin_abort_cmd;
-cmd.abort.cid = req->tag;
+cmd.abort.cid = nvme_cid(req);
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,


@@ -1383,6 +1383,7 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
*p-- = 0; *p-- = 0;
/* clear msb bits if any leftover in the last byte */ /* clear msb bits if any leftover in the last byte */
if (cell->nbits % BITS_PER_BYTE)
*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
} }
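
The new check guards the corner case where a cell occupies an exact multiple of eight bits. A minimal sketch of the same idea, assuming kernel headers; mask_last_byte() is a hypothetical helper, not a function from the nvmem code above:

static void mask_last_byte(u8 *p, unsigned int nbits)
{
        /*
         * When nbits is a whole number of bytes, (nbits % BITS_PER_BYTE) - 1
         * would underflow and GENMASK() would be ill-defined, so the last
         * byte is left untouched in that case.
         */
        if (nbits % BITS_PER_BYTE)
                *p &= GENMASK((nbits % BITS_PER_BYTE) - 1, 0);
}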


@@ -5,7 +5,6 @@ config SOC_K210_SYSCTL
depends on RISCV && SOC_CANAAN && OF depends on RISCV && SOC_CANAAN && OF
default SOC_CANAAN default SOC_CANAAN
select PM select PM
select SIMPLE_PM_BUS
select SYSCON select SYSCON
select MFD_SYSCON select MFD_SYSCON
help help


@@ -248,7 +248,7 @@ void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
#ifdef CONFIG_88EU_AP_MODE
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &adapt->stapriv;
-if ((mac_id - 1) > 0)
+if (mac_id >= 2)
psta = pstapriv->sta_aid[(mac_id - 1) - 1];
if (psta)
add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/


@@ -182,7 +182,7 @@ create_pagelist(char *buf, char __user *ubuf,
offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
-if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
+if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
sizeof(struct vchiq_pagelist_info)) /
(sizeof(u32) + sizeof(pages[0]) +
sizeof(struct scatterlist)))


@@ -361,9 +361,13 @@ config SERIAL_8250_BCM2835AUX
If unsure, say N.
config SERIAL_8250_FSL
-bool
+bool "Freescale 16550 UART support" if COMPILE_TEST && !(PPC || ARM || ARM64)
depends on SERIAL_8250_CONSOLE
-default PPC || ARM || ARM64 || COMPILE_TEST
+default PPC || ARM || ARM64
+help
+Selecting this option enables a workaround for a break-detection
+erratum for Freescale 16550 UARTs in the 8250 driver. It also
+enables support for ACPI enumeration.
config SERIAL_8250_DW
tristate "Support for Synopsys DesignWare 8250 quirks"


@@ -408,40 +408,38 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
return -EBUSY;
xhci_dbc_tty_init_port(dbc, port);
-tty_dev = tty_port_register_device(&port->port,
-dbc_tty_driver, 0, NULL);
-if (IS_ERR(tty_dev)) {
-ret = PTR_ERR(tty_dev);
-goto register_fail;
-}
ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
if (ret)
-goto buf_alloc_fail;
+goto err_exit_port;
ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
dbc_read_complete);
if (ret)
-goto request_fail;
+goto err_free_fifo;
ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
dbc_write_complete);
if (ret)
-goto request_fail;
+goto err_free_requests;
+tty_dev = tty_port_register_device(&port->port,
+dbc_tty_driver, 0, NULL);
+if (IS_ERR(tty_dev)) {
+ret = PTR_ERR(tty_dev);
+goto err_free_requests;
+}
port->registered = true;
return 0;
-request_fail:
+err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
+err_free_fifo:
kfifo_free(&port->write_fifo);
+err_exit_port:
-buf_alloc_fail:
-tty_unregister_device(dbc_tty_driver, 0);
-register_fail:
xhci_dbc_tty_exit_port(port);
dev_err(dbc->dev, "can't register tty port, err %d\n", ret);


@@ -30,6 +30,7 @@
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
#define PCI_VENDOR_ID_ETRON 0x1b6f
@@ -113,6 +114,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
@@ -279,8 +281,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
-if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS;
+}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)


@@ -366,16 +366,22 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held, releases and aquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
-u64 temp_64;
+u32 temp_32;
int ret;
xhci_dbg(xhci, "Abort command ring\n");
reinit_completion(&xhci->cmd_ring_stop_completion);
-temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
-&xhci->op_regs->cmd_ring);
+/*
+ * The control bits like command stop, abort are located in lower
+ * dword of the command ring control register. Limit the write
+ * to the lower dword to avoid corrupting the command ring pointer
+ * in case if the command ring is stopped by the time upper dword
+ * is written.
+ */
+temp_32 = readl(&xhci->op_regs->cmd_ring);
+writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
* completion of the Command Abort operation. If CRR is not negated in 5
@@ -559,8 +565,11 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_command *cmd;
struct xhci_segment *new_seg;
+struct xhci_segment *halted_seg = NULL;
union xhci_trb *new_deq;
int new_cycle;
+union xhci_trb *halted_trb;
+int index = 0;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
@@ -598,7 +607,27 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
+/*
+ * Quirk: xHC write-back of the DCS field in the hardware dequeue
+ * pointer is wrong - use the cycle state of the TRB pointed to by
+ * the dequeue pointer.
+ */
+if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
+    !(ep->ep_state & EP_HAS_STREAMS))
+halted_seg = trb_in_td(xhci, td->start_seg,
+td->first_trb, td->last_trb,
+hw_dequeue & ~0xf, false);
+if (halted_seg) {
+index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
+sizeof(*halted_trb);
+halted_trb = &halted_seg->trbs[index];
+new_cycle = halted_trb->generic.field[3] & 0x1;
+xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
+(u8)(hw_dequeue & 0x1), index, new_cycle);
+} else {
new_cycle = hw_dequeue & 0x1;
+}
/*
* We want to find the pointer, segment and cycle state of the new trb
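
To make the comment in the abort path above concrete, here is a sketch of the two access patterns it contrasts. This is illustration only, not driver code: cmd_ring_reg stands in for a pointer to the 64-bit command ring control register, and the helpers used are the generic lo_hi_readq()/lo_hi_writeq() and readl()/writel() accessors.

/* Racy variant: a 64-bit read-modify-write also writes the upper dword
 * back, and can restore a stale ring pointer if the controller has just
 * stopped the ring and updated that pointer in the meantime. */
u64 old = lo_hi_readq(cmd_ring_reg);
lo_hi_writeq(old | CMD_RING_ABORT, cmd_ring_reg);

/* Variant used above: only the lower dword, which holds the control
 * bits, is read and written, so the pointer cannot be clobbered. */
u32 ctrl = readl(cmd_ring_reg);
writel(ctrl | CMD_RING_ABORT, cmd_ring_reg);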


@@ -3214,10 +3214,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
return; return;
/* Bail out if toggle is already being cleared by a endpoint reset */ /* Bail out if toggle is already being cleared by a endpoint reset */
spin_lock_irqsave(&xhci->lock, flags);
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
spin_unlock_irqrestore(&xhci->lock, flags);
return; return;
} }
spin_unlock_irqrestore(&xhci->lock, flags);
/* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */ /* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
if (usb_endpoint_xfer_control(&host_ep->desc) || if (usb_endpoint_xfer_control(&host_ep->desc) ||
usb_endpoint_xfer_isoc(&host_ep->desc)) usb_endpoint_xfer_isoc(&host_ep->desc))
@@ -3303,8 +3306,10 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_free_command(xhci, cfg_cmd); xhci_free_command(xhci, cfg_cmd);
cleanup: cleanup:
xhci_free_command(xhci, stop_cmd); xhci_free_command(xhci, stop_cmd);
spin_lock_irqsave(&xhci->lock, flags);
if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
spin_unlock_irqrestore(&xhci->lock, flags);
} }
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,


@@ -1899,6 +1899,7 @@ struct xhci_hcd {
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40) #define XHCI_NO_SOFT_RETRY BIT_ULL(40)
#define XHCI_BROKEN_D3COLD BIT_ULL(41) #define XHCI_BROKEN_D3COLD BIT_ULL(41)
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
unsigned int num_active_eps; unsigned int num_active_eps;
unsigned int limit_active_eps; unsigned int limit_active_eps;


@@ -899,11 +899,13 @@ static int dsps_probe(struct platform_device *pdev)
if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
ret = dsps_setup_optional_vbus_irq(pdev, glue); ret = dsps_setup_optional_vbus_irq(pdev, glue);
if (ret) if (ret)
goto err; goto unregister_pdev;
} }
return 0; return 0;
unregister_pdev:
platform_device_unregister(glue->musb);
err: err:
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
iounmap(glue->usbss_base); iounmap(glue->usbss_base);


@@ -246,11 +246,13 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */ /* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_EC200T 0x6026
#define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_VENDOR_ID 0x16d8
@@ -1111,6 +1113,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 }, .driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG91, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 }, .driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
@@ -1128,6 +1133,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP }, .driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
@@ -1227,6 +1233,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) }, .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1204, 0xff), /* Telit LE910Cx (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),


@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */


@@ -173,6 +173,10 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
return -EINVAL;
+if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+for (i = 0; i < nvqs; i++)
+vhost_vdpa_unsetup_vq_irq(v, i);
if (status == 0) {
ret = ops->reset(vdpa);
if (ret)
@@ -184,10 +188,6 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
for (i = 0; i < nvqs; i++)
vhost_vdpa_setup_vq_irq(v, i);
-if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
-for (i = 0; i < nvqs; i++)
-vhost_vdpa_unsetup_vq_irq(v, i);
return 0;
}
@@ -322,7 +322,7 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
struct eventfd_ctx *ctx;
cb.callback = vhost_vdpa_config_cb;
-cb.private = v->vdpa;
+cb.private = v;
if (copy_from_user(&fd, argp, sizeof(fd)))
return -EFAULT;


@@ -239,6 +239,17 @@ static int virtio_dev_probe(struct device *_d)
driver_features_legacy = driver_features; driver_features_legacy = driver_features;
} }
/*
* Some devices detect legacy solely via F_VERSION_1. Write
* F_VERSION_1 to force LE config space accesses before FEATURES_OK for
* these when needed.
*/
if (drv->validate && !virtio_legacy_is_little_endian()
&& device_features & BIT_ULL(VIRTIO_F_VERSION_1)) {
dev->features = BIT_ULL(VIRTIO_F_VERSION_1);
dev->config->finalize_features(dev);
}
if (device_features & (1ULL << VIRTIO_F_VERSION_1)) if (device_features & (1ULL << VIRTIO_F_VERSION_1))
dev->features = driver_features & device_features; dev->features = driver_features & device_features;
else else


@@ -2949,7 +2949,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
struct io_ring_ctx *ctx = req->ctx;
req_set_fail(req);
-if (issue_flags & IO_URING_F_NONBLOCK) {
+if (!(issue_flags & IO_URING_F_NONBLOCK)) {
mutex_lock(&ctx->uring_lock);
__io_req_complete(req, issue_flags, ret, cflags);
mutex_unlock(&ctx->uring_lock);


@@ -1111,7 +1111,14 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
/* attach dentry and inode */
-if (kn && kernfs_active(kn)) {
+if (kn) {
+/* Inactive nodes are invisible to the VFS so don't
+ * create a negative.
+ */
+if (!kernfs_active(kn)) {
+up_read(&kernfs_rwsem);
+return NULL;
+}
inode = kernfs_get_inode(dir->i_sb, kn);
if (!inode)
inode = ERR_PTR(-ENOMEM);


@@ -149,6 +149,7 @@ struct gendisk {
unsigned long state; unsigned long state;
#define GD_NEED_PART_SCAN 0 #define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1 #define GD_READ_ONLY 1
#define GD_DEAD 2
struct mutex open_mutex; /* open/close mutex */ struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */ unsigned open_partitions; /* number of open partitions */


@@ -13,11 +13,11 @@
TRACE_EVENT(kyber_latency,
-TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+TP_PROTO(dev_t dev, const char *domain, const char *type,
unsigned int percentile, unsigned int numerator,
unsigned int denominator, unsigned int samples),
-TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
),
TP_fast_assign(
-__entry->dev = disk_devt(q->disk);
+__entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
TRACE_EVENT(kyber_adjust,
-TP_PROTO(struct request_queue *q, const char *domain,
+TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
-unsigned int depth),
-TP_ARGS(q, domain, depth),
+TP_ARGS(dev, domain, depth),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
),
TP_fast_assign(
-__entry->dev = disk_devt(q->disk);
+__entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
TRACE_EVENT(kyber_throttled,
-TP_PROTO(struct request_queue *q, const char *domain),
+TP_PROTO(dev_t dev, const char *domain),
-TP_ARGS(q, domain),
+TP_ARGS(dev, domain),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
),
TP_fast_assign(
-__entry->dev = disk_devt(q->disk);
+__entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),


@@ -917,7 +917,6 @@ struct hl_wait_cs_in {
#define HL_WAIT_CS_STATUS_BUSY 1
#define HL_WAIT_CS_STATUS_TIMEDOUT 2
#define HL_WAIT_CS_STATUS_ABORTED 3
-#define HL_WAIT_CS_STATUS_INTERRUPTED 4
#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1
#define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD 0x2
@@ -1286,7 +1285,8 @@ struct hl_debug_args {
* EIO - The CS was aborted (usually because the device was reset)
* ENODEV - The device wants to do hard-reset (so user need to close FD)
*
-* The driver also returns a custom define inside the IOCTL which can be:
+* The driver also returns a custom define in case the IOCTL call returned 0.
+* The define can be one of the following:
*
* HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
* HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
@@ -1294,8 +1294,6 @@ struct hl_debug_args {
* (ETIMEDOUT)
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
-* HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
-*
*/
#define HL_IOCTL_WAIT_CS \


@@ -382,6 +382,7 @@ static char * __init xbc_make_cmdline(const char *key)
ret = xbc_snprint_cmdline(new_cmdline, len + 1, root); ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
if (ret < 0 || ret > len) { if (ret < 0 || ret > len) {
pr_err("Failed to print extra kernel cmdline.\n"); pr_err("Failed to print extra kernel cmdline.\n");
memblock_free_ptr(new_cmdline, len + 1);
return NULL; return NULL;
} }


@@ -1745,16 +1745,15 @@ void latency_fsnotify(struct trace_array *tr)
irq_work_queue(&tr->fsnotify_irqwork);
}
-/*
+#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
-* (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
+|| defined(CONFIG_OSNOISE_TRACER)
-* defined(CONFIG_FSNOTIFY)
-*/
-#else
#define trace_create_maxlat_file(tr, d_tracer) \
trace_create_file("tracing_max_latency", 0644, d_tracer, \
&tr->max_latency, &tracing_max_lat_fops)
+#else
+#define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -9474,9 +9473,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
create_trace_options_dir(tr);
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
trace_create_maxlat_file(tr, d_tracer);
-#endif
if (ftrace_create_function_files(tr, d_tracer))
MEM_FAIL(1, "Could not allocate function filter files");


@@ -119,10 +119,58 @@ static bool eprobe_dyn_event_match(const char *system, const char *event,
int argc, const char **argv, struct dyn_event *ev) int argc, const char **argv, struct dyn_event *ev)
{ {
struct trace_eprobe *ep = to_trace_eprobe(ev); struct trace_eprobe *ep = to_trace_eprobe(ev);
const char *slash;
return strcmp(trace_probe_name(&ep->tp), event) == 0 && /*
(!system || strcmp(trace_probe_group_name(&ep->tp), system) == 0) && * We match the following:
trace_probe_match_command_args(&ep->tp, argc, argv); * event only - match all eprobes with event name
* system and event only - match all system/event probes
*
* The below has the above satisfied with more arguments:
*
* attached system/event - If the arg has the system and event
* the probe is attached to, match
* probes with the attachment.
*
* If any more args are given, then it requires a full match.
*/
/*
* If system exists, but this probe is not part of that system
* do not match.
*/
if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
return false;
/* Must match the event name */
if (strcmp(trace_probe_name(&ep->tp), event) != 0)
return false;
/* No arguments match all */
if (argc < 1)
return true;
/* First argument is the system/event the probe is attached to */
slash = strchr(argv[0], '/');
if (!slash)
slash = strchr(argv[0], '.');
if (!slash)
return false;
if (strncmp(ep->event_system, argv[0], slash - argv[0]))
return false;
if (strcmp(ep->event_name, slash + 1))
return false;
argc--;
argv++;
/* If there are no other args, then match */
if (argc < 1)
return true;
return trace_probe_match_command_args(&ep->tp, argc, argv);
} }
static struct dyn_event_operations eprobe_dyn_event_ops = { static struct dyn_event_operations eprobe_dyn_event_ops = {
@@ -632,6 +680,13 @@ static int disable_eprobe(struct trace_eprobe *ep,
trace_event_trigger_enable_disable(file, 0); trace_event_trigger_enable_disable(file, 0);
update_cond_flag(file); update_cond_flag(file);
/* Make sure nothing is using the edata or trigger */
tracepoint_synchronize_unregister();
kfree(edata);
kfree(trigger);
return 0; return 0;
} }


@@ -2506,7 +2506,7 @@ find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
* events. However, for convenience, users are allowed to directly * events. However, for convenience, users are allowed to directly
* specify an event field in an action, which will be automatically * specify an event field in an action, which will be automatically
* converted into a variable on their behalf. * converted into a variable on their behalf.
*
* If a user specifies a field on an event that isn't the event the * If a user specifies a field on an event that isn't the event the
* histogram currently being defined (the target event histogram), the * histogram currently being defined (the target event histogram), the
* only way that can be accomplished is if a new hist trigger is * only way that can be accomplished is if a new hist trigger is


@@ -936,7 +936,12 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
*/
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
-return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+int ret = memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+if (!ret)
+kmemleak_free_part_phys(base, size);
+return ret;
}
/**


@@ -189,7 +189,7 @@ if ($arch =~ /(x86(_64)?)|(i386)/) {
$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
$weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
$section_regex = "Disassembly of section\\s+(\\S+):";
-$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
+$function_regex = "^([0-9a-fA-F]+)\\s+<([^^]*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
$section_type = '@progbits';
$mcount_adjust = 0;


@@ -40,7 +40,7 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE, .type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_TASK_CLOCK, .config = PERF_COUNT_SW_TASK_CLOCK,
}; };
int err, cpu, tmp; int err, idx;
cpus = perf_cpu_map__new(NULL); cpus = perf_cpu_map__new(NULL);
__T("failed to create cpus", cpus); __T("failed to create cpus", cpus);
@@ -70,10 +70,10 @@ static int test_stat_cpu(void)
perf_evlist__for_each_evsel(evlist, evsel) { perf_evlist__for_each_evsel(evlist, evsel) {
cpus = perf_evsel__cpus(evsel); cpus = perf_evsel__cpus(evsel);
perf_cpu_map__for_each_cpu(cpu, tmp, cpus) { for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 }; struct perf_counts_values counts = { .val = 0 };
perf_evsel__read(evsel, cpu, 0, &counts); perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0); __T("failed to read value for evsel", counts.val != 0);
} }
} }


@@ -22,7 +22,7 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE, .type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK, .config = PERF_COUNT_SW_CPU_CLOCK,
}; };
int err, cpu, tmp; int err, idx;
cpus = perf_cpu_map__new(NULL); cpus = perf_cpu_map__new(NULL);
__T("failed to create cpus", cpus); __T("failed to create cpus", cpus);
@@ -33,10 +33,10 @@ static int test_stat_cpu(void)
err = perf_evsel__open(evsel, cpus, NULL); err = perf_evsel__open(evsel, cpus, NULL);
__T("failed to open evsel", err == 0); __T("failed to open evsel", err == 0);
perf_cpu_map__for_each_cpu(cpu, tmp, cpus) { for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 }; struct perf_counts_values counts = { .val = 0 };
perf_evsel__read(evsel, cpu, 0, &counts); perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0); __T("failed to read value for evsel", counts.val != 0);
} }
@@ -148,6 +148,7 @@ static int test_stat_user_read(int event)
__T("failed to mmap evsel", err == 0); __T("failed to mmap evsel", err == 0);
pc = perf_evsel__mmap_base(evsel, 0, 0); pc = perf_evsel__mmap_base(evsel, 0, 0);
__T("failed to get mmapped address", pc);
#if defined(__i386__) || defined(__x86_64__) #if defined(__i386__) || defined(__x86_64__)
__T("userspace counter access not supported", pc->cap_user_rdpmc); __T("userspace counter access not supported", pc->cap_user_rdpmc);


@@ -508,6 +508,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
list_add_tail(&reloc->list, &sec->reloc->reloc_list); list_add_tail(&reloc->list, &sec->reloc->reloc_list);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc)); elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
sec->reloc->sh.sh_size += sec->reloc->sh.sh_entsize;
sec->reloc->changed = true; sec->reloc->changed = true;
return 0; return 0;
@@ -977,63 +978,63 @@ static struct section *elf_create_reloc_section(struct elf *elf,
} }
} }
static int elf_rebuild_rel_reloc_section(struct section *sec, int nr) static int elf_rebuild_rel_reloc_section(struct section *sec)
{ {
struct reloc *reloc; struct reloc *reloc;
int idx = 0, size; int idx = 0;
void *buf; void *buf;
/* Allocate a buffer for relocations */ /* Allocate a buffer for relocations */
size = nr * sizeof(GElf_Rel); buf = malloc(sec->sh.sh_size);
buf = malloc(size);
if (!buf) { if (!buf) {
perror("malloc"); perror("malloc");
return -1; return -1;
} }
sec->data->d_buf = buf; sec->data->d_buf = buf;
sec->data->d_size = size; sec->data->d_size = sec->sh.sh_size;
sec->data->d_type = ELF_T_REL; sec->data->d_type = ELF_T_REL;
sec->sh.sh_size = size;
idx = 0; idx = 0;
list_for_each_entry(reloc, &sec->reloc_list, list) { list_for_each_entry(reloc, &sec->reloc_list, list) {
reloc->rel.r_offset = reloc->offset; reloc->rel.r_offset = reloc->offset;
reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type); reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
gelf_update_rel(sec->data, idx, &reloc->rel); if (!gelf_update_rel(sec->data, idx, &reloc->rel)) {
WARN_ELF("gelf_update_rel");
return -1;
}
idx++; idx++;
} }
return 0; return 0;
} }
static int elf_rebuild_rela_reloc_section(struct section *sec, int nr) static int elf_rebuild_rela_reloc_section(struct section *sec)
{ {
struct reloc *reloc; struct reloc *reloc;
int idx = 0, size; int idx = 0;
void *buf; void *buf;
/* Allocate a buffer for relocations with addends */ /* Allocate a buffer for relocations with addends */
size = nr * sizeof(GElf_Rela); buf = malloc(sec->sh.sh_size);
buf = malloc(size);
if (!buf) { if (!buf) {
perror("malloc"); perror("malloc");
return -1; return -1;
} }
sec->data->d_buf = buf; sec->data->d_buf = buf;
sec->data->d_size = size; sec->data->d_size = sec->sh.sh_size;
sec->data->d_type = ELF_T_RELA; sec->data->d_type = ELF_T_RELA;
sec->sh.sh_size = size;
idx = 0; idx = 0;
list_for_each_entry(reloc, &sec->reloc_list, list) { list_for_each_entry(reloc, &sec->reloc_list, list) {
reloc->rela.r_offset = reloc->offset; reloc->rela.r_offset = reloc->offset;
reloc->rela.r_addend = reloc->addend; reloc->rela.r_addend = reloc->addend;
reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type); reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
gelf_update_rela(sec->data, idx, &reloc->rela); if (!gelf_update_rela(sec->data, idx, &reloc->rela)) {
WARN_ELF("gelf_update_rela");
return -1;
}
idx++; idx++;
} }
@@ -1042,16 +1043,9 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec) static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
{ {
struct reloc *reloc;
int nr;
nr = 0;
list_for_each_entry(reloc, &sec->reloc_list, list)
nr++;
switch (sec->sh.sh_type) { switch (sec->sh.sh_type) {
case SHT_REL: return elf_rebuild_rel_reloc_section(sec, nr); case SHT_REL: return elf_rebuild_rel_reloc_section(sec);
case SHT_RELA: return elf_rebuild_rela_reloc_section(sec, nr); case SHT_RELA: return elf_rebuild_rela_reloc_section(sec);
default: return -1; default: return -1;
} }
} }
@@ -1111,12 +1105,6 @@ int elf_write(struct elf *elf)
/* Update changed relocation sections and section headers: */ /* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) { list_for_each_entry(sec, &elf->sections, list) {
if (sec->changed) { if (sec->changed) {
if (sec->base &&
elf_rebuild_reloc_section(elf, sec)) {
WARN("elf_rebuild_reloc_section");
return -1;
}
s = elf_getscn(elf->elf, sec->idx); s = elf_getscn(elf->elf, sec->idx);
if (!s) { if (!s) {
WARN_ELF("elf_getscn"); WARN_ELF("elf_getscn");
@@ -1127,6 +1115,12 @@ int elf_write(struct elf *elf)
return -1; return -1;
} }
if (sec->base &&
elf_rebuild_reloc_section(elf, sec)) {
WARN("elf_rebuild_reloc_section");
return -1;
}
sec->changed = false; sec->changed = false;
elf->changed = true; elf->changed = true;
} }


@@ -2116,7 +2116,7 @@ fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
static int __perf_session__process_decomp_events(struct perf_session *session)
{
s64 skip;
-u64 size, file_pos = 0;
+u64 size;
struct decomp *decomp = session->decomp_last;
if (!decomp)
@@ -2132,7 +2132,7 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
size = event->header.size;
if (size < sizeof(struct perf_event_header) ||
-(skip = perf_session__process_event(session, event, file_pos)) < 0) {
+(skip = perf_session__process_event(session, event, decomp->file_pos)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
decomp->file_pos + decomp->head, event->header.size, event->header.type);
return -EINVAL;


@@ -11,8 +11,8 @@ SYSTEM="syscalls"
EVENT="sys_enter_openat" EVENT="sys_enter_openat"
FIELD="filename" FIELD="filename"
EPROBE="eprobe_open" EPROBE="eprobe_open"
OPTIONS="file=+0(\$filename):ustring"
echo "e:$EPROBE $SYSTEM/$EVENT file=+0(\$filename):ustring" >> dynamic_events echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE test -d events/eprobes/$EPROBE
@@ -37,4 +37,54 @@ echo "-:$EPROBE" >> dynamic_events
! grep -q "$EPROBE" dynamic_events ! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE ! test -d events/eprobes/$EPROBE
# test various ways to remove the probe (already tested with just event name)
# With group name
echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE
echo "-:eprobes/$EPROBE" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
# With group name and system/event
echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE
echo "-:eprobes/$EPROBE $SYSTEM/$EVENT" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
# With just event name and system/event
echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE
echo "-:$EPROBE $SYSTEM/$EVENT" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
# With just event name and system/event and options
echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE
echo "-:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
# With group name and system/event and options
echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
grep -q "$EPROBE" dynamic_events
test -d events/eprobes/$EPROBE
echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
# Finally make sure what is in the dynamic_events file clears it too
echo "e:$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
LINE=`sed -e '/$EPROBE/s/^e/-/' < dynamic_events`
test -d events/eprobes/$EPROBE
echo "-:eprobes/$EPROBE $SYSTEM/$EVENT $OPTIONS" >> dynamic_events
! grep -q "$EPROBE" dynamic_events
! test -d events/eprobes/$EPROBE
clear_trace clear_trace