Merge tag 'perf-urgent-2021-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Misc fixes:
 - Fix the NMI watchdog on ancient Intel CPUs.
- Remove a misguided, NMI-unsafe KASAN callback from the NMI-safe
irq_work path used by perf.
- Fix uncore events on Ice Lake servers.
- Someone booted maxcpus=1 on an SNB-EP, and the uncore driver
emitted warnings and was probably buggy. Fix it.
- KCSAN found a genuine data race in the core perf code. Somewhat
ironically the bug was introduced through a recent race fix. :-/
In our defense, the new race window was much more narrow. Fix it"
* tag 'perf-urgent-2021-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/nmi_watchdog: Fix old-style NMI watchdog regression on old Intel CPUs
irq_work: Make irq_work_queue() NMI-safe again
perf/x86/intel/uncore: Fix M2M event umask for Ice Lake server
perf/x86/intel/uncore: Fix a kernel WARNING triggered by maxcpus=1
perf: Fix data race between pin_count increment/decrement
@@ -1406,6 +1406,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
 					die_id = i;
 				else
 					die_id = topology_phys_to_logical_pkg(i);
+				if (die_id < 0)
+					die_id = -ENODEV;
 				map->pbus_to_dieid[bus] = die_id;
 				break;
 			}
@@ -1452,14 +1454,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
 			i = -1;
 			if (reverse) {
 				for (bus = 255; bus >= 0; bus--) {
-					if (map->pbus_to_dieid[bus] >= 0)
+					if (map->pbus_to_dieid[bus] != -1)
 						i = map->pbus_to_dieid[bus];
 					else
 						map->pbus_to_dieid[bus] = i;
 				}
 			} else {
 				for (bus = 0; bus <= 255; bus++) {
-					if (map->pbus_to_dieid[bus] >= 0)
+					if (map->pbus_to_dieid[bus] != -1)
 						i = map->pbus_to_dieid[bus];
 					else
 						map->pbus_to_dieid[bus] = i;
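
Note on the two snbep_pci2phy_map_init() hunks above: a die id that comes back negative is now stored as -ENODEV, and the later fill-in pass treats only -1 (the value the table starts out with) as "never written", so a probed-but-invalid bus keeps a distinct error value instead of being mistaken for an empty slot. Below is a minimal userspace sketch of that sentinel scheme, not kernel code; the bus numbers and stored values are invented for illustration, and the table is assumed to start out filled with -1.

#include <errno.h>
#include <stdio.h>

#define NBUS 256

int main(void)
{
	int pbus_to_dieid[NBUS];
	int bus, i;

	for (bus = 0; bus < NBUS; bus++)
		pbus_to_dieid[bus] = -1;	/* -1: slot never written */

	pbus_to_dieid[0x17] = 0;		/* probed, maps to die 0 */
	pbus_to_dieid[0x85] = -ENODEV;		/* probed, but no valid die id was found */

	/*
	 * Fill-in pass: only untouched (-1) slots inherit the running id;
	 * -ENODEV entries count as "written" and stay distinguishable.
	 */
	i = -1;
	for (bus = 0; bus < NBUS; bus++) {
		if (pbus_to_dieid[bus] != -1)
			i = pbus_to_dieid[bus];
		else
			pbus_to_dieid[bus] = i;
	}

	printf("bus 0x20 -> %d, bus 0x90 -> %d\n",
	       pbus_to_dieid[0x20], pbus_to_dieid[0x90]);
	return 0;
}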
@@ -5097,9 +5099,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
+	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
 	.ops		= &snr_m2m_uncore_pci_ops,
-	.format_group	= &skx_uncore_format_group,
+	.format_group	= &snr_m2m_uncore_format_group,
 };
 
 static struct attribute *icx_upi_uncore_formats_attr[] = {
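
Note: the icx_uncore_m2m hunk adds the extended umask register mask (.event_mask_ext) and switches from the SKX to the SNR M2M format group; this is the "Fix M2M event umask for Ice Lake server" entry in the shortlog, and with the SKX format group the extended umask bits were presumably not exposed for these events.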
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 		case 15:
 			return msr - MSR_P4_BPU_PERFCTR0;
 		}
-		fallthrough;
+		break;
 	case X86_VENDOR_ZHAOXIN:
 	case X86_VENDOR_CENTAUR:
 		return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 		case 15:
 			return msr - MSR_P4_BSU_ESCR0;
 		}
-		fallthrough;
+		break;
 	case X86_VENDOR_ZHAOXIN:
 	case X86_VENDOR_CENTAUR:
 		return msr - MSR_ARCH_PERFMON_EVENTSEL0;
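
Note: the nmi_perfctr_msr_to_bit() and nmi_evntsel_msr_to_bit() hunks turn a fallthrough back into a break, so an old Intel CPU whose family matches none of the nested switch cases no longer drops into the Zhaoxin/Centaur branch and its MSR_ARCH_PERFMON_* offsets; this is the "old-style NMI watchdog regression on old Intel CPUs" from the shortlog.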
@@ -4609,7 +4609,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		ctx = &cpuctx->ctx;
 		get_ctx(ctx);
+		raw_spin_lock_irqsave(&ctx->lock, flags);
 		++ctx->pin_count;
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
 		return ctx;
 	}
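
Note: the find_get_context() hunk wraps the pin_count increment in ctx->lock, which is the "Fix data race between pin_count increment/decrement" entry from the shortlog; a bare ++ on a shared integer is a read-modify-write that can race with a decrement done elsewhere under the lock. The userspace sketch below shows the same serialise-the-RMW idea with pthreads; it is not kernel code and every name in it is made up.

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	int pin_count;
};

static struct ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void *pin_many(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&ctx.lock);
		++ctx.pin_count;	/* read-modify-write, now serialised by the lock */
		pthread_mutex_unlock(&ctx.lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, pin_many, NULL);
	pthread_create(&b, NULL, pin_many, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("pin_count = %d\n", ctx.pin_count);	/* reliably 200000 with the lock held */
	return 0;
}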
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
 	if (!irq_work_claim(work))
 		return false;
 
-	/*record irq_work call stack in order to print it in KASAN reports*/
-	kasan_record_aux_stack(work);
-
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 	__irq_work_queue_local(work);
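
Note: the dropped kasan_record_aux_stack() call is the "misguided, NMI-unsafe KASAN callback" from the merge description; perf queues irq_work from NMI context, so anything on this path has to be NMI-safe, and removing the call restores that guarantee for irq_work_queue().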