diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 84f8c0bc15a0..20f1307c4e21 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -675,7 +675,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 	event->hw.idx = -1;
 	event->hw.last_cpu = -1;
 	event->hw.last_tag = ~0ULL;
-	event->hw.dyn_constraint = ~0ULL;
+	event->hw_ext->dyn_constraint = ~0ULL;
 
 	/* mark unused */
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cbf2fc57a923..833a52defdc8 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2927,6 +2927,7 @@ static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
 static void intel_pmu_enable_acr(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event_ext *hw_ext = event->hw_ext;
 
 	if (!is_acr_event_group(event) || !event->attr.config2) {
 		/*
@@ -2937,7 +2938,7 @@ static void intel_pmu_enable_acr(struct perf_event *event)
 		return;
 	}
 
-	intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
+	intel_pmu_config_acr(hwc->idx, hw_ext->config1, -hwc->sample_period);
 }
 
 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
@@ -2998,7 +2999,7 @@ static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
 			if (i + idx >= cpuc->n_events ||
 			    !is_acr_event_group(cpuc->event_list[i + idx]))
 				return;
-			__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
+			__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw_ext->config1);
 		}
 
 		i = j - 1;
@@ -3844,9 +3845,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	if (cpuc->excl_cntrs)
 		return intel_get_excl_constraints(cpuc, event, idx, c2);
 
-	if (event->hw.dyn_constraint != ~0ULL) {
+	if (event->hw_ext->dyn_constraint != ~0ULL) {
 		c2 = dyn_constraint(cpuc, c2, idx);
-		c2->idxmsk64 &= event->hw.dyn_constraint;
+		c2->idxmsk64 &= event->hw_ext->dyn_constraint;
 		c2->weight = hweight64(c2->idxmsk64);
 	}
 
@@ -4208,7 +4209,7 @@ static bool intel_pmu_is_acr_group(struct perf_event *event)
 static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
 						 u64 *cause_mask, int *num)
 {
-	event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
+	event->hw_ext->dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
 	*cause_mask |= event->attr.config2;
 	*num += 1;
 }
@@ -4217,7 +4218,7 @@ static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
 						   int idx, u64 cause_mask)
 {
 	if (test_bit(idx, (unsigned long *)&cause_mask))
-		event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
+		event->hw_ext->dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
 }
 
 static int intel_pmu_hw_config(struct perf_event *event)
@@ -4283,7 +4284,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 			return -EINVAL;
 		if (branch_sample_counters(leader)) {
 			num++;
-			leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
+			leader->hw_ext->dyn_constraint &= x86_pmu.lbr_counters;
 		}
 		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
 
@@ -4292,7 +4293,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 				return -EINVAL;
 			if (branch_sample_counters(sibling)) {
 				num++;
-				sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
+				sibling->hw_ext->dyn_constraint &= x86_pmu.lbr_counters;
 			}
 		}
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index df9adf0ec93b..4dc058ad7c7d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -150,6 +150,17 @@ struct hw_perf_event_extra {
 
 static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
 
+struct hw_perf_event_ext {
+#ifdef CONFIG_PERF_EVENTS
+	union {
+		struct {
+			u64		config1;
+			u64		dyn_constraint;
+		};
+	};
+#endif
+};
+
 /**
  * struct hw_perf_event - performance event hardware details:
  */
@@ -158,9 +169,7 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
-			u64		config1;
 			u64		last_tag;
-			u64		dyn_constraint;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		event_base_rdpmc;
@@ -854,7 +863,7 @@ struct perf_event {
 	 */
 	__u32				orig_type;
 
-	DEEPIN_KABI_RESERVE(1)
+	DEEPIN_KABI_USE(1, struct hw_perf_event_ext *hw_ext)
 	DEEPIN_KABI_RESERVE(2)
 	DEEPIN_KABI_RESERVE(3)
 	DEEPIN_KABI_RESERVE(4)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index adb1530a48dd..d8c264b9c488 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -425,6 +425,7 @@ static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
 static cpumask_var_t perf_online_mask;
 static struct kmem_cache *perf_event_cache;
+static struct kmem_cache *perf_hw_event_cache;
 
 /*
  * perf event paranoia level:
@@ -5012,6 +5013,7 @@ static void free_event_rcu(struct rcu_head *head)
 	if (event->ns)
 		put_pid_ns(event->ns);
 	perf_event_free_filter(event);
+	kmem_cache_free(perf_hw_event_cache, event->hw_ext);
 	kmem_cache_free(perf_event_cache, event);
 }
 
@@ -12067,6 +12069,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (!event)
 		return ERR_PTR(-ENOMEM);
 
+	event->hw_ext = kmem_cache_alloc_node(perf_hw_event_cache,
+					      GFP_KERNEL | __GFP_ZERO,
+					      node);
+	if (!event->hw_ext) {
+		kmem_cache_free(perf_event_cache, event);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	/*
 	 * Single events are their own group leaders, with an
 	 * empty sibling list:
@@ -13929,6 +13939,7 @@ void __init perf_event_init(void)
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 
 	perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);
+	perf_hw_event_cache = KMEM_CACHE(hw_perf_event_ext, SLAB_PANIC);
 
 	/*
 	 * Build time assertion that we keep the data_head at the intended
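
Note (not part of the patch): the change relies on the reserved-slot extension pattern,
where fields are moved out of a fixed-layout structure into a side structure allocated
separately and reached through a previously reserved pointer, so the size and offsets of
the original structure stay stable for kABI consumers. Below is a minimal, self-contained
userspace sketch of that idea; the names (demo_event, demo_event_ext, DEMO_RESERVE,
DEMO_USE, demo_event_alloc) are illustrative only and do not come from the kernel sources.

/*
 * Minimal sketch of the reserved-slot extension pattern used above.
 * Illustrative names; plain userspace C, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Before: a reserved slot pads the struct to keep its layout stable. */
#define DEMO_RESERVE(n)		unsigned long demo_reserved##n;
/* After: the slot is repurposed for a pointer of the same size. */
#define DEMO_USE(n, new_member)	new_member;

/* Fields that used to live inline now sit in a side structure. */
struct demo_event_ext {
	uint64_t config1;
	uint64_t dyn_constraint;
};

struct demo_event {
	uint64_t config;	/* existing fields keep their offsets */
	uint64_t last_tag;
	DEMO_USE(1, struct demo_event_ext *ext)	/* was DEMO_RESERVE(1) */
	DEMO_RESERVE(2)
};

static struct demo_event *demo_event_alloc(void)
{
	struct demo_event *event = calloc(1, sizeof(*event));

	if (!event)
		return NULL;

	/* Side allocation, mirroring the perf_hw_event_cache allocation. */
	event->ext = calloc(1, sizeof(*event->ext));
	if (!event->ext) {
		free(event);
		return NULL;
	}
	event->ext->dyn_constraint = ~0ULL;	/* same default the patch sets */
	return event;
}

static void demo_event_free(struct demo_event *event)
{
	free(event->ext);	/* the side structure is freed with its owner */
	free(event);
}

int main(void)
{
	struct demo_event *event = demo_event_alloc();

	if (!event)
		return 1;
	printf("dyn_constraint = %#llx\n",
	       (unsigned long long)event->ext->dyn_constraint);
	demo_event_free(event);
	return 0;
}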