diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index fb96ae447754a..d5b3b19b87195 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -675,7 +675,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 	event->hw.idx = -1;
 	event->hw.last_cpu = -1;
 	event->hw.last_tag = ~0ULL;
-	event->hw_ext->dyn_constraint = ~0ULL;
+	event->hw.dyn_constraint = ~0ULL;
 
 	/* mark unused */
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c3e8fb6105769..82db0084b01ff 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2927,7 +2927,6 @@ static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
 static void intel_pmu_enable_acr(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct hw_perf_event_ext *hw_ext = event->hw_ext;
 
 	if (!is_acr_event_group(event) || !event->attr.config2) {
 		/*
@@ -2938,7 +2937,7 @@ static void intel_pmu_enable_acr(struct perf_event *event)
 		return;
 	}
 
-	intel_pmu_config_acr(hwc->idx, hw_ext->config1, -hwc->sample_period);
+	intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
 }
 
 DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
@@ -2999,7 +2998,7 @@ static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
 			if (i + idx >= cpuc->n_events ||
 			    !is_acr_event_group(cpuc->event_list[i + idx]))
 				return;
-			__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw_ext->config1);
+			__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
 		}
 	}
 	i = j - 1;
@@ -3845,9 +3844,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	if (cpuc->excl_cntrs)
 		return intel_get_excl_constraints(cpuc, event, idx, c2);
 
-	if (event->hw_ext->dyn_constraint != ~0ULL) {
+	if (event->hw.dyn_constraint != ~0ULL) {
 		c2 = dyn_constraint(cpuc, c2, idx);
-		c2->idxmsk64 &= event->hw_ext->dyn_constraint;
+		c2->idxmsk64 &= event->hw.dyn_constraint;
 		c2->weight = hweight64(c2->idxmsk64);
 	}
 
@@ -4211,7 +4210,7 @@ static bool intel_pmu_is_acr_group(struct perf_event *event)
 static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
 						 u64 *cause_mask, int *num)
 {
-	event->hw_ext->dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
+	event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
 	*cause_mask |= event->attr.config2;
 	*num += 1;
 }
@@ -4220,7 +4219,7 @@ static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
 						   int idx, u64 cause_mask)
 {
 	if (test_bit(idx, (unsigned long *)&cause_mask))
-		event->hw_ext->dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
+		event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
 }
 
 static int intel_pmu_hw_config(struct perf_event *event)
@@ -4286,7 +4285,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 			return -EINVAL;
 		if (branch_sample_counters(leader)) {
 			num++;
-			leader->hw_ext->dyn_constraint &= x86_pmu.lbr_counters;
+			leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
 		}
 		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
 
@@ -4295,7 +4294,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 				return -EINVAL;
 			if (branch_sample_counters(sibling)) {
 				num++;
-				sibling->hw_ext->dyn_constraint &= x86_pmu.lbr_counters;
+				sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
 			}
 		}
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4dc058ad7c7d9..df9adf0ec93b0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -150,17 +150,6 @@ struct hw_perf_event_extra {
 
 static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
 
-struct hw_perf_event_ext {
-#ifdef CONFIG_PERF_EVENTS
-	union {
-		struct {
-			u64 config1;
-			u64 dyn_constraint;
-		};
-	};
-#endif
-};
-
 /**
  * struct hw_perf_event - performance event hardware details:
  */
@@ -169,7 +158,9 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		config1;
 			u64		last_tag;
+			u64		dyn_constraint;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		event_base_rdpmc;
@@ -863,7 +854,7 @@ struct perf_event {
 	 */
 	__u32				orig_type;
 
-	DEEPIN_KABI_USE(1, struct hw_perf_event_ext *hw_ext)
+	DEEPIN_KABI_RESERVE(1)
 	DEEPIN_KABI_RESERVE(2)
 	DEEPIN_KABI_RESERVE(3)
 	DEEPIN_KABI_RESERVE(4)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b7d035c16049..e91322b80fb06 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -425,7 +425,6 @@ static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
 static cpumask_var_t perf_online_mask;
 static struct kmem_cache *perf_event_cache;
-static struct kmem_cache *perf_hw_event_cache;
 
 /*
  * perf event paranoia level:
@@ -5013,7 +5012,6 @@ static void free_event_rcu(struct rcu_head *head)
 	if (event->ns)
 		put_pid_ns(event->ns);
 	perf_event_free_filter(event);
-	kmem_cache_free(perf_hw_event_cache, event->hw_ext);
 	kmem_cache_free(perf_event_cache, event);
 }
 
@@ -12069,14 +12067,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (!event)
 		return ERR_PTR(-ENOMEM);
 
-	event->hw_ext = kmem_cache_alloc_node(perf_hw_event_cache,
-					      GFP_KERNEL | __GFP_ZERO,
-					      node);
-	if (!event->hw_ext) {
-		kmem_cache_free(perf_event_cache, event);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	/*
 	 * Single events are their own group leaders, with an
 	 * empty sibling list:
@@ -13939,7 +13929,6 @@ void __init perf_event_init(void)
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 
 	perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);
-	perf_hw_event_cache = KMEM_CACHE(hw_perf_event_ext, SLAB_PANIC);
 
 	/*
 	 * Build time assertion that we keep the data_head at the intended
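For readers following the change, below is a minimal, compilable user-space sketch of the layout this patch converges on: config1 and dyn_constraint sit directly in the hardware part of struct hw_perf_event, so callers reach them as event->hw.config1 / event->hw.dyn_constraint and no separate hw_ext object is allocated or freed. The structs here are heavily trimmed stand-ins, not the real kernel definitions; only the field names and the ~0ULL "no dynamic constraint" convention are taken from the diff above, and the 0xf counter mask is an arbitrary placeholder for acr_cntr_mask64.

/*
 * Trimmed user-space mock of the post-patch layout; the real structures in
 * include/linux/perf_event.h carry many more members.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_perf_event {                 /* only the fields touched by the patch */
	uint64_t config;
	uint64_t config1;              /* previously event->hw_ext->config1 */
	uint64_t last_tag;
	uint64_t dyn_constraint;       /* previously event->hw_ext->dyn_constraint */
};

struct perf_event {                    /* trimmed mock */
	struct hw_perf_event hw;       /* embedded, so no extra allocation or free */
};

int main(void)
{
	struct perf_event event = { 0 };

	/* Same initialization pattern as __x86_pmu_event_init() after the
	 * patch: ~0ULL means "no dynamic constraint".
	 */
	event.hw.last_tag = ~0ULL;
	event.hw.dyn_constraint = ~0ULL;

	/* An ACR-style setup narrows the mask; 0xf (counters 0-3) is a
	 * made-up stand-in for hybrid(event->pmu, acr_cntr_mask64).
	 */
	event.hw.dyn_constraint &= 0xfULL;

	/* Mirrors the check in intel_get_event_constraints(). */
	if (event.hw.dyn_constraint != ~0ULL)
		printf("dynamic constraint mask: %#llx\n",
		       (unsigned long long)event.hw.dyn_constraint);
	else
		printf("no dynamic constraint\n");

	return 0;
}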