Commit 0dc1a185 authored by Mark Rutland, committed by Will Deacon

arm_pmu: add armpmu_alloc_atomic()

In ACPI systems, we don't know the makeup of CPUs until we hotplug them
on, and thus have to allocate the PMU datastructures at hotplug time.
Thus, we must use GFP_ATOMIC allocations.

Let's add an armpmu_alloc_atomic() that we can use in this case.
Signed-off-by: Mark Rutland <>
Cc: Will Deacon <>
Signed-off-by: Will Deacon <>
parent d3d5aac2
......@@ -760,18 +760,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
struct arm_pmu *armpmu_alloc(void)
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
struct arm_pmu *pmu;
int cpu;
pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
pmu = kzalloc(sizeof(*pmu), flags);
if (!pmu) {
pr_info("failed to allocate PMU device!\n");
goto out;
pmu->hw_events = alloc_percpu(struct pmu_hw_events);
pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu;
......@@ -817,6 +817,17 @@ struct arm_pmu *armpmu_alloc(void)
return NULL;
struct arm_pmu *armpmu_alloc(void)
return __armpmu_alloc(GFP_KERNEL);
struct arm_pmu *armpmu_alloc_atomic(void)
return __armpmu_alloc(GFP_ATOMIC);
void armpmu_free(struct arm_pmu *pmu)
......@@ -127,7 +127,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
return pmu;
pmu = armpmu_alloc();
pmu = armpmu_alloc_atomic();
if (!pmu) {
pr_warn("Unable to allocate PMU for CPU%d\n",
......@@ -157,6 +157,7 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment