/*
 * Performance events - AMD Processor Power Reporting Mechanism
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Huang Rui <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
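
/*
 * Example usage sketch (assumes the PMU registered successfully as "power",
 * see amd_power_pmu_init(), and that the perf tool in use understands the
 * sysfs PMU event syntax):
 *
 *   # perf stat -a -e power/power-pkg/ -- sleep 1
 *
 * The reported value is in mWatts, via the event's unit and scale
 * attributes defined below.
 */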

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"

#define MSR_F15H_CU_PWR_ACCUMULATOR	0xc001007a
#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR	0xc001007b
#define MSR_F15H_PTSC			0xc0010280
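
/*
 * Per the AMD Family 15h BKDG (register names as that document appears to
 * use them): MSRC001_007A is the compute unit power accumulator
 * (CpuSwPwrAcc), MSRC001_007B its maximum value (MaxCpuSwPwrAcc), and
 * MSRC001_0280 the performance timestamp counter (PTSC).
 */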

/* Event code: LSB 8 bits, passed in attr->config; any other bit is reserved. */
#define AMD_POWER_EVENT_MASK	0xFFULL

/*
 * Accumulated power event code.
 */
#define AMD_POWER_EVENTSEL_PKG		1

/*
 * The ratio of compute unit power accumulator sample period to the
 * PTSC period.
 */
static unsigned int cpu_pwr_sample_ratio;

/* Maximum accumulated power of a compute unit. */
static u64 max_cu_acc_power;

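/*
 * Forward declaration: pmu_event_init() below compares event->attr.type
 * against pmu_class.type, which is assigned when the PMU is registered in
 * amd_power_pmu_init(). The structure itself is defined further down.
 */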
static struct pmu pmu_class;

/*
 * Accumulated power represents the sum of each compute unit's (CU) power
 * consumption. The total accumulated power can be read from
 * MSR_F15H_CU_PWR_ACCUMULATOR on any core of a CU. cpu_mask is the bitmap
 * of the one core per CU that is picked to do the measurement for its CU.
 */
static cpumask_t cpu_mask;

static void event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_pwr_acc, new_pwr_acc, prev_ptsc, new_ptsc;
	u64 delta, tdelta;

	prev_pwr_acc = hwc->pwr_acc;
	prev_ptsc = hwc->ptsc;
	rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
	rdmsrl(MSR_F15H_PTSC, new_ptsc);

	/*
	 * Calculate the CU power consumption over the elapsed time period.
	 * If the accumulator wrapped around (new < prev), unwrap it using
	 * its maximum value, max_cu_acc_power. The final value (delta) is
	 * in micro-Watts; add it to the event count.
	 */
	if (new_pwr_acc < prev_pwr_acc) {
		delta = max_cu_acc_power + new_pwr_acc;
		delta -= prev_pwr_acc;
	} else {
		delta = new_pwr_acc - prev_pwr_acc;
	}

	delta *= cpu_pwr_sample_ratio * 1000;
	tdelta = new_ptsc - prev_ptsc;

	do_div(delta, tdelta);
	local64_add(delta, &event->count);
}

static void __pmu_event_start(struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	rdmsrl(MSR_F15H_PTSC, event->hw.ptsc);
	rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
}

static void pmu_event_start(struct perf_event *event, int mode)
{
	__pmu_event_start(event);
}

static void pmu_event_stop(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Mark event as deactivated and stopped. */
	if (!(hwc->state & PERF_HES_STOPPED))
		hwc->state |= PERF_HES_STOPPED;

	/* Check if software counter update is necessary. */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

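/*
 * A note on the hw.state flags used above and below: PERF_HES_STOPPED
 * means the event is not currently counting, and PERF_HES_UPTODATE means
 * its count needs no further update. A newly added event starts with both
 * set until it is actually started.
 */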
static int pmu_event_add(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__pmu_event_start(event);

	return 0;
}

static void pmu_event_del(struct perf_event *event, int flags)
{
	pmu_event_stop(event, PERF_EF_UPDATE);
}

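/*
 * Userspace finds this PMU's dynamic type value in
 * /sys/bus/event_source/devices/power/type and the event encoding under
 * .../power/events/, so attr->type and attr->config arrive here already
 * matching the definitions below.
 */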
static int pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & AMD_POWER_EVENT_MASK;

	/* Only look at AMD power events. */
	if (event->attr.type != pmu_class.type)
		return -ENOENT;

	/* Unsupported modes and filters. */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest ||
	    /* no sampling */
	    event->attr.sample_period)
		return -EINVAL;

	if (cfg != AMD_POWER_EVENTSEL_PKG)
		return -EINVAL;

	return 0;
}

static void pmu_event_read(struct perf_event *event)
{
	event_update(event);
}

static ssize_t
get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, get_attr_cpumask, NULL);

static struct attribute *pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

/*
 * Currently the mechanism only supports reporting the power of each
 * processor package.
 */
EVENT_ATTR_STR(power-pkg, power_pkg, "event=0x01");

EVENT_ATTR_STR(power-pkg.unit, power_pkg_unit, "mWatts");

/* Convert the count from micro-Watts to milli-Watts. */
EVENT_ATTR_STR(power-pkg.scale, power_pkg_scale, "1.000000e-3");
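
/*
 * The raw event count accumulates in micro-Watts (see event_update()).
 * The perf tool multiplies the count by the scale string above and
 * appends the unit string, so a raw reading of e.g. 42000 is displayed
 * as 42 mWatts.
 */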

static struct attribute *events_attr[] = {
	EVENT_PTR(power_pkg),
	EVENT_PTR(power_pkg_unit),
	EVENT_PTR(power_pkg_scale),
	NULL,
};

static struct attribute_group pmu_events_group = {
	.name = "events",
	.attrs = events_attr,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group pmu_format_group = {
	.name = "format",
	.attrs = formats_attr,
};

static const struct attribute_group *attr_groups[] = {
	&pmu_attr_group,
	&pmu_format_group,
	&pmu_events_group,
	NULL,
};
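
/*
 * perf core exposes the groups above under
 * /sys/bus/event_source/devices/power/, giving roughly this layout:
 *
 *   power/cpumask
 *   power/format/event
 *   power/events/power-pkg
 *   power/events/power-pkg.unit
 *   power/events/power-pkg.scale
 */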

static struct pmu pmu_class = {
	.attr_groups	= attr_groups,
	/* system-wide only */
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= pmu_event_init,
	.add		= pmu_event_add,
	.del		= pmu_event_del,
	.start		= pmu_event_start,
	.stop		= pmu_event_stop,
	.read		= pmu_event_read,
};

static void power_cpu_exit(int cpu)
{
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
		return;

	/*
	 * This CPU was doing the measurement for its compute unit. If
	 * another sibling of the same compute unit is still online, pick
	 * one and migrate the events and their context over to it.
	 */
	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
	if (target < nr_cpumask_bits) {
		cpumask_set_cpu(target, &cpu_mask);
		perf_pmu_migrate_context(&pmu_class, cpu, target);
	}
}

static void power_cpu_init(int cpu)
{
	int target;

	/*
	 * 1) If any CPU of the same compute unit is already set in
	 *    cpu_mask, do nothing.
	 * 2) If no CPU of the same compute unit is set in cpu_mask,
	 *    set the current (STARTING) CPU.
	 *
	 * Note: if there is a CPU aside from the new one already in the
	 * sibling mask, then it is also already in cpu_mask.
	 */
	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
	if (target >= nr_cpumask_bits)
		cpumask_set_cpu(cpu, &cpu_mask);
}

static int
power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		power_cpu_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		power_cpu_exit(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block power_cpu_notifier_nb = {
	.notifier_call = power_cpu_notifier,
	.priority = CPU_PRI_PERF,
};

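/*
 * Match any Family 15h AMD processor; amd_power_pmu_init() additionally
 * requires the accumulated-power feature flag (X86_FEATURE_ACC_POWER)
 * before it registers the PMU.
 */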
static const struct x86_cpu_id cpu_match[] = {
	{ .vendor = X86_VENDOR_AMD, .family = 0x15 },
	{},
};

static int __init amd_power_pmu_init(void)
{
	int cpu, target, ret;

	if (!x86_match_cpu(cpu_match))
		return 0;

	if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
		return -ENODEV;

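	/*
	 * CPUID leaf 0x80000007: ECX reports the ratio of the compute unit
	 * power accumulator sample period to the PTSC period (named
	 * CpuPwrSampleTimeRatio in AMD's documentation, as far as the BKDG
	 * describes it).
	 */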
	cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);

	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
		pr_err("Failed to read max compute unit power accumulator MSR\n");
		return -ENODEV;
	}

	cpu_notifier_register_begin();

	/* Choose one online core of each compute unit. */
	for_each_online_cpu(cpu) {
		target = cpumask_first(topology_sibling_cpumask(cpu));
		if (!cpumask_test_cpu(target, &cpu_mask))
			cpumask_set_cpu(target, &cpu_mask);
	}

	ret = perf_pmu_register(&pmu_class, "power", -1);
	if (WARN_ON(ret)) {
		pr_warn("AMD Power PMU registration failed\n");
		goto out;
	}

	__register_cpu_notifier(&power_cpu_notifier_nb);

	pr_info("AMD Power PMU detected\n");

out:
	cpu_notifier_register_done();

	return ret;
}
module_init(amd_power_pmu_init);

static void __exit amd_power_pmu_exit(void)
{
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&power_cpu_notifier_nb);
	cpu_notifier_register_done();

	perf_pmu_unregister(&pmu_class);
}
module_exit(amd_power_pmu_exit);

MODULE_AUTHOR("Huang Rui <[email protected]>");
MODULE_DESCRIPTION("AMD Processor Power Reporting Mechanism");
MODULE_LICENSE("GPL v2");