/*
 * Copyright (c) 2017 Linaro Limited. All rights reserved.
 *
 * Author: Leo Yan <leo.yan@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "coresight-priv.h"

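/*
 * Offsets of the external debug (ED*) sampling and control registers
 * within the memory-mapped debug register frame handled by this driver.
 */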
#define EDPCSR				0x0A0
#define EDCIDSR				0x0A4
#define EDVIDSR				0x0A8
#define EDPCSR_HI			0x0AC
#define EDOSLAR				0x300
#define EDPRCR				0x310
#define EDPRSR				0x314
#define EDDEVID1			0xFC4
#define EDDEVID				0xFC8

#define EDPCSR_PROHIBITED		0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB			BIT(0)
#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ			BIT(3)
#define EDPRCR_CORENPDRQ		BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK			BIT(6)
#define EDPRSR_PU			BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS			BIT(31)
#define EDVIDSR_E2			BIT(30)
#define EDVIDSR_E3			BIT(29)
#define EDVIDSR_HV			BIT(28)
#define EDVIDSR_VMID			GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PCSROffset
 *
 * NOTE: armv8 and armv7 have different definitions for this register,
 * so the bit definitions are consolidated as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state; we
 *          rely on EDDEVID to check whether EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR		(0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
#define EDDEVID_IMPL_FULL		(0x3)

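/* Poll interval and timeout, in microseconds, when waiting for the CPU to power up */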
#define DEBUG_WAIT_SLEEP		1000
#define DEBUG_WAIT_TIMEOUT		32000

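/*
 * Per-CPU state: the MMIO base of the CPU's debug register frame, which
 * sampling registers were found to be implemented, and the most recently
 * read register values used by the panic-time dump.
 */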
struct debug_drvdata {
	void __iomem	*base;
	struct device	*dev;
	int		cpu;

	bool		edpcsr_present;
	bool		edcidsr_present;
	bool		edvidsr_present;
	bool		pc_has_offset;

	u32		edpcsr;
	u32		edpcsr_hi;
	u32		edprsr;
	u32		edvidsr;
	u32		edcidsr;
};

static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;

static bool debug_enable;
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");

static void debug_os_unlock(struct debug_drvdata *drvdata)
{
	/* Unlocks the debug registers */
	writel_relaxed(0x0, drvdata->base + EDOSLAR);

	/* Make sure the registers are unlocked before accessing */
	wmb();
}

/*
 * According to ARM DDI 0487A.k, the access permission must be checked
 * before touching the external debug registers; if either of the
 * conditions below is met, the debug registers cannot be accessed
 * without risking a lockup:
 *
 * - The CPU power domain is powered off;
 * - The OS Double Lock is locked;
 *
 * Both conditions can be detected by reading EDPRSR.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
	/* CPU is powered off */
	if (!(drvdata->edprsr & EDPRSR_PU))
		return false;

	/* The OS Double Lock is locked */
	if (drvdata->edprsr & EDPRSR_DLK)
		return false;

	return true;
}

static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
	u32 edprcr;

try_again:

	/*
	 * Send a request to the power management controller by asserting
	 * the DBGPWRUPREQ signal; a sane power management controller
	 * implementation should then enable the CPU power domain if the
	 * CPU is in a low power state.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	/* Wait for CPU to be powered up (timeout~=32ms) */
	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
		/*
		 * Unfortunately the CPU cannot be powered up, so return
		 * without permission to access the other registers.  In
		 * this case CPU low power states should be disabled to
		 * ensure the CPU power domain stays enabled!
		 */
		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
			__func__, drvdata->cpu);
		return;
	}

	/*
	 * At this point the CPU is powered up, so set the no powerdown
	 * request bit so we don't lose power and emulate power down.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

	/* The core power domain was switched off in the meantime, try again */
	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
		goto try_again;
}

static void debug_read_regs(struct debug_drvdata *drvdata)
{
	u32 save_edprcr;

	CS_UNLOCK(drvdata->base);

	/* Unlock os lock */
	debug_os_unlock(drvdata);

	/* Save EDPRCR register */
	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

	/*
	 * Ensure the CPU power domain is enabled so that the
	 * registers are accessible.
	 */
	debug_force_cpu_powered_up(drvdata);

	if (!debug_access_permitted(drvdata))
		goto out;

	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

	/*
	 * As described in ARM DDI 0487A.k, if the processing
	 * element (PE) is in debug state, or sample-based
	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
	 * the EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
	 * UNKNOWN.  So directly bail out for this case.
	 */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
		goto out;

	/*
	 * A read of the EDPCSR normally has the side-effect of
	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
	 * at this point it's safe to read values from them.
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

	if (drvdata->edcidsr_present)
		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

	if (drvdata->edvidsr_present)
		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
	/* Restore EDPRCR register */
	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

	CS_LOCK(drvdata->base);
}

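/*
 * Convert the raw EDPCSR sample into a usable PC.  On a 64-bit kernel the
 * full PC is the concatenation EDPCSR_HI:EDPCSR; on 32-bit, bit 0 of the
 * sample encodes the Thumb state and an implementation defined offset may
 * apply depending on EDDEVID1.PCSROffset.
 */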
#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	return (unsigned long)drvdata->edpcsr_hi << 32 |
	       (unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
	unsigned long pc;

	pc = (unsigned long)drvdata->edpcsr;

	if (drvdata->pc_has_offset) {
		arm_inst_offset = 8;
		thumb_inst_offset = 4;
	}

	/* Handle thumb instruction */
	if (pc & EDPCSR_THUMB) {
		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
		return pc;
	}

	/*
	 * Handle the ARM instruction offset: if the ARM instruction
	 * is not 4-byte aligned, the behaviour is possibly
	 * implementation defined; keep the original value in this
	 * case and print a notice.
	 */
	if (pc & BIT(1))
		dev_emerg(drvdata->dev,
			  "Instruction offset is implementation defined\n");
	else
		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

	return pc;
}
#endif

static void debug_dump_regs(struct debug_drvdata *drvdata)
{
	struct device *dev = drvdata->dev;
	unsigned long pc;

	dev_emerg(dev, " EDPRSR: %08x (Power:%s DLK:%s)\n",
		  drvdata->edprsr,
		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

	if (!debug_access_permitted(drvdata)) {
		dev_emerg(dev, "No permission to access debug registers!\n");
		return;
	}

	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
		return;
	}

	pc = debug_adjust_pc(drvdata);
	dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc);

	if (drvdata->edcidsr_present)
		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

	if (drvdata->edvidsr_present)
		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
			  drvdata->edvidsr,
			  drvdata->edvidsr & EDVIDSR_NS ?
			  "Non-secure" : "Secure",
			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
				(drvdata->edvidsr & EDVIDSR_E2 ?
				 "EL2" : "EL1/0"),
			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
}

static void debug_init_arch_data(void *info)
{
	struct debug_drvdata *drvdata = info;
	u32 mode, pcsr_offset;
	u32 eddevid, eddevid1;

	CS_UNLOCK(drvdata->base);

	/* Read device info */
	eddevid = readl_relaxed(drvdata->base + EDDEVID);
	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

	CS_LOCK(drvdata->base);

	/* Parse implementation feature */
	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

	drvdata->edpcsr_present  = false;
	drvdata->edcidsr_present = false;
	drvdata->edvidsr_present = false;
	drvdata->pc_has_offset   = false;

	switch (mode) {
	case EDDEVID_IMPL_FULL:
		drvdata->edvidsr_present = true;
		/* Fall through */
	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
		drvdata->edcidsr_present = true;
		/* Fall through */
	case EDDEVID_IMPL_EDPCSR:
		/*
		 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
		 * an offset applies to the PC sampling value; if
		 * EDDEVID1.PCSROffset reads back as 0x2, the debug module
		 * does not sample the instruction set state when an armv8
		 * CPU is in AArch32 state.
		 */
		drvdata->edpcsr_present =
			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

		drvdata->pc_has_offset =
			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
		break;
	default:
		break;
	}
}

/*
 * Dump out information on panic.
 */
static int debug_notifier_call(struct notifier_block *self,
			       unsigned long v, void *p)
{
	int cpu;
	struct debug_drvdata *drvdata;

	mutex_lock(&debug_lock);

	/* Bail out if the functionality is disabled */
	if (!debug_enable)
		goto skip_dump;

	pr_emerg("ARM external debug module:\n");

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

		debug_read_regs(drvdata);
		debug_dump_regs(drvdata);
	}

skip_dump:
	mutex_unlock(&debug_lock);
	return 0;
}

static struct notifier_block debug_notifier = {
	.notifier_call = debug_notifier_call,
};

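/*
 * Take a runtime PM reference on every CPU debug device so the debug
 * power domains stay powered while dumping is enabled.
 */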
static int debug_enable_func(void)
{
	struct debug_drvdata *drvdata;
	int cpu, ret = 0;
	cpumask_t mask;

	/*
	 * Use the cpumask to track which debug power domains have
	 * been powered on, and use it to handle the failure case.
	 */
	cpumask_clear(&mask);

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		ret = pm_runtime_get_sync(drvdata->dev);
		if (ret < 0)
			goto err;
		else
			cpumask_set_cpu(cpu, &mask);
	}

	return 0;

err:
	/*
	 * If pm_runtime_get_sync() has failed, roll back all the other
	 * CPUs that were enabled before the failure.
	 */
	for_each_cpu(cpu, &mask) {
		drvdata = per_cpu(debug_drvdata, cpu);
		pm_runtime_put_noidle(drvdata->dev);
	}

	return ret;
}

static int debug_disable_func(void)
{
	struct debug_drvdata *drvdata;
	int cpu, ret, err = 0;

	/*
	 * Disable the debug power domains; record any error but keep
	 * iterating over the remaining CPUs when an error has been
	 * encountered.
	 */
	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		ret = pm_runtime_put(drvdata->dev);
		if (ret < 0)
			err = ret;
	}

	return err;
}

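/*
 * debugfs "enable" knob: writing 1 powers up the debug domains and enables
 * the panic-time dump, writing 0 disables it again.
 */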
static ssize_t debug_func_knob_write(struct file *f,
		const char __user *buf, size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	ret = kstrtou8_from_user(buf, count, 2, &val);
	if (ret)
		return ret;

	mutex_lock(&debug_lock);

	if (val == debug_enable)
		goto out;

	if (val)
		ret = debug_enable_func();
	else
		ret = debug_disable_func();

	if (ret) {
		pr_err("%s: unable to %s debug function: %d\n",
		       __func__, val ? "enable" : "disable", ret);
		goto err;
	}

	debug_enable = val;
out:
	ret = count;
err:
	mutex_unlock(&debug_lock);
	return ret;
}

static ssize_t debug_func_knob_read(struct file *f,
		char __user *ubuf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char buf[3];

	mutex_lock(&debug_lock);
	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
	mutex_unlock(&debug_lock);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
	return ret;
}

static const struct file_operations debug_func_knob_fops = {
	.open	= simple_open,
	.read	= debug_func_knob_read,
	.write	= debug_func_knob_write,
};

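/*
 * Create the debugfs "enable" knob and register the panic notifier; called
 * once, when the first CPU debug device is probed.
 */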
static int debug_func_init(void)
{
	struct dentry *file;
	int ret;

	/* Create debugfs node */
	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
	if (!debug_debugfs_dir) {
		pr_err("%s: unable to create debugfs directory\n", __func__);
		return -ENOMEM;
	}

	file = debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
				   &debug_func_knob_fops);
	if (!file) {
		pr_err("%s: unable to create enable knob file\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	/* Register function to be called for panic */
	ret = atomic_notifier_chain_register(&panic_notifier_list,
					     &debug_notifier);
	if (ret) {
		pr_err("%s: unable to register notifier: %d\n",
		       __func__, ret);
		goto err;
	}

	return 0;

err:
	debugfs_remove_recursive(debug_debugfs_dir);
	return ret;
}

static void debug_func_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &debug_notifier);
	debugfs_remove_recursive(debug_debugfs_dir);
}

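/*
 * One AMBA device is probed per CPU debug unit: map its register frame,
 * discover which sampling registers are implemented on the owning CPU, and
 * set up the debugfs/panic hooks when the first unit appears.
 */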
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct device_node *np = adev->dev.of_node;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;
	if (per_cpu(debug_drvdata, drvdata->cpu)) {
		dev_err(dev, "CPU%d drvdata has already been initialized\n",
			drvdata->cpu);
		return -EBUSY;
	}

	drvdata->dev = &adev->dev;
	amba_set_drvdata(adev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	get_online_cpus();
	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
				       drvdata, 1);
	put_online_cpus();

	if (ret) {
		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
		goto err;
	}

	if (!drvdata->edpcsr_present) {
		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
			drvdata->cpu);
		ret = -ENXIO;
		goto err;
	}

	if (!debug_count++) {
		ret = debug_func_init();
		if (ret)
			goto err_func_init;
	}

	mutex_lock(&debug_lock);
	/* Turn off debug power domain if debugging is disabled */
	if (!debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
	return 0;

err_func_init:
	debug_count--;
err:
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
	return ret;
}

static int debug_remove(struct amba_device *adev)
{
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata = amba_get_drvdata(adev);

	per_cpu(debug_drvdata, drvdata->cpu) = NULL;

	mutex_lock(&debug_lock);
	/* Turn off the debug power domain before the module is removed */
	if (debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	if (!--debug_count)
		debug_func_exit();

	return 0;
}

static const struct amba_id debug_ids[] = {
	{	/* Debug for Cortex-A53 */
		.id	= 0x000bbd03,
		.mask	= 0x000fffff,
	},
	{	/* Debug for Cortex-A57 */
		.id	= 0x000bbd07,
		.mask	= 0x000fffff,
	},
	{	/* Debug for Cortex-A72 */
		.id	= 0x000bbd08,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver debug_driver = {
	.drv = {
		.name   = "coresight-cpu-debug",
		.suppress_bind_attrs = true,
	},
	.probe	= debug_probe,
	.remove	= debug_remove,
	.id_table = debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");