
/arch/arm/mach-msm/jtag.c

https://bitbucket.org/morfic/trinity_four
C | 1176 lines | 956 code | 88 blank | 132 comment | 52 complexity | MD5: 2da1903daebc84cf7b9016310ee6bf61
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/coresight.h>
#include <mach/scm.h>
#include <mach/jtag.h>

#include "cp14.h"

#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
#define BVAL(val, n) ((val & BIT(n)) >> n)
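
/*
 * BM(lsb, msb) builds a mask covering bits [msb:lsb] and BMVAL() shifts the
 * masked field down to bit 0. For an arbitrary example value 0x00530000,
 * BMVAL(0x00530000, 16, 19) masks with 0x000F0000 and yields 0x3; the init
 * functions below use this to decode DBGDIDR, ETMIDR and ETMCCR fields.
 */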

/* no of dbg regs + 1 (for storing the reg count) */
#define MAX_DBG_REGS (90)
#define MAX_DBG_STATE_SIZE (MAX_DBG_REGS * num_possible_cpus())
/* no of etm regs + 1 (for storing the reg count) */
#define MAX_ETM_REGS (78)
#define MAX_ETM_STATE_SIZE (MAX_ETM_REGS * num_possible_cpus())
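
/*
 * dbg.state and etm.state are flat arrays sized for all possible CPUs; each
 * CPU owns a fixed slice of MAX_DBG_REGS (or MAX_ETM_REGS) 32-bit words
 * starting at cpu * MAX_DBG_REGS (cpu * MAX_ETM_REGS), which is how the
 * save/restore routines below index into them.
 */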

#define OSLOCK_MAGIC (0xC5ACCE55)
#define DBGDSCR_MASK (0x6C30FC3C)
#define CPMR_ETMCLKEN (0x8)
#define TZ_DBG_ETM_FEAT_ID (0x8)
#define TZ_DBG_ETM_VER (0x400000)

uint32_t msm_jtag_save_cntr[NR_CPUS];
uint32_t msm_jtag_restore_cntr[NR_CPUS];

struct dbg_ctx {
	uint8_t arch;
	bool save_restore_enabled;
	uint8_t nr_wp;
	uint8_t nr_bp;
	uint8_t nr_ctx_cmp;
	uint32_t *state;
};
static struct dbg_ctx dbg;

struct etm_ctx {
	uint8_t arch;
	bool save_restore_enabled;
	uint8_t nr_addr_cmp;
	uint8_t nr_cntr;
	uint8_t nr_ext_inp;
	uint8_t nr_ext_out;
	uint8_t nr_ctxid_cmp;
	uint32_t *state;
};
static struct etm_ctx etm;

static int dbg_read_bxr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		state[i++] = dbg_read(DBGBVR0);
		state[i++] = dbg_read(DBGBCR0);
		break;
	case 1:
		state[i++] = dbg_read(DBGBVR1);
		state[i++] = dbg_read(DBGBCR1);
		break;
	case 2:
		state[i++] = dbg_read(DBGBVR2);
		state[i++] = dbg_read(DBGBCR2);
		break;
	case 3:
		state[i++] = dbg_read(DBGBVR3);
		state[i++] = dbg_read(DBGBCR3);
		break;
	case 4:
		state[i++] = dbg_read(DBGBVR4);
		state[i++] = dbg_read(DBGBCR4);
		break;
	case 5:
		state[i++] = dbg_read(DBGBVR5);
		state[i++] = dbg_read(DBGBCR5);
		break;
	case 6:
		state[i++] = dbg_read(DBGBVR6);
		state[i++] = dbg_read(DBGBCR6);
		break;
	case 7:
		state[i++] = dbg_read(DBGBVR7);
		state[i++] = dbg_read(DBGBCR7);
		break;
	case 8:
		state[i++] = dbg_read(DBGBVR8);
		state[i++] = dbg_read(DBGBCR8);
		break;
	case 9:
		state[i++] = dbg_read(DBGBVR9);
		state[i++] = dbg_read(DBGBCR9);
		break;
	case 10:
		state[i++] = dbg_read(DBGBVR10);
		state[i++] = dbg_read(DBGBCR10);
		break;
	case 11:
		state[i++] = dbg_read(DBGBVR11);
		state[i++] = dbg_read(DBGBCR11);
		break;
	case 12:
		state[i++] = dbg_read(DBGBVR12);
		state[i++] = dbg_read(DBGBCR12);
		break;
	case 13:
		state[i++] = dbg_read(DBGBVR13);
		state[i++] = dbg_read(DBGBCR13);
		break;
	case 14:
		state[i++] = dbg_read(DBGBVR14);
		state[i++] = dbg_read(DBGBCR14);
		break;
	case 15:
		state[i++] = dbg_read(DBGBVR15);
		state[i++] = dbg_read(DBGBCR15);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int dbg_write_bxr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		dbg_write(state[i++], DBGBVR0);
		dbg_write(state[i++], DBGBCR0);
		break;
	case 1:
		dbg_write(state[i++], DBGBVR1);
		dbg_write(state[i++], DBGBCR1);
		break;
	case 2:
		dbg_write(state[i++], DBGBVR2);
		dbg_write(state[i++], DBGBCR2);
		break;
	case 3:
		dbg_write(state[i++], DBGBVR3);
		dbg_write(state[i++], DBGBCR3);
		break;
	case 4:
		dbg_write(state[i++], DBGBVR4);
		dbg_write(state[i++], DBGBCR4);
		break;
	case 5:
		dbg_write(state[i++], DBGBVR5);
		dbg_write(state[i++], DBGBCR5);
		break;
	case 6:
		dbg_write(state[i++], DBGBVR6);
		dbg_write(state[i++], DBGBCR6);
		break;
	case 7:
		dbg_write(state[i++], DBGBVR7);
		dbg_write(state[i++], DBGBCR7);
		break;
	case 8:
		dbg_write(state[i++], DBGBVR8);
		dbg_write(state[i++], DBGBCR8);
		break;
	case 9:
		dbg_write(state[i++], DBGBVR9);
		dbg_write(state[i++], DBGBCR9);
		break;
	case 10:
		dbg_write(state[i++], DBGBVR10);
		dbg_write(state[i++], DBGBCR10);
		break;
	case 11:
		dbg_write(state[i++], DBGBVR11);
		dbg_write(state[i++], DBGBCR11);
		break;
	case 12:
		dbg_write(state[i++], DBGBVR12);
		dbg_write(state[i++], DBGBCR12);
		break;
	case 13:
		dbg_write(state[i++], DBGBVR13);
		dbg_write(state[i++], DBGBCR13);
		break;
	case 14:
		dbg_write(state[i++], DBGBVR14);
		dbg_write(state[i++], DBGBCR14);
		break;
	case 15:
		dbg_write(state[i++], DBGBVR15);
		dbg_write(state[i++], DBGBCR15);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int dbg_read_wxr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		state[i++] = dbg_read(DBGWVR0);
		state[i++] = dbg_read(DBGWCR0);
		break;
	case 1:
		state[i++] = dbg_read(DBGWVR1);
		state[i++] = dbg_read(DBGWCR1);
		break;
	case 2:
		state[i++] = dbg_read(DBGWVR2);
		state[i++] = dbg_read(DBGWCR2);
		break;
	case 3:
		state[i++] = dbg_read(DBGWVR3);
		state[i++] = dbg_read(DBGWCR3);
		break;
	case 4:
		state[i++] = dbg_read(DBGWVR4);
		state[i++] = dbg_read(DBGWCR4);
		break;
	case 5:
		state[i++] = dbg_read(DBGWVR5);
		state[i++] = dbg_read(DBGWCR5);
		break;
	case 6:
		state[i++] = dbg_read(DBGWVR6);
		state[i++] = dbg_read(DBGWCR6);
		break;
	case 7:
		state[i++] = dbg_read(DBGWVR7);
		state[i++] = dbg_read(DBGWCR7);
		break;
	case 8:
		state[i++] = dbg_read(DBGWVR8);
		state[i++] = dbg_read(DBGWCR8);
		break;
	case 9:
		state[i++] = dbg_read(DBGWVR9);
		state[i++] = dbg_read(DBGWCR9);
		break;
	case 10:
		state[i++] = dbg_read(DBGWVR10);
		state[i++] = dbg_read(DBGWCR10);
		break;
	case 11:
		state[i++] = dbg_read(DBGWVR11);
		state[i++] = dbg_read(DBGWCR11);
		break;
	case 12:
		state[i++] = dbg_read(DBGWVR12);
		state[i++] = dbg_read(DBGWCR12);
		break;
	case 13:
		state[i++] = dbg_read(DBGWVR13);
		state[i++] = dbg_read(DBGWCR13);
		break;
	case 14:
		state[i++] = dbg_read(DBGWVR14);
		state[i++] = dbg_read(DBGWCR14);
		break;
	case 15:
		state[i++] = dbg_read(DBGWVR15);
		state[i++] = dbg_read(DBGWCR15);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int dbg_write_wxr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		dbg_write(state[i++], DBGWVR0);
		dbg_write(state[i++], DBGWCR0);
		break;
	case 1:
		dbg_write(state[i++], DBGWVR1);
		dbg_write(state[i++], DBGWCR1);
		break;
	case 2:
		dbg_write(state[i++], DBGWVR2);
		dbg_write(state[i++], DBGWCR2);
		break;
	case 3:
		dbg_write(state[i++], DBGWVR3);
		dbg_write(state[i++], DBGWCR3);
		break;
	case 4:
		dbg_write(state[i++], DBGWVR4);
		dbg_write(state[i++], DBGWCR4);
		break;
	case 5:
		dbg_write(state[i++], DBGWVR5);
		dbg_write(state[i++], DBGWCR5);
		break;
	case 6:
		dbg_write(state[i++], DBGWVR6);
		dbg_write(state[i++], DBGWCR6);
		break;
	case 7:
		dbg_write(state[i++], DBGWVR7);
		dbg_write(state[i++], DBGWCR7);
		break;
	case 8:
		dbg_write(state[i++], DBGWVR8);
		dbg_write(state[i++], DBGWCR8);
		break;
	case 9:
		dbg_write(state[i++], DBGWVR9);
		dbg_write(state[i++], DBGWCR9);
		break;
	case 10:
		dbg_write(state[i++], DBGWVR10);
		dbg_write(state[i++], DBGWCR10);
		break;
	case 11:
		dbg_write(state[i++], DBGWVR11);
		dbg_write(state[i++], DBGWCR11);
		break;
	case 12:
		dbg_write(state[i++], DBGWVR12);
		dbg_write(state[i++], DBGWCR12);
		break;
	case 13:
		dbg_write(state[i++], DBGWVR13);
		dbg_write(state[i++], DBGWCR13);
		break;
	case 14:
		dbg_write(state[i++], DBGWVR14);
		dbg_write(state[i++], DBGWCR14);
		break;
	case 15:
		dbg_write(state[i++], DBGWVR15);
		dbg_write(state[i++], DBGWCR15);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static inline bool dbg_arch_supported(uint8_t arch)
{
	switch (arch) {
	case ARM_DEBUG_ARCH_V7_1:
	case ARM_DEBUG_ARCH_V7:
	case ARM_DEBUG_ARCH_V7B:
		break;
	default:
		return false;
	}
	return true;
}

static inline void dbg_save_state(int cpu)
{
	int i, j, cnt;

	i = cpu * MAX_DBG_REGS;

	switch (dbg.arch) {
	case ARM_DEBUG_ARCH_V7_1:
		/* Set OS lock to inform the debugger that the OS is in the
		 * process of saving debug registers. It prevents accidental
		 * modification of the debug regs by the external debugger.
		 */
		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
		isb();

		/* We skip saving DBGBXVRn since not supported on Krait */
		dbg.state[i++] = dbg_read(DBGWFAR);
		for (j = 0; j < dbg.nr_bp; j++)
			i = dbg_read_bxr(dbg.state, i, j);
		for (j = 0; j < dbg.nr_wp; j++)
			i = dbg_read_wxr(dbg.state, i, j);
		dbg.state[i++] = dbg_read(DBGVCR);
		dbg.state[i++] = dbg_read(DBGCLAIMCLR);
		dbg.state[i++] = dbg_read(DBGDTRTXext);
		dbg.state[i++] = dbg_read(DBGDTRRXext);
		dbg.state[i++] = dbg_read(DBGDSCRext);

		/* Set the OS double lock */
		isb();
		dbg_write(0x1, DBGOSDLR);
		isb();
		break;
	case ARM_DEBUG_ARCH_V7B:
	case ARM_DEBUG_ARCH_V7:
		/* Set OS lock to inform the debugger that the OS is in the
		 * process of saving dbg registers. It prevents accidental
		 * modification of the dbg regs by the external debugger
		 * and resets the internal counter.
		 */
		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
		isb();

		cnt = dbg_read(DBGOSSRR); /* save count for restore */
		/* MAX_DBG_REGS = no of dbg regs + 1 (for storing the reg count)
		 * check for state overflow, if not enough space, don't save
		 */
		if (cnt >= MAX_DBG_REGS)
			cnt = 0;
		dbg.state[i++] = cnt;
		for (j = 0; j < cnt; j++)
			dbg.state[i++] = dbg_read(DBGOSSRR);
		break;
	default:
		pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
				   __func__);
	}
}

static inline void dbg_restore_state(int cpu)
{
	int i, j, cnt;

	i = cpu * MAX_DBG_REGS;

	switch (dbg.arch) {
	case ARM_DEBUG_ARCH_V7_1:
		/* Clear the OS double lock */
		isb();
		dbg_write(0x0, DBGOSDLR);
		isb();

		/* Set OS lock. Lock will already be set after power collapse
		 * but this write is included to ensure it is set.
		 */
		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
		isb();

		/* We skip restoring DBGBXVRn since not supported on Krait */
		dbg_write(dbg.state[i++], DBGWFAR);
		for (j = 0; j < dbg.nr_bp; j++)
			i = dbg_write_bxr(dbg.state, i, j);
		for (j = 0; j < dbg.nr_wp; j++)
			i = dbg_write_wxr(dbg.state, i, j);
		dbg_write(dbg.state[i++], DBGVCR);
		dbg_write(dbg.state[i++], DBGCLAIMSET);
		dbg_write(dbg.state[i++], DBGDTRTXext);
		dbg_write(dbg.state[i++], DBGDTRRXext);
		dbg_write(dbg.state[i++] & DBGDSCR_MASK, DBGDSCRext);
		isb();
		dbg_write(0x0, DBGOSLAR);
		isb();
		break;
	case ARM_DEBUG_ARCH_V7B:
	case ARM_DEBUG_ARCH_V7:
		/* Clear sticky bit */
		dbg_read(DBGPRSR);
		isb();

		/* Set OS lock. Lock will already be set after power collapse
		 * but this write is required to reset the internal counter used
		 * for DBG state restore.
		 */
		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
		isb();

		dbg_read(DBGOSSRR); /* dummy read of OSSRR */
		cnt = dbg.state[i++];
		for (j = 0; j < cnt; j++) {
			/* DBGDSCR special case
			 * DBGDSCR = DBGDSCR & DBGDSCR_MASK
			 */
			if (j == 20)
				dbg_write(dbg.state[i++] & DBGDSCR_MASK,
					  DBGOSSRR);
			else
				dbg_write(dbg.state[i++], DBGOSSRR);
		}

		/* Clear the OS lock */
		isb();
		dbg_write(0x0, DBGOSLAR);
		isb();
		break;
	default:
		pr_err_ratelimited("unsupported dbg arch %d in %s\n", dbg.arch,
				   __func__);
	}
}

static int etm_read_acxr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		state[i++] = etm_read(ETMACVR0);
		state[i++] = etm_read(ETMACTR0);
		break;
	case 1:
		state[i++] = etm_read(ETMACVR1);
		state[i++] = etm_read(ETMACTR1);
		break;
	case 2:
		state[i++] = etm_read(ETMACVR2);
		state[i++] = etm_read(ETMACTR2);
		break;
	case 3:
		state[i++] = etm_read(ETMACVR3);
		state[i++] = etm_read(ETMACTR3);
		break;
	case 4:
		state[i++] = etm_read(ETMACVR4);
		state[i++] = etm_read(ETMACTR4);
		break;
	case 5:
		state[i++] = etm_read(ETMACVR5);
		state[i++] = etm_read(ETMACTR5);
		break;
	case 6:
		state[i++] = etm_read(ETMACVR6);
		state[i++] = etm_read(ETMACTR6);
		break;
	case 7:
		state[i++] = etm_read(ETMACVR7);
		state[i++] = etm_read(ETMACTR7);
		break;
	case 8:
		state[i++] = etm_read(ETMACVR8);
		state[i++] = etm_read(ETMACTR8);
		break;
	case 9:
		state[i++] = etm_read(ETMACVR9);
		state[i++] = etm_read(ETMACTR9);
		break;
	case 10:
		state[i++] = etm_read(ETMACVR10);
		state[i++] = etm_read(ETMACTR10);
		break;
	case 11:
		state[i++] = etm_read(ETMACVR11);
		state[i++] = etm_read(ETMACTR11);
		break;
	case 12:
		state[i++] = etm_read(ETMACVR12);
		state[i++] = etm_read(ETMACTR12);
		break;
	case 13:
		state[i++] = etm_read(ETMACVR13);
		state[i++] = etm_read(ETMACTR13);
		break;
	case 14:
		state[i++] = etm_read(ETMACVR14);
		state[i++] = etm_read(ETMACTR14);
		break;
	case 15:
		state[i++] = etm_read(ETMACVR15);
		state[i++] = etm_read(ETMACTR15);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_write_acxr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		etm_write(state[i++], ETMACVR0);
		etm_write(state[i++], ETMACTR0);
		break;
	case 1:
		etm_write(state[i++], ETMACVR1);
		etm_write(state[i++], ETMACTR1);
		break;
	case 2:
		etm_write(state[i++], ETMACVR2);
		etm_write(state[i++], ETMACTR2);
		break;
	case 3:
		etm_write(state[i++], ETMACVR3);
		etm_write(state[i++], ETMACTR3);
		break;
	case 4:
		etm_write(state[i++], ETMACVR4);
		etm_write(state[i++], ETMACTR4);
		break;
	case 5:
		etm_write(state[i++], ETMACVR5);
		etm_write(state[i++], ETMACTR5);
		break;
	case 6:
		etm_write(state[i++], ETMACVR6);
		etm_write(state[i++], ETMACTR6);
		break;
	case 7:
		etm_write(state[i++], ETMACVR7);
		etm_write(state[i++], ETMACTR7);
		break;
	case 8:
		etm_write(state[i++], ETMACVR8);
		etm_write(state[i++], ETMACTR8);
		break;
	case 9:
		etm_write(state[i++], ETMACVR9);
		etm_write(state[i++], ETMACTR9);
		break;
	case 10:
		etm_write(state[i++], ETMACVR10);
		etm_write(state[i++], ETMACTR10);
		break;
	case 11:
		etm_write(state[i++], ETMACVR11);
		etm_write(state[i++], ETMACTR11);
		break;
	case 12:
		etm_write(state[i++], ETMACVR12);
		etm_write(state[i++], ETMACTR12);
		break;
	case 13:
		etm_write(state[i++], ETMACVR13);
		etm_write(state[i++], ETMACTR13);
		break;
	case 14:
		etm_write(state[i++], ETMACVR14);
		etm_write(state[i++], ETMACTR14);
		break;
	case 15:
		etm_write(state[i++], ETMACVR15);
		etm_write(state[i++], ETMACTR15);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_read_cntx(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		state[i++] = etm_read(ETMCNTRLDVR0);
		state[i++] = etm_read(ETMCNTENR0);
		state[i++] = etm_read(ETMCNTRLDEVR0);
		state[i++] = etm_read(ETMCNTVR0);
		break;
	case 1:
		state[i++] = etm_read(ETMCNTRLDVR1);
		state[i++] = etm_read(ETMCNTENR1);
		state[i++] = etm_read(ETMCNTRLDEVR1);
		state[i++] = etm_read(ETMCNTVR1);
		break;
	case 2:
		state[i++] = etm_read(ETMCNTRLDVR2);
		state[i++] = etm_read(ETMCNTENR2);
		state[i++] = etm_read(ETMCNTRLDEVR2);
		state[i++] = etm_read(ETMCNTVR2);
		break;
	case 3:
		state[i++] = etm_read(ETMCNTRLDVR3);
		state[i++] = etm_read(ETMCNTENR3);
		state[i++] = etm_read(ETMCNTRLDEVR3);
		state[i++] = etm_read(ETMCNTVR3);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_write_cntx(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		etm_write(state[i++], ETMCNTRLDVR0);
		etm_write(state[i++], ETMCNTENR0);
		etm_write(state[i++], ETMCNTRLDEVR0);
		etm_write(state[i++], ETMCNTVR0);
		break;
	case 1:
		etm_write(state[i++], ETMCNTRLDVR1);
		etm_write(state[i++], ETMCNTENR1);
		etm_write(state[i++], ETMCNTRLDEVR1);
		etm_write(state[i++], ETMCNTVR1);
		break;
	case 2:
		etm_write(state[i++], ETMCNTRLDVR2);
		etm_write(state[i++], ETMCNTENR2);
		etm_write(state[i++], ETMCNTRLDEVR2);
		etm_write(state[i++], ETMCNTVR2);
		break;
	case 3:
		etm_write(state[i++], ETMCNTRLDVR3);
		etm_write(state[i++], ETMCNTENR3);
		etm_write(state[i++], ETMCNTRLDEVR3);
		etm_write(state[i++], ETMCNTVR3);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_read_extoutevr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		state[i++] = etm_read(ETMEXTOUTEVR0);
		break;
	case 1:
		state[i++] = etm_read(ETMEXTOUTEVR1);
		break;
	case 2:
		state[i++] = etm_read(ETMEXTOUTEVR2);
		break;
	case 3:
		state[i++] = etm_read(ETMEXTOUTEVR3);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_write_extoutevr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		etm_write(state[i++], ETMEXTOUTEVR0);
		break;
	case 1:
		etm_write(state[i++], ETMEXTOUTEVR1);
		break;
	case 2:
		etm_write(state[i++], ETMEXTOUTEVR2);
		break;
	case 3:
		etm_write(state[i++], ETMEXTOUTEVR3);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_read_cidcvr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		state[i++] = etm_read(ETMCIDCVR0);
		break;
	case 1:
		state[i++] = etm_read(ETMCIDCVR1);
		break;
	case 2:
		state[i++] = etm_read(ETMCIDCVR2);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static int etm_write_cidcvr(uint32_t *state, int i, int j)
{
	switch (j) {
	case 0:
		etm_write(state[i++], ETMCIDCVR0);
		break;
	case 1:
		etm_write(state[i++], ETMCIDCVR1);
		break;
	case 2:
		etm_write(state[i++], ETMCIDCVR2);
		break;
	default:
		pr_err_ratelimited("idx %d out of bounds in %s\n", j, __func__);
	}
	return i;
}

static inline void etm_clk_disable(void)
{
	uint32_t cpmr;

	isb();
	asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
	cpmr &= ~CPMR_ETMCLKEN;
	asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
}

static inline void etm_clk_enable(void)
{
	uint32_t cpmr;

	asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
	cpmr |= CPMR_ETMCLKEN;
	asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
	isb();
}
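
/*
 * The two helpers above vote the ETM clock on and off by read-modify-writing
 * bit 3 (CPMR_ETMCLKEN) of the implementation-specific CP15 register the code
 * calls CPMR, accessed via mrc/mcr p15, 7, c15, c0, 5: enable sets the bit
 * before any ETM register access, disable clears it again afterwards.
 */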

static inline bool etm_arch_supported(uint8_t arch)
{
	switch (arch) {
	case ETM_ARCH_V3_3:
	case PFT_ARCH_V1_1:
		break;
	default:
		return false;
	}
	return true;
}

static inline void etm_save_state(int cpu)
{
	int i, j, cnt;

	i = cpu * MAX_ETM_REGS;

	/* Vote for ETM power/clock enable */
	etm_clk_enable();

	switch (etm.arch) {
	case PFT_ARCH_V1_1:
		/* Set OS lock to inform the debugger that the OS is in the
		 * process of saving etm registers. It prevents accidental
		 * modification of the etm regs by the external debugger.
		 *
		 * We don't poll for ETMSR[1] since it doesn't get set
		 */
		etm_write(OSLOCK_MAGIC, ETMOSLAR);
		isb();

		etm.state[i++] = etm_read(ETMCR);
		etm.state[i++] = etm_read(ETMTRIGGER);
		etm.state[i++] = etm_read(ETMSR);
		etm.state[i++] = etm_read(ETMTSSCR);
		etm.state[i++] = etm_read(ETMTEEVR);
		etm.state[i++] = etm_read(ETMTECR1);
		etm.state[i++] = etm_read(ETMFFLR);
		for (j = 0; j < etm.nr_addr_cmp; j++)
			i = etm_read_acxr(etm.state, i, j);
		for (j = 0; j < etm.nr_cntr; j++)
			i = etm_read_cntx(etm.state, i, j);
		etm.state[i++] = etm_read(ETMSQ12EVR);
		etm.state[i++] = etm_read(ETMSQ21EVR);
		etm.state[i++] = etm_read(ETMSQ23EVR);
		etm.state[i++] = etm_read(ETMSQ31EVR);
		etm.state[i++] = etm_read(ETMSQ32EVR);
		etm.state[i++] = etm_read(ETMSQ13EVR);
		etm.state[i++] = etm_read(ETMSQR);
		for (j = 0; j < etm.nr_ext_out; j++)
			i = etm_read_extoutevr(etm.state, i, j);
		for (j = 0; j < etm.nr_ctxid_cmp; j++)
			i = etm_read_cidcvr(etm.state, i, j);
		etm.state[i++] = etm_read(ETMCIDCMR);
		etm.state[i++] = etm_read(ETMSYNCFR);
		etm.state[i++] = etm_read(ETMEXTINSELR);
		etm.state[i++] = etm_read(ETMTSEVR);
		etm.state[i++] = etm_read(ETMAUXCR);
		etm.state[i++] = etm_read(ETMTRACEIDR);
		etm.state[i++] = etm_read(ETMVMIDCVR);
		etm.state[i++] = etm_read(ETMCLAIMCLR);
		break;
	case ETM_ARCH_V3_3:
		/* In ETMv3.3, it is possible for the coresight lock to be
		 * implemented for CP14 interface but we currently assume that
		 * it is not, so no need to unlock and lock coresight lock
		 * (ETMLAR).
		 *
		 * Also since save and restore is not conditional i.e. always
		 * occurs when enabled, there is no need to clear the sticky
		 * PDSR bit while saving. It will be cleared during boot up/init
		 * and then by the restore procedure.
		 */

		/* Set OS lock to inform the debugger that the OS is in the
		 * process of saving etm registers. It prevents accidental
		 * modification of the etm regs by the external debugger
		 * and resets the internal counter.
		 */
		etm_write(OSLOCK_MAGIC, ETMOSLAR);
		isb();

		cnt = etm_read(ETMOSSRR); /* save count for restore */
		/* MAX_ETM_REGS = no of etm regs + 1 (for storing the reg count)
		 * check for state overflow, if not enough space, don't save
		 */
		if (cnt >= MAX_ETM_REGS)
			cnt = 0;
		etm.state[i++] = cnt;
		for (j = 0; j < cnt; j++)
			etm.state[i++] = etm_read(ETMOSSRR);
		break;
	default:
		pr_err_ratelimited("unsupported etm arch %d in %s\n", etm.arch,
				   __func__);
	}

	/* Vote for ETM power/clock disable */
	etm_clk_disable();
}

static inline void etm_restore_state(int cpu)
{
	int i, j, cnt;

	i = cpu * MAX_ETM_REGS;

	/* Vote for ETM power/clock enable */
	etm_clk_enable();

	switch (etm.arch) {
	case PFT_ARCH_V1_1:
		/* Set OS lock. Lock will already be set after power collapse
		 * but this write is included to ensure it is set.
		 *
		 * We don't poll for ETMSR[1] since it doesn't get set
		 */
		etm_write(OSLOCK_MAGIC, ETMOSLAR);
		isb();

		etm_write(etm.state[i++], ETMCR);
		etm_write(etm.state[i++], ETMTRIGGER);
		etm_write(etm.state[i++], ETMSR);
		etm_write(etm.state[i++], ETMTSSCR);
		etm_write(etm.state[i++], ETMTEEVR);
		etm_write(etm.state[i++], ETMTECR1);
		etm_write(etm.state[i++], ETMFFLR);
		for (j = 0; j < etm.nr_addr_cmp; j++)
			i = etm_write_acxr(etm.state, i, j);
		for (j = 0; j < etm.nr_cntr; j++)
			i = etm_write_cntx(etm.state, i, j);
		etm_write(etm.state[i++], ETMSQ12EVR);
		etm_write(etm.state[i++], ETMSQ21EVR);
		etm_write(etm.state[i++], ETMSQ23EVR);
		etm_write(etm.state[i++], ETMSQ31EVR);
		etm_write(etm.state[i++], ETMSQ32EVR);
		etm_write(etm.state[i++], ETMSQ13EVR);
		etm_write(etm.state[i++], ETMSQR);
		for (j = 0; j < etm.nr_ext_out; j++)
			i = etm_write_extoutevr(etm.state, i, j);
		for (j = 0; j < etm.nr_ctxid_cmp; j++)
			i = etm_write_cidcvr(etm.state, i, j);
		etm_write(etm.state[i++], ETMCIDCMR);
		etm_write(etm.state[i++], ETMSYNCFR);
		etm_write(etm.state[i++], ETMEXTINSELR);
		etm_write(etm.state[i++], ETMTSEVR);
		etm_write(etm.state[i++], ETMAUXCR);
		etm_write(etm.state[i++], ETMTRACEIDR);
		etm_write(etm.state[i++], ETMVMIDCVR);
		etm_write(etm.state[i++], ETMCLAIMSET);

		/* Clear the OS lock */
		isb();
		etm_write(0x0, ETMOSLAR);
		isb();
		break;
	case ETM_ARCH_V3_3:
		/* In ETMv3.3, it is possible for the coresight lock to be
		 * implemented for CP14 interface but we currently assume that
		 * it is not, so no need to unlock and lock coresight lock
		 * (ETMLAR).
		 */

		/* Clear sticky bit */
		etm_read(ETMPDSR);
		isb();

		/* Set OS lock. Lock will already be set after power collapse
		 * but this write is required to reset the internal counter used
		 * for ETM state restore.
		 */
		etm_write(OSLOCK_MAGIC, ETMOSLAR);
		isb();

		etm_read(ETMOSSRR); /* dummy read of OSSRR */
		cnt = etm.state[i++];
		for (j = 0; j < cnt; j++)
			etm_write(etm.state[i++], ETMOSSRR);

		/* Clear the OS lock */
		isb();
		etm_write(0x0, ETMOSLAR);
		isb();
		break;
	default:
		pr_err_ratelimited("unsupported etm arch %d in %s\n", etm.arch,
				   __func__);
	}

	/* Vote for ETM power/clock disable */
	etm_clk_disable();
}

/**
 * msm_jtag_save_state - save debug and etm registers
 *
 * Debug and etm registers are saved before power collapse if the debug
 * and etm architecture is supported and TZ doesn't handle the save and
 * restore of debug and etm registers.
 *
 * CONTEXT:
 * Called with preemption off and interrupts locked from:
 * 1. per_cpu idle thread context for idle power collapses
 * or
 * 2. per_cpu idle thread context for hotplug/suspend power collapse
 *    for nonboot cpus
 * or
 * 3. suspend thread context for suspend power collapse for core0
 *
 * In all cases we will run on the same cpu for the entire duration.
 */
void msm_jtag_save_state(void)
{
	int cpu;

	cpu = raw_smp_processor_id();

	msm_jtag_save_cntr[cpu]++;
	/* ensure counter is updated before moving forward */
	mb();

	if (dbg.save_restore_enabled)
		dbg_save_state(cpu);
	if (etm.save_restore_enabled)
		etm_save_state(cpu);
}
EXPORT_SYMBOL(msm_jtag_save_state);

/**
 * msm_jtag_restore_state - restore debug and etm registers
 *
 * Debug and etm registers are restored after power collapse if the debug
 * and etm architecture is supported and TZ doesn't handle the save and
 * restore of debug and etm registers.
 *
 * CONTEXT:
 * Called with preemption off and interrupts locked from:
 * 1. per_cpu idle thread context for idle power collapses
 * or
 * 2. per_cpu idle thread context for hotplug/suspend power collapse
 *    for nonboot cpus
 * or
 * 3. suspend thread context for suspend power collapse for core0
 *
 * In all cases we will run on the same cpu for the entire duration.
 */
void msm_jtag_restore_state(void)
{
	int cpu;

	cpu = raw_smp_processor_id();

	msm_jtag_restore_cntr[cpu]++;
	/* ensure counter is updated before moving forward */
	mb();

	if (dbg.save_restore_enabled)
		dbg_restore_state(cpu);
	if (etm.save_restore_enabled)
		etm_restore_state(cpu);
}
EXPORT_SYMBOL(msm_jtag_restore_state);
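
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * platform power-collapse path is expected to bracket the actual power-down
 * with these two exported calls on the CPU that is going down, with
 * preemption off and interrupts locked, e.g.:
 *
 *	static int msm_example_cpu_power_collapse(void)	// hypothetical name
 *	{
 *		msm_jtag_save_state();
 *		// ... enter power collapse; the core loses debug/ETM state ...
 *		msm_jtag_restore_state();
 *		return 0;
 *	}
 *
 * Both calls index per-CPU state by raw_smp_processor_id(), so save and
 * restore must run on the same CPU, as the kernel-doc above notes.
 */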

static int __init msm_jtag_dbg_init(void)
{
	int ret;
	uint32_t dbgdidr;

	/* This will run on core0 so use it to populate parameters */

	/* Populate dbg_ctx data */
	dbgdidr = dbg_read(DBGDIDR);
	dbg.arch = BMVAL(dbgdidr, 16, 19);
	dbg.nr_ctx_cmp = BMVAL(dbgdidr, 20, 23) + 1;
	dbg.nr_bp = BMVAL(dbgdidr, 24, 27) + 1;
	dbg.nr_wp = BMVAL(dbgdidr, 28, 31) + 1;

	if (dbg_arch_supported(dbg.arch)) {
		if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) {
			dbg.save_restore_enabled = true;
		} else {
			pr_info("dbg save-restore supported by TZ\n");
			goto dbg_out;
		}
	} else {
		pr_info("dbg arch %u not supported\n", dbg.arch);
		goto dbg_out;
	}

	/* Allocate dbg state save space */
	dbg.state = kzalloc(MAX_DBG_STATE_SIZE * sizeof(uint32_t), GFP_KERNEL);
	if (!dbg.state) {
		ret = -ENOMEM;
		goto dbg_err;
	}
dbg_out:
	return 0;
dbg_err:
	return ret;
}
arch_initcall(msm_jtag_dbg_init);

static int __init msm_jtag_etm_init(void)
{
	int ret;
	uint32_t etmidr;
	uint32_t etmccr;

	/* Vote for ETM power/clock enable */
	etm_clk_enable();

	/* Clear sticky bit in PDSR - required for ETMv3.3 (8660) */
	etm_read(ETMPDSR);
	isb();

	/* Populate etm_ctx data */
	etmidr = etm_read(ETMIDR);
	etm.arch = BMVAL(etmidr, 4, 11);
	etmccr = etm_read(ETMCCR);
	etm.nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	etm.nr_cntr = BMVAL(etmccr, 13, 15);
	etm.nr_ext_inp = BMVAL(etmccr, 17, 19);
	etm.nr_ext_out = BMVAL(etmccr, 20, 22);
	etm.nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	if (etm_arch_supported(etm.arch)) {
		if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) {
			etm.save_restore_enabled = true;
		} else {
			pr_info("etm save-restore supported by TZ\n");
			goto etm_out;
		}
	} else {
		pr_info("etm arch %u not supported\n", etm.arch);
		goto etm_out;
	}

	/* Vote for ETM power/clock disable */
	etm_clk_disable();

	/* Allocate etm state save space */
	etm.state = kzalloc(MAX_ETM_STATE_SIZE * sizeof(uint32_t), GFP_KERNEL);
	if (!etm.state) {
		ret = -ENOMEM;
		goto etm_err;
	}
etm_out:
	etm_clk_disable();
	return 0;
etm_err:
	return ret;
}
arch_initcall(msm_jtag_etm_init);