
/linux-3.5.0/arch/arm/mach-exynos/mct.c

https://bitbucket.org/jpaglier/cse536-kernel-dev
/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>

#include <asm/hardware/gic.h>
#include <asm/localtimer.h>

#include <plat/cpu.h>

#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/regs-mct.h>
#include <asm/mach/time.h>

#define TICK_BASE_CNT	1
enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

static unsigned long clk_rate;
static unsigned int mct_int_type;

struct mct_clock_event_device {
	struct clock_event_device *evt;
	void __iomem *base;
	char name[10];
};
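
/*
 * Note (added by editor): exynos4_mct_write() below pairs each MCT
 * register write with a poll of the matching write-status (WSTAT) bit
 * and then acknowledges it by writing the bit back.  Presumably the
 * timer block latches CPU writes asynchronously in its own clock
 * domain, so a write only takes effect once the hardware raises the
 * corresponding WSTAT bit.
 */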
static void exynos4_mct_write(unsigned int value, void *addr)
{
	void __iomem *stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, addr);

	if (likely(addr >= EXYNOS4_MCT_L_BASE(0))) {
		u32 base = (u32) addr & EXYNOS4_MCT_L_MASK;
		switch ((u32) addr & ~EXYNOS4_MCT_L_MASK) {
		case (u32) MCT_L_TCON_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case (u32) MCT_L_ICNTB_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case (u32) MCT_L_TCNTB_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch ((u32) addr) {
		case (u32) EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case (u32) EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case (u32) EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(stat_addr) & mask) {
			__raw_writel(mask, stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (addr:0x%08x)\n", value, (u32)addr);
}
/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
	u32 reg;

	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

	reg = __raw_readl(EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
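
/*
 * Note (added by editor): the free-running counter is 64 bits wide but
 * only readable as two 32-bit halves.  exynos4_frc_read() samples the
 * upper word before and after reading the lower word and retries if it
 * changed, so a carry from G_CNT_L into G_CNT_U between the two reads
 * cannot produce a torn 64-bit value.
 */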
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}
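
/*
 * Note (added by editor): global comparator 0 backs the system-wide
 * "mct-comp" clock event device.  An interrupt is raised when the
 * free-running counter reaches the programmed 64-bit compare value;
 * in periodic mode COMP0_AUTO_INC re-arms the comparator by adding
 * G_COMP0_ADD_INCR after each event.
 */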
static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = __raw_readl(EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}

static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
	clockevents_calc_mult_shift(&mct_comp_device, clk_rate, 5);
	mct_comp_device.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &mct_comp_device);
	mct_comp_device.min_delta_ns =
		clockevent_delta2ns(0xf, &mct_comp_device);
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_register_device(&mct_comp_device);

	if (soc_is_exynos5250())
		setup_irq(EXYNOS5_IRQ_MCT_G0, &mct_comp_event_irq);
	else
		setup_irq(EXYNOS4_IRQ_MCT_G0, &mct_comp_event_irq);
}
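
/*
 * Note (added by editor): with CONFIG_LOCAL_TIMERS each CPU also gets
 * its own MCT local timer ("mct_tick<cpu>") registered at rating 450,
 * which outranks the rating-250 global comparator above; the global
 * device then presumably remains as a fallback when the local timers
 * are unavailable.
 */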
#ifdef CONFIG_LOCAL_TIMERS
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	void __iomem *addr = mevt->base + MCT_L_TCON_OFFSET;

	tmp = __raw_readl(addr);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, addr);
	}
}
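
/*
 * Note (added by editor): bit 31 written into ICNTB below is the
 * manual-update request (MCT_L_UPDATE_ICNTB, per the inline comment);
 * together with MCT_L_TCON_INTERVAL_MODE the local timer then reloads
 * itself after each expiry, which is why exynos4_mct_tick_clear() has
 * to stop it explicitly in oneshot mode.
 */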
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	unsigned long cycles_per_jiffy;

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	struct clock_event_device *evt = mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	if (__raw_readl(mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
		return 1;
	} else {
		return 0;
	}
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};
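
/*
 * Note (added by editor, assumption): the local timer appears to
 * consume TICK_BASE_CNT + 1 input clocks per interrupt-count
 * decrement, since the setup below programs TICK_BASE_CNT into TCNTB
 * and rates the clock event device at clk_rate / (TICK_BASE_CNT + 1).
 */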
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();
	int mct_lx_irq;

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

	clockevents_calc_mult_shift(evt, clk_rate / (TICK_BASE_CNT + 1), 5);
	evt->max_delta_ns =
		clockevent_delta2ns(0x7fffffff, evt);
	evt->min_delta_ns =
		clockevent_delta2ns(0xf, evt);

	clockevents_register_device(evt);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
						EXYNOS5_IRQ_MCT_L0;
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = mct_lx_irq;
			setup_irq(mct_lx_irq, &mct_tick0_event_irq);
		} else {
			mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
						EXYNOS5_IRQ_MCT_L1;
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = mct_lx_irq;
			setup_irq(mct_lx_irq, &mct_tick1_event_irq);
			irq_set_affinity(mct_lx_irq, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
	}

	return 0;
}
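
/*
 * Note (added by editor): the stop hook undoes the setup above when a
 * CPU's local timer is torn down (typically when the CPU goes
 * offline): the SPI variant removes the per-tick irqaction, while the
 * PPI variant only disables the shared percpu interrupt.
 */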
static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0)
			remove_irq(evt->irq, &mct_tick0_event_irq);
		else
			remove_irq(evt->irq, &mct_tick1_event_irq);
	} else {
		disable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER);
	}
}

static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
	.setup	= exynos4_local_timer_setup,
	.stop	= exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */

static void __init exynos4_timer_resources(void)
{
	struct clk *mct_clk;

	mct_clk = clk_get(NULL, "xtal");
	clk_rate = clk_get_rate(mct_clk);

#ifdef CONFIG_LOCAL_TIMERS
	if (mct_int_type == MCT_INT_PPI) {
		int err;

		err = request_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER,
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     EXYNOS_IRQ_MCT_LOCALTIMER, err);
	}

	local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}
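
/*
 * Note (added by editor): EXYNOS4210 and EXYNOS5250 route the local
 * timer interrupts as per-CPU SPIs (MCT_L0/MCT_L1), while the other
 * SoCs handled here deliver them as a single percpu PPI; mct_int_type
 * selects between those two code paths at boot.
 */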
static void __init exynos4_timer_init(void)
{
	if ((soc_is_exynos4210()) || (soc_is_exynos5250()))
		mct_int_type = MCT_INT_SPI;
	else
		mct_int_type = MCT_INT_PPI;

	exynos4_timer_resources();
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

struct sys_timer exynos4_timer = {
	.init		= exynos4_timer_init,
};