
/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c

https://github.com/gby/linux
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define gf100_clk(p) container_of((p), struct gf100_clk, base)
#include "priv.h"
#include "pll.h"

#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

struct gf100_clk_info {
	u32 freq;
	u32 ssel;
	u32 mdiv;
	u32 dsrc;
	u32 ddiv;
	u32 coef;
};

struct gf100_clk {
	struct nvkm_clk base;
	struct gf100_clk_info eng[16];
};

static u32 read_div(struct gf100_clk *, int, u32, u32);

static u32
read_vco(struct gf100_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc);
	if (!(ssrc & 0x00000100))
		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
}

static u32
read_pll(struct gf100_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;

	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
		break;
	case 0x132020:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	return sclk * N / M / P;
}
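
/* Illustrative only: with the coefficient register decoded as above, the
 * PLL output is sclk * N / M / P.  For example, assuming a 27 MHz crystal
 * (27000 kHz) feeding one of the SPPLLs, where P is forced to 1, and a
 * hypothetical coef of N = 60, M = 1, the readback would be
 * 27000 * 60 / 1 / 1 = 1620000 kHz (1.62 GHz).  These numbers are an
 * example, not values taken from real hardware.
 */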
static u32
read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sclk, sctl, sdiv = 2;

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		sclk = read_vco(clk, dsrc + (doff * 4));

		/* Memclk has doff of 0 despite its alt. location */
		if (doff <= 2) {
			sctl = nvkm_rd32(device, dctl + (doff * 4));

			if (sctl & 0x80000000) {
				if (ssrc & 0x100)
					sctl >>= 8;

				sdiv = (sctl & 0x3f) + 2;
			}
		}

		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
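
/* Worked example with hypothetical register values: ssrc selects the VCO
 * path (3), the divider is enabled (sctl bit 31 set) and the 6-bit field
 * reads 0x06, so sdiv = 0x06 + 2 = 8.  A 1620000 kHz VCO then reads back
 * as (1620000 * 2) / 8 = 405000 kHz.  The +2 bias and the *2 numerator
 * implement half-step divide ratios from /1 up to /32.5.
 */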
static u32
read_clk(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 ssel = nvkm_rd32(device, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << idx)) {
		if (idx < 7)
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
		else
			sclk = read_pll(clk, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
	} else {
		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
	}

	if (sctl & 0x80000000)
		return (sclk * 2) / sdiv;

	return sclk;
}
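
/* Reading guide: each bit of 0x137100 muxes one clock domain between its
 * PLL (bit set) and its bypass divider path (bit clear), and
 * 0x137250 + idx * 4 holds the post-divider for whichever path is active
 * (bits 13:8 for the PLL path, bits 5:0 for the divider path).  This is
 * the readback counterpart of what calc_clk() and the gf100_clk_prog_*()
 * stages below compute and program.
 */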
static int
gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_sppll0:
		return read_pll(clk, 0x00e800);
	case nv_clk_src_sppll1:
		return read_pll(clk, 0x00e820);
	case nv_clk_src_mpllsrcref:
		return read_div(clk, 0, 0x137320, 0x137330);
	case nv_clk_src_mpllsrc:
		return read_pll(clk, 0x132020);
	case nv_clk_src_mpll:
		return read_pll(clk, 0x132000);
	case nv_clk_src_mdiv:
		return read_div(clk, 0, 0x137300, 0x137310);
	case nv_clk_src_mem:
		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_copy:
		return read_clk(clk, 0x09);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

static u32
calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = min((ref * 2) / freq, (u32)65);
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}
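
/* Worked example (hypothetical values): with ref = 1620000 kHz and a
 * target of freq = 405000 kHz, div = min(3240000 / 405000, 65) = 8, so
 * *ddiv = 6 is what lands in the register field and the achieved clock
 * is 3240000 / 8 = 405000 kHz, matching the decode in read_div() and
 * read_clk().  div is clamped to [2, 65], i.e. ratios of /1 to /32.5.
 */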
static u32
calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case 27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}

static u32
calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}
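
/* Note the symmetry with read_pll(): the coefficients are packed here as
 * (P << 16) | (N << 8) | M, exactly the layout read_pll() decodes from
 * the coef register at +0x04.  As used here, gt215_pll_calc() chooses
 * N/M/P within the VBIOS limits and its positive return value is treated
 * as the achieved frequency in kHz; anything <= 0 means no usable
 * setting was found.
 */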
static int
calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x00004387 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = info->coef = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << idx);
		info->freq = clk1;
	}

	return 0;
}
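
/* Decision logic, in short: the divider-only candidate (clk0) always
 * exists; the PLL candidate (clk1) is only tried for domains in the
 * 0x00004387 mask, and only when the dividers missed the target.
 * Whichever candidate lands closer to the requested frequency wins.
 * E.g. (hypothetical numbers), for a 720500 kHz request, a divider
 * result of 708750 kHz loses to a PLL result of 720000 kHz, so ssel is
 * set and the PLL path is used.
 */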
static int
gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gf100_clk *clk = gf100_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}

static void
gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (idx < 7 && !info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}

static void
gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}

static void
gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	if (idx <= 7) {
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
		if (info->coef) {
			nvkm_wr32(device, addr + 0x04, info->coef);
			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

			/* Test PLL lock */
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
					break;
			);
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

			/* Enable sync mode */
			nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
		}
	}
}

static void
gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}

static void
gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}

static int
gf100_clk_prog(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct {
		void (*exec)(struct gf100_clk *, int);
	} stage[] = {
		{ gf100_clk_prog_0 }, /* div programming */
		{ gf100_clk_prog_1 }, /* select div mode */
		{ gf100_clk_prog_2 }, /* (maybe) program pll */
		{ gf100_clk_prog_3 }, /* (maybe) select pll mode */
		{ gf100_clk_prog_4 }, /* final divider */
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}
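
/* Presumably the stage ordering is deliberate: stages 0-1 move every
 * domain onto its bypass divider, stage 2 reprograms the PLL and waits
 * for lock while nothing is clocked from it, stage 3 switches PLL users
 * back, and stage 4 applies the final post-divider.  Each stage runs
 * across all engines before the next begins, so no domain advances to a
 * later step until every domain has completed the previous one.
 */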
static void
gf100_clk_tidy(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	memset(clk->eng, 0x00, sizeof(clk->eng));
}

static const struct nvkm_clk_func
gf100_clk = {
	.read = gf100_clk_read,
	.calc = gf100_clk_calc,
	.prog = gf100_clk_prog,
	.tidy = gf100_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_hubk06 , 0x00 },
		{ nv_clk_src_hubk01 , 0x01 },
		{ nv_clk_src_copy   , 0x02 },
		{ nv_clk_src_gpc    , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_rop    , 0x04 },
		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x0a },
		{ nv_clk_src_hubk07 , 0x0b },
		{ nv_clk_src_max }
	}
};

int
gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gf100_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
}