/drivers/video/tegra/host/host1x/host1x_channel.c

Source: https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t

/*
 * drivers/video/tegra/host/host1x/host1x_channel.c
 *
 * Tegra Graphics Host Channel
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "nvhost_channel.h"
#include "dev.h"
#include "nvhost_acm.h"
#include "nvhost_job.h"
#include "nvhost_hwctx.h"
#include <trace/events/nvhost.h>
#include <linux/slab.h>

#include "host1x_hwctx.h"
#include "nvhost_intr.h"

#define NV_FIFO_READ_TIMEOUT 200000

static int host1x_drain_read_fifo(struct nvhost_channel *ch,
	u32 *ptr, unsigned int count, unsigned int *pending);
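
/*
 * If the device asked for wait base synchronization, push a
 * LOAD_SYNCPT_BASE opcode pair that loads the device's first wait
 * base with the given syncpoint value, keeping base and syncpoint
 * in step after a submit.
 */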
static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val)
{
	unsigned long waitbase;
	unsigned long waitbase_mask = ch->dev->waitbases;

	if (ch->dev->waitbasesync) {
		waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG);
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
				host1x_uclass_load_syncpt_base_r(),
				1),
			nvhost_class_host_load_syncpt_base(waitbase,
				syncpt_val));
	}
}
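
/*
 * Decide whether the currently resident context must be saved before
 * this job runs, and pre-allocate the interrupt waiter that the save
 * service will need. Returns NULL when no save (or no waiter) is
 * required, or ERR_PTR(-ENOMEM) if the waiter allocation fails.
 */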
static void *pre_submit_ctxsave(struct nvhost_job *job,
		struct nvhost_hwctx *cur_ctx)
{
	struct nvhost_channel *ch = job->ch;
	void *ctxsave_waiter = NULL;

	/* Is a save needed? */
	if (!cur_ctx || ch->cur_ctx == job->hwctx)
		return NULL;

	if (cur_ctx->has_timedout) {
		dev_dbg(&ch->dev->dev,
			"%s: skip save of timed out context (0x%p)\n",
			__func__, ch->cur_ctx);
		return NULL;
	}

	/* Allocate save waiter if needed */
	if (ch->ctxhandler->save_service) {
		ctxsave_waiter = nvhost_intr_alloc_waiter();
		if (!ctxsave_waiter)
			return ERR_PTR(-ENOMEM);
	}

	return ctxsave_waiter;
}
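
/*
 * Push the context save buffer for cur_ctx and fold its syncpoint
 * increments into the job. If a waiter was pre-allocated, schedule a
 * CTXSAVE interrupt at the context's save threshold so the save
 * service can post-process the saved state.
 */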
static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
		struct nvhost_hwctx *cur_ctx)
{
	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
	struct nvhost_channel *ch = job->ch;
	u32 syncval;
	int err;
	u32 save_thresh = 0;

	/* Is a save needed? */
	if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
		return;

	/* Retrieve save threshold if we have a waiter */
	if (ctxsave_waiter)
		save_thresh =
			nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
			+ to_host1x_hwctx(cur_ctx)->save_thresh;

	/* Adjust the syncpoint max */
	job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;
	syncval = nvhost_syncpt_incr_max(&host->syncpt,
			job->syncpt_id,
			to_host1x_hwctx(cur_ctx)->save_incrs);

	/* Send the save to channel */
	cur_ctx->valid = true;
	ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
	nvhost_job_get_hwctx(job, cur_ctx);

	/* Notify save service */
	if (ctxsave_waiter) {
		err = nvhost_intr_add_action(&host->intr,
			job->syncpt_id,
			save_thresh,
			NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
			ctxsave_waiter,
			NULL);
		ctxsave_waiter = NULL;
		WARN(err, "Failed to set ctx save interrupt");
	}

	trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
}
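
/*
 * If the job carries a previously saved (valid) context that is not
 * already resident on the channel, fold its restore increments into
 * the job and push the restore buffer as a gather.
 */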
static void submit_ctxrestore(struct nvhost_job *job)
{
	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
	struct nvhost_channel *ch = job->ch;
	u32 syncval;
	struct host1x_hwctx *ctx =
		job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL;

	/* First check if we have a valid context to restore */
	if (ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid)
		return;

	/* Increment syncpt max */
	job->syncpt_incrs += ctx->restore_incrs;
	syncval = nvhost_syncpt_incr_max(&host->syncpt,
			job->syncpt_id,
			ctx->restore_incrs);

	/* Send restore buffer to channel */
	nvhost_cdma_push_gather(&ch->cdma,
		host->memmgr,
		ctx->restore,
		0,
		nvhost_opcode_gather(ctx->restore_size),
		ctx->restore_phys);

	trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx);
}
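
/*
 * Null kickoff: instead of executing the user's command buffers,
 * push enough OP_DONE syncpoint increments to stand in for them
 * (two per CDMA push, plus one padded with a NOOP if the count is
 * odd) so that waiters still make progress. For 3D the wait base is
 * bumped by the same count, as a real submit would have done.
 */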
static void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
{
	struct nvhost_channel *ch = job->ch;
	int incr;
	u32 op_incr;

	/* push increments that correspond to nulled out commands */
	op_incr = nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			job->syncpt_id);
	for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
		nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
	if (user_syncpt_incrs & 1)
		nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);

	/* for 3d, waitbase needs to be incremented after each submit */
	if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
		u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(
				NV_HOST1X_CLASS_ID,
				host1x_uclass_incr_syncpt_base_r(),
				1),
			nvhost_class_host_incr_syncpt_base(
				waitbase,
				user_syncpt_incrs));
	}
}
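
/* Push one GATHER per user command buffer referenced by the job. */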
static void submit_gathers(struct nvhost_job *job)
{
	/* push user gathers */
	int i;

	for (i = 0; i < job->num_gathers; i++) {
		u32 op1 = nvhost_opcode_gather(job->gathers[i].words);
		u32 op2 = job->gathers[i].mem;
		nvhost_cdma_push_gather(&job->ch->cdma,
				job->memmgr,
				job->gathers[i].ref,
				job->gathers[i].offset,
				op1, op2);
	}
}
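
/*
 * Main submit path: power up the client module, take the channel
 * submit lock, insert any needed context save/restore (and an
 * optional serializing host wait), push the user gathers or their
 * null-kickoff stand-ins, and schedule a SUBMIT_COMPLETE interrupt
 * at the job's final syncpoint value. The nvhost_module_busy() taken
 * here is presumably balanced from the SUBMIT_COMPLETE handler.
 */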
static int host1x_channel_submit(struct nvhost_job *job)
{
	struct nvhost_channel *ch = job->ch;
	struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
	u32 user_syncpt_incrs = job->syncpt_incrs;
	u32 prev_max = 0;
	u32 syncval;
	int err;
	void *completed_waiter = NULL, *ctxsave_waiter = NULL;
	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);

	/* Bail out on timed out contexts */
	if (job->hwctx && job->hwctx->has_timedout)
		return -ETIMEDOUT;

	/* Turn on the client module and host1x */
	nvhost_module_busy(ch->dev);
	if (drv->busy)
		drv->busy(ch->dev);

	/* before error checks, return current max */
	prev_max = job->syncpt_end =
		nvhost_syncpt_read_max(sp, job->syncpt_id);

	/* get submit lock */
	err = mutex_lock_interruptible(&ch->submitlock);
	if (err) {
		nvhost_module_idle(ch->dev);
		goto error;
	}

	/* Do the needed allocations */
	ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
	if (IS_ERR(ctxsave_waiter)) {
		err = PTR_ERR(ctxsave_waiter);
		nvhost_module_idle(ch->dev);
		mutex_unlock(&ch->submitlock);
		goto error;
	}

	completed_waiter = nvhost_intr_alloc_waiter();
	if (!completed_waiter) {
		nvhost_module_idle(ch->dev);
		mutex_unlock(&ch->submitlock);
		err = -ENOMEM;
		goto error;
	}

	/* begin a CDMA submit */
	err = nvhost_cdma_begin(&ch->cdma, job);
	if (err) {
		mutex_unlock(&ch->submitlock);
		nvhost_module_idle(ch->dev);
		goto error;
	}

	if (ch->dev->serialize) {
		/* Force serialization by inserting a host wait for the
		 * previous job to finish before this one can commence. */
		nvhost_cdma_push(&ch->cdma,
				nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_wait_syncpt_r(),
					1),
				nvhost_class_host_wait_syncpt(job->syncpt_id,
					nvhost_syncpt_read_max(sp,
						job->syncpt_id)));
	}

	submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
	submit_ctxrestore(job);
	ch->cur_ctx = job->hwctx;

	syncval = nvhost_syncpt_incr_max(sp,
			job->syncpt_id, user_syncpt_incrs);

	job->syncpt_end = syncval;

	/* add a setclass for modules that require it */
	if (ch->dev->class)
		nvhost_cdma_push(&ch->cdma,
			nvhost_opcode_setclass(ch->dev->class, 0, 0),
			NVHOST_OPCODE_NOOP);

	if (job->null_kickoff)
		submit_nullkickoff(job, user_syncpt_incrs);
	else
		submit_gathers(job);

	sync_waitbases(ch, job->syncpt_end);

	/* end CDMA submit & stash pinned hMems into sync queue */
	nvhost_cdma_end(&ch->cdma, job);

	trace_nvhost_channel_submitted(ch->dev->name,
			prev_max, syncval);

	/* schedule a submit complete interrupt */
	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
			job->syncpt_id, syncval,
			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
			completed_waiter,
			NULL);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&ch->submitlock);

	return 0;

error:
	kfree(ctxsave_waiter);
	kfree(completed_waiter);
	return err;
}
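
/*
 * Read a 3D unit register through the host1x indirect-access FIFO.
 * Builds a one-off submit that (optionally) saves/restores contexts,
 * switches to GR3D, asks it to post the register value into the
 * output FIFO, then sleeps until the syncpoint reports the value is
 * ready and drains a single word from the FIFO. The wait base
 * sequences the four syncpoint increments this protocol uses.
 */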
static int host1x_channel_read_3d_reg(
	struct nvhost_channel *channel,
	struct nvhost_hwctx *hwctx,
	u32 offset,
	u32 *value)
{
	struct host1x_hwctx *hwctx_to_save = NULL;
	struct nvhost_hwctx_handler *h = hwctx->h;
	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
	bool need_restore = false;
	u32 syncpt_incrs = 4;
	unsigned int pending = 0;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *ctx_waiter, *read_waiter, *completed_waiter;
	struct nvhost_job *job;
	u32 syncval;
	int err;

	if (hwctx && hwctx->has_timedout)
		return -ETIMEDOUT;

	ctx_waiter = nvhost_intr_alloc_waiter();
	read_waiter = nvhost_intr_alloc_waiter();
	completed_waiter = nvhost_intr_alloc_waiter();
	if (!ctx_waiter || !read_waiter || !completed_waiter) {
		err = -ENOMEM;
		goto done;
	}

	job = nvhost_job_alloc(channel, hwctx,
			NULL,
			nvhost_get_host(channel->dev)->memmgr, 0, 0);
	if (!job) {
		err = -ENOMEM;
		goto done;
	}

	/* keep module powered */
	nvhost_module_busy(channel->dev);

	/* get submit lock */
	err = mutex_lock_interruptible(&channel->submitlock);
	if (err) {
		nvhost_module_idle(channel->dev);
		/* drop the job and fall through so the waiters get freed */
		nvhost_job_put(job);
		goto done;
	}

	/* context switch */
	if (channel->cur_ctx != hwctx) {
		hwctx_to_save = channel->cur_ctx ?
			to_host1x_hwctx(channel->cur_ctx) : NULL;
		if (hwctx_to_save) {
			syncpt_incrs += hwctx_to_save->save_incrs;
			hwctx_to_save->hwctx.valid = true;
			channel->ctxhandler->get(&hwctx_to_save->hwctx);
		}
		channel->cur_ctx = hwctx;
		if (channel->cur_ctx && channel->cur_ctx->valid) {
			need_restore = true;
			syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
				->restore_incrs;
		}
	}

	syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt,
		p->syncpt, syncpt_incrs);

	job->syncpt_id = p->syncpt;
	job->syncpt_incrs = syncpt_incrs;
	job->syncpt_end = syncval;

	/* begin a CDMA submit */
	nvhost_cdma_begin(&channel->cdma, job);

	/* push save buffer (pre-gather setup depends on unit) */
	if (hwctx_to_save)
		h->save_push(&hwctx_to_save->hwctx, &channel->cdma);

	/* gather restore buffer */
	if (need_restore)
		nvhost_cdma_push(&channel->cdma,
			nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
				->restore_size),
			to_host1x_hwctx(channel->cur_ctx)->restore_phys);

	/* Switch to 3D - wait for it to complete what it was doing */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->syncpt));
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 1));
	/* Tell 3D to send register value to FIFO */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
		nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
			offset, false));
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
		NVHOST_OPCODE_NOOP);
	/* Increment syncpt to indicate that FIFO can be read */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_immediate_v(),
			p->syncpt),
		NVHOST_OPCODE_NOOP);
	/* Wait for value to be read from FIFO */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
		nvhost_class_host_wait_syncpt_base(p->syncpt,
			p->waitbase, 3));
	/* Indicate submit complete */
	nvhost_cdma_push(&channel->cdma,
		nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
		nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
	nvhost_cdma_push(&channel->cdma,
		NVHOST_OPCODE_NOOP,
		nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_immediate_v(),
			p->syncpt));

	/* end CDMA submit */
	nvhost_cdma_end(&channel->cdma, job);
	nvhost_job_put(job);
	job = NULL;

	/*
	 * schedule a context save interrupt (to drain the host FIFO
	 * if necessary, and to release the restore buffer)
	 */
	if (hwctx_to_save) {
		err = nvhost_intr_add_action(
			&nvhost_get_host(channel->dev)->intr,
			p->syncpt,
			syncval - syncpt_incrs
				+ hwctx_to_save->save_incrs
				- 1,
			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
			ctx_waiter,
			NULL);
		ctx_waiter = NULL;
		WARN(err, "Failed to set context save interrupt");
	}

	/* Wait for FIFO to be ready */
	err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
			p->syncpt, syncval - 2,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			read_waiter,
			&ref);
	read_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt,
			p->syncpt, syncval - 2));
	nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, ref);

	/* Read the register value from FIFO */
	err = host1x_drain_read_fifo(channel, value, 1, &pending);

	/* Indicate we've read the value */
	nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt,
			p->syncpt);

	/* Schedule a submit complete interrupt */
	err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
			p->syncpt, syncval,
			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
			completed_waiter, NULL);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&channel->submitlock);

done:
	kfree(ctx_waiter);
	kfree(read_waiter);
	kfree(completed_waiter);
	return err;
}
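
/*
 * Copy 'count' words from the channel's output FIFO into 'ptr',
 * polling FIFOSTAT (with a jiffies-based timeout) until entries
 * appear. '*pending' carries leftover entry credit between calls;
 * returns -EIO if the FIFO never fills in time.
 */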
static int host1x_drain_read_fifo(struct nvhost_channel *ch,
	u32 *ptr, unsigned int count, unsigned int *pending)
{
	unsigned int entries = *pending;
	unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
	void __iomem *chan_regs = ch->aperture;

	while (count) {
		unsigned int num;

		while (!entries && time_before(jiffies, timeout)) {
			/* query host for number of entries in fifo */
			entries = host1x_channel_fifostat_outfentries_v(
				readl(chan_regs + host1x_channel_fifostat_r()));
			if (!entries)
				cpu_relax();
		}

		/* timeout -> return error */
		if (!entries)
			return -EIO;

		num = min(entries, count);
		entries -= num;
		count -= num;

		while (num & ~0x3) {
			u32 arr[4];
			arr[0] = readl(chan_regs + host1x_channel_inddata_r());
			arr[1] = readl(chan_regs + host1x_channel_inddata_r());
			arr[2] = readl(chan_regs + host1x_channel_inddata_r());
			arr[3] = readl(chan_regs + host1x_channel_inddata_r());
			memcpy(ptr, arr, 4*sizeof(u32));
			ptr += 4;
			num -= 4;
		}
		while (num--)
			*ptr++ = readl(chan_regs + host1x_channel_inddata_r());
	}
	*pending = entries;

	return 0;
}
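
/*
 * Synchronously save the current context as a standalone job and
 * wait for it to complete; presumably used when the engine is about
 * to lose state, e.g. before power-gating. Schedules the CTXSAVE
 * service interrupt at the save threshold and a wakeup at the final
 * syncpoint value, then blocks until that value expires.
 */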
static int host1x_save_context(struct nvhost_channel *ch)
{
	struct nvhost_device *dev = ch->dev;
	struct nvhost_hwctx *hwctx_to_save;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	u32 syncpt_incrs, syncpt_val;
	int err = 0;
	void *ref;
	void *ctx_waiter = NULL, *wakeup_waiter = NULL;
	struct nvhost_job *job;
	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
	u32 syncpt_id;

	ctx_waiter = nvhost_intr_alloc_waiter();
	wakeup_waiter = nvhost_intr_alloc_waiter();
	if (!ctx_waiter || !wakeup_waiter) {
		err = -ENOMEM;
		goto done;
	}

	if (drv->busy)
		drv->busy(dev);

	mutex_lock(&ch->submitlock);
	hwctx_to_save = ch->cur_ctx;
	if (!hwctx_to_save) {
		mutex_unlock(&ch->submitlock);
		goto done;
	}

	job = nvhost_job_alloc(ch, hwctx_to_save,
			NULL,
			nvhost_get_host(ch->dev)->memmgr, 0, 0);
	if (IS_ERR_OR_NULL(job)) {
		/* a NULL job must still report failure, not PTR_ERR(NULL) */
		err = job ? PTR_ERR(job) : -ENOMEM;
		mutex_unlock(&ch->submitlock);
		goto done;
	}

	hwctx_to_save->valid = true;
	ch->cur_ctx = NULL;

	syncpt_id = to_host1x_hwctx_handler(hwctx_to_save->h)->syncpt;
	syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs;
	syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt,
					syncpt_id, syncpt_incrs);

	job->syncpt_id = syncpt_id;
	job->syncpt_incrs = syncpt_incrs;
	job->syncpt_end = syncpt_val;

	err = nvhost_cdma_begin(&ch->cdma, job);
	if (err) {
		mutex_unlock(&ch->submitlock);
		goto done;
	}

	ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma);
	nvhost_cdma_end(&ch->cdma, job);
	nvhost_job_put(job);
	job = NULL;

	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id,
			syncpt_val - syncpt_incrs +
				to_host1x_hwctx(hwctx_to_save)->save_thresh,
			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
			ctx_waiter,
			NULL);
	ctx_waiter = NULL;
	WARN(err, "Failed to set context save interrupt");

	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
			syncpt_id, syncpt_val,
			NVHOST_INTR_ACTION_WAKEUP, &wq,
			wakeup_waiter,
			&ref);
	wakeup_waiter = NULL;
	WARN(err, "Failed to set wakeup interrupt");
	wait_event(wq,
		nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt,
				syncpt_id, syncpt_val));
	nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, ref);
	nvhost_cdma_update(&ch->cdma);

	mutex_unlock(&ch->submitlock);

done:
	kfree(ctx_waiter);
	kfree(wakeup_waiter);
	return err;
}
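
/* Channel register banks are laid out back to back: channel 'ndx'
 * sits ndx map-size strides into the host1x aperture. */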
static inline void __iomem *host1x_channel_aperture(void __iomem *p, int ndx)
{
	p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
	return p;
}
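
/*
 * Let the client driver install its context handler, keyed off the
 * first syncpoint and wait base assigned to the device.
 */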
static inline int host1x_hwctx_handler_init(struct nvhost_channel *ch)
{
	int err = 0;
	unsigned long syncpts = ch->dev->syncpts;
	unsigned long waitbases = ch->dev->waitbases;
	u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG);
	u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG);
	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);

	if (drv->alloc_hwctx_handler) {
		ch->ctxhandler = drv->alloc_hwctx_handler(syncpt,
				waitbase, ch);
		if (!ch->ctxhandler)
			err = -ENOMEM;
	}

	return err;
}

static int host1x_channel_init(struct nvhost_channel *ch,
	struct nvhost_master *dev, int index)
{
	ch->chid = index;
	mutex_init(&ch->reflock);
	mutex_init(&ch->submitlock);

	ch->aperture = host1x_channel_aperture(dev->aperture, index);

	return host1x_hwctx_handler_init(ch);
}

static const struct nvhost_channel_ops host1x_channel_ops = {
	.init = host1x_channel_init,
	.submit = host1x_channel_submit,
	.read3dreg = host1x_channel_read_3d_reg,
	.save_context = host1x_save_context,
	.drain_read_fifo = host1x_drain_read_fifo,
};