PageRenderTime 29ms CodeModel.GetById 4ms app.highlight 20ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/video/tegra/host/host1x/host1x_channel.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t
C | 680 lines | 515 code | 100 blank | 65 comment | 62 complexity | fd4cf0d8c61982f695620d4f1a9c4119 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
  1/*
  2 * drivers/video/tegra/host/host1x/channel_host1x.c
  3 *
  4 * Tegra Graphics Host Channel
  5 *
  6 * Copyright (c) 2010-2012, NVIDIA Corporation.
  7 *
  8 * This program is free software; you can redistribute it and/or modify it
  9 * under the terms and conditions of the GNU General Public License,
 10 * version 2, as published by the Free Software Foundation.
 11 *
 12 * This program is distributed in the hope it will be useful, but WITHOUT
 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 14 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 15 * more details.
 16 *
 17 * You should have received a copy of the GNU General Public License
 18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 19 */
 20
 21#include "nvhost_channel.h"
 22#include "dev.h"
 23#include "nvhost_acm.h"
 24#include "nvhost_job.h"
 25#include "nvhost_hwctx.h"
 26#include <trace/events/nvhost.h>
 27#include <linux/slab.h>
 28
 29#include "host1x_hwctx.h"
 30#include "nvhost_intr.h"
 31
 32#define NV_FIFO_READ_TIMEOUT 200000
 33
 34static int host1x_drain_read_fifo(struct nvhost_channel *ch,
 35	u32 *ptr, unsigned int count, unsigned int *pending);
 36
 37static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val)
 38{
 39	unsigned long waitbase;
 40	unsigned long int waitbase_mask = ch->dev->waitbases;
 41	if (ch->dev->waitbasesync) {
 42		waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG);
 43		nvhost_cdma_push(&ch->cdma,
 44			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
 45				host1x_uclass_load_syncpt_base_r(),
 46				1),
 47				nvhost_class_host_load_syncpt_base(waitbase,
 48						syncpt_val));
 49	}
 50}
 51
 52static void *pre_submit_ctxsave(struct nvhost_job *job,
 53		struct nvhost_hwctx *cur_ctx)
 54{
 55	struct nvhost_channel *ch = job->ch;
 56	void *ctxsave_waiter = NULL;
 57
 58	/* Is a save needed? */
 59	if (!cur_ctx || ch->cur_ctx == job->hwctx)
 60		return NULL;
 61
 62	if (cur_ctx->has_timedout) {
 63		dev_dbg(&ch->dev->dev,
 64			"%s: skip save of timed out context (0x%p)\n",
 65			__func__, ch->cur_ctx);
 66
 67		return NULL;
 68	}
 69
 70	/* Allocate save waiter if needed */
 71	if (ch->ctxhandler->save_service) {
 72		ctxsave_waiter = nvhost_intr_alloc_waiter();
 73		if (!ctxsave_waiter)
 74			return ERR_PTR(-ENOMEM);
 75	}
 76
 77	return ctxsave_waiter;
 78}
 79
 80static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
 81		struct nvhost_hwctx *cur_ctx)
 82{
 83	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
 84	struct nvhost_channel *ch = job->ch;
 85	u32 syncval;
 86	int err;
 87	u32 save_thresh = 0;
 88
 89	/* Is a save needed? */
 90	if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
 91		return;
 92
 93	/* Retrieve save threshold if we have a waiter */
 94	if (ctxsave_waiter)
 95		save_thresh =
 96			nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
 97			+ to_host1x_hwctx(cur_ctx)->save_thresh;
 98
 99	/* Adjust the syncpoint max */
100	job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;
101	syncval = nvhost_syncpt_incr_max(&host->syncpt,
102			job->syncpt_id,
103			to_host1x_hwctx(cur_ctx)->save_incrs);
104
105	/* Send the save to channel */
106	cur_ctx->valid = true;
107	ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
108	nvhost_job_get_hwctx(job, cur_ctx);
109
110	/* Notify save service */
111	if (ctxsave_waiter) {
112		err = nvhost_intr_add_action(&host->intr,
113			job->syncpt_id,
114			save_thresh,
115			NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
116			ctxsave_waiter,
117			NULL);
118		ctxsave_waiter = NULL;
119		WARN(err, "Failed to set ctx save interrupt");
120	}
121
122	trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
123}
124
125static void submit_ctxrestore(struct nvhost_job *job)
126{
127	struct nvhost_master *host = nvhost_get_host(job->ch->dev);
128	struct nvhost_channel *ch = job->ch;
129	u32 syncval;
130	struct host1x_hwctx *ctx =
131		job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL;
132
133	/* First check if we have a valid context to restore */
134	if(ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid)
135		return;
136
137	/* Increment syncpt max */
138	job->syncpt_incrs += ctx->restore_incrs;
139	syncval = nvhost_syncpt_incr_max(&host->syncpt,
140			job->syncpt_id,
141			ctx->restore_incrs);
142
143	/* Send restore buffer to channel */
144	nvhost_cdma_push_gather(&ch->cdma,
145		host->memmgr,
146		ctx->restore,
147		0,
148		nvhost_opcode_gather(ctx->restore_size),
149		ctx->restore_phys);
150
151	trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx);
152}
153
154static void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
155{
156	struct nvhost_channel *ch = job->ch;
157	int incr;
158	u32 op_incr;
159
160	/* push increments that correspond to nulled out commands */
161	op_incr = nvhost_opcode_imm_incr_syncpt(
162			host1x_uclass_incr_syncpt_cond_op_done_v(),
163			job->syncpt_id);
164	for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
165		nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
166	if (user_syncpt_incrs & 1)
167		nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);
168
169	/* for 3d, waitbase needs to be incremented after each submit */
170	if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
171		u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
172		nvhost_cdma_push(&ch->cdma,
173			nvhost_opcode_setclass(
174				NV_HOST1X_CLASS_ID,
175				host1x_uclass_incr_syncpt_base_r(),
176				1),
177			nvhost_class_host_incr_syncpt_base(
178				waitbase,
179				user_syncpt_incrs));
180	}
181}
182
183static void submit_gathers(struct nvhost_job *job)
184{
185	/* push user gathers */
186	int i;
187	for (i = 0 ; i < job->num_gathers; i++) {
188		u32 op1 = nvhost_opcode_gather(job->gathers[i].words);
189		u32 op2 = job->gathers[i].mem;
190		nvhost_cdma_push_gather(&job->ch->cdma,
191				job->memmgr,
192				job->gathers[i].ref,
193				job->gathers[i].offset,
194				op1, op2);
195	}
196}
197
198static int host1x_channel_submit(struct nvhost_job *job)
199{
200	struct nvhost_channel *ch = job->ch;
201	struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
202	u32 user_syncpt_incrs = job->syncpt_incrs;
203	u32 prev_max = 0;
204	u32 syncval;
205	int err;
206	void *completed_waiter = NULL, *ctxsave_waiter = NULL;
207	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
208
209	/* Bail out on timed out contexts */
210	if (job->hwctx && job->hwctx->has_timedout)
211		return -ETIMEDOUT;
212
213	/* Turn on the client module and host1x */
214	nvhost_module_busy(ch->dev);
215	if (drv->busy)
216		drv->busy(ch->dev);
217
218	/* before error checks, return current max */
219	prev_max = job->syncpt_end =
220		nvhost_syncpt_read_max(sp, job->syncpt_id);
221
222	/* get submit lock */
223	err = mutex_lock_interruptible(&ch->submitlock);
224	if (err) {
225		nvhost_module_idle(ch->dev);
226		goto error;
227	}
228
229	/* Do the needed allocations */
230	ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
231	if (IS_ERR(ctxsave_waiter)) {
232		err = PTR_ERR(ctxsave_waiter);
233		nvhost_module_idle(ch->dev);
234		mutex_unlock(&ch->submitlock);
235		goto error;
236	}
237
238	completed_waiter = nvhost_intr_alloc_waiter();
239	if (!completed_waiter) {
240		nvhost_module_idle(ch->dev);
241		mutex_unlock(&ch->submitlock);
242		err = -ENOMEM;
243		goto error;
244	}
245
246	/* begin a CDMA submit */
247	err = nvhost_cdma_begin(&ch->cdma, job);
248	if (err) {
249		mutex_unlock(&ch->submitlock);
250		nvhost_module_idle(ch->dev);
251		goto error;
252	}
253
254	if (ch->dev->serialize) {
255		/* Force serialization by inserting a host wait for the
256		 * previous job to finish before this one can commence. */
257		nvhost_cdma_push(&ch->cdma,
258				nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
259					host1x_uclass_wait_syncpt_r(),
260					1),
261				nvhost_class_host_wait_syncpt(job->syncpt_id,
262					nvhost_syncpt_read_max(sp,
263						job->syncpt_id)));
264	}
265
266	submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
267	submit_ctxrestore(job);
268	ch->cur_ctx = job->hwctx;
269
270	syncval = nvhost_syncpt_incr_max(sp,
271			job->syncpt_id, user_syncpt_incrs);
272
273	job->syncpt_end = syncval;
274
275	/* add a setclass for modules that require it */
276	if (ch->dev->class)
277		nvhost_cdma_push(&ch->cdma,
278			nvhost_opcode_setclass(ch->dev->class, 0, 0),
279			NVHOST_OPCODE_NOOP);
280
281	if (job->null_kickoff)
282		submit_nullkickoff(job, user_syncpt_incrs);
283	else
284		submit_gathers(job);
285
286	sync_waitbases(ch, job->syncpt_end);
287
288	/* end CDMA submit & stash pinned hMems into sync queue */
289	nvhost_cdma_end(&ch->cdma, job);
290
291	trace_nvhost_channel_submitted(ch->dev->name,
292			prev_max, syncval);
293
294	/* schedule a submit complete interrupt */
295	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
296			job->syncpt_id, syncval,
297			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
298			completed_waiter,
299			NULL);
300	completed_waiter = NULL;
301	WARN(err, "Failed to set submit complete interrupt");
302
303	mutex_unlock(&ch->submitlock);
304
305	return 0;
306
307error:
308	kfree(ctxsave_waiter);
309	kfree(completed_waiter);
310	return err;
311}
312
313static int host1x_channel_read_3d_reg(
314	struct nvhost_channel *channel,
315	struct nvhost_hwctx *hwctx,
316	u32 offset,
317	u32 *value)
318{
319	struct host1x_hwctx *hwctx_to_save = NULL;
320	struct nvhost_hwctx_handler *h = hwctx->h;
321	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
322	bool need_restore = false;
323	u32 syncpt_incrs = 4;
324	unsigned int pending = 0;
325	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
326	void *ref;
327	void *ctx_waiter, *read_waiter, *completed_waiter;
328	struct nvhost_job *job;
329	u32 syncval;
330	int err;
331
332	if (hwctx && hwctx->has_timedout)
333		return -ETIMEDOUT;
334
335	ctx_waiter = nvhost_intr_alloc_waiter();
336	read_waiter = nvhost_intr_alloc_waiter();
337	completed_waiter = nvhost_intr_alloc_waiter();
338	if (!ctx_waiter || !read_waiter || !completed_waiter) {
339		err = -ENOMEM;
340		goto done;
341	}
342
343	job = nvhost_job_alloc(channel, hwctx,
344			NULL,
345			nvhost_get_host(channel->dev)->memmgr, 0, 0);
346	if (!job) {
347		err = -ENOMEM;
348		goto done;
349	}
350
351	/* keep module powered */
352	nvhost_module_busy(channel->dev);
353
354	/* get submit lock */
355	err = mutex_lock_interruptible(&channel->submitlock);
356	if (err) {
357		nvhost_module_idle(channel->dev);
358		return err;
359	}
360
361	/* context switch */
362	if (channel->cur_ctx != hwctx) {
363		hwctx_to_save = channel->cur_ctx ?
364			to_host1x_hwctx(channel->cur_ctx) : NULL;
365		if (hwctx_to_save) {
366			syncpt_incrs += hwctx_to_save->save_incrs;
367			hwctx_to_save->hwctx.valid = true;
368			channel->ctxhandler->get(&hwctx_to_save->hwctx);
369		}
370		channel->cur_ctx = hwctx;
371		if (channel->cur_ctx && channel->cur_ctx->valid) {
372			need_restore = true;
373			syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
374				->restore_incrs;
375		}
376	}
377
378	syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt,
379		p->syncpt, syncpt_incrs);
380
381	job->syncpt_id = p->syncpt;
382	job->syncpt_incrs = syncpt_incrs;
383	job->syncpt_end = syncval;
384
385	/* begin a CDMA submit */
386	nvhost_cdma_begin(&channel->cdma, job);
387
388	/* push save buffer (pre-gather setup depends on unit) */
389	if (hwctx_to_save)
390		h->save_push(&hwctx_to_save->hwctx, &channel->cdma);
391
392	/* gather restore buffer */
393	if (need_restore)
394		nvhost_cdma_push(&channel->cdma,
395			nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
396				->restore_size),
397			to_host1x_hwctx(channel->cur_ctx)->restore_phys);
398
399	/* Switch to 3D - wait for it to complete what it was doing */
400	nvhost_cdma_push(&channel->cdma,
401		nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
402		nvhost_opcode_imm_incr_syncpt(
403			host1x_uclass_incr_syncpt_cond_op_done_v(),
404			p->syncpt));
405	nvhost_cdma_push(&channel->cdma,
406		nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
407			host1x_uclass_wait_syncpt_base_r(), 1),
408		nvhost_class_host_wait_syncpt_base(p->syncpt,
409			p->waitbase, 1));
410	/*  Tell 3D to send register value to FIFO */
411	nvhost_cdma_push(&channel->cdma,
412		nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
413		nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
414			offset, false));
415	nvhost_cdma_push(&channel->cdma,
416		nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
417		NVHOST_OPCODE_NOOP);
418	/*  Increment syncpt to indicate that FIFO can be read */
419	nvhost_cdma_push(&channel->cdma,
420		nvhost_opcode_imm_incr_syncpt(
421			host1x_uclass_incr_syncpt_cond_immediate_v(),
422			p->syncpt),
423		NVHOST_OPCODE_NOOP);
424	/*  Wait for value to be read from FIFO */
425	nvhost_cdma_push(&channel->cdma,
426		nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
427		nvhost_class_host_wait_syncpt_base(p->syncpt,
428			p->waitbase, 3));
429	/*  Indicate submit complete */
430	nvhost_cdma_push(&channel->cdma,
431		nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
432		nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
433	nvhost_cdma_push(&channel->cdma,
434		NVHOST_OPCODE_NOOP,
435		nvhost_opcode_imm_incr_syncpt(
436			host1x_uclass_incr_syncpt_cond_immediate_v(),
437			p->syncpt));
438
439	/* end CDMA submit  */
440	nvhost_cdma_end(&channel->cdma, job);
441	nvhost_job_put(job);
442	job = NULL;
443
444	/*
445	 * schedule a context save interrupt (to drain the host FIFO
446	 * if necessary, and to release the restore buffer)
447	 */
448	if (hwctx_to_save) {
449		err = nvhost_intr_add_action(
450			&nvhost_get_host(channel->dev)->intr,
451			p->syncpt,
452			syncval - syncpt_incrs
453				+ hwctx_to_save->save_incrs
454				- 1,
455			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
456			ctx_waiter,
457			NULL);
458		ctx_waiter = NULL;
459		WARN(err, "Failed to set context save interrupt");
460	}
461
462	/* Wait for FIFO to be ready */
463	err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
464			p->syncpt, syncval - 2,
465			NVHOST_INTR_ACTION_WAKEUP, &wq,
466			read_waiter,
467			&ref);
468	read_waiter = NULL;
469	WARN(err, "Failed to set wakeup interrupt");
470	wait_event(wq,
471		nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt,
472				p->syncpt, syncval - 2));
473	nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, ref);
474
475	/* Read the register value from FIFO */
476	err = host1x_drain_read_fifo(channel, value, 1, &pending);
477
478	/* Indicate we've read the value */
479	nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt,
480			p->syncpt);
481
482	/* Schedule a submit complete interrupt */
483	err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
484			p->syncpt, syncval,
485			NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
486			completed_waiter, NULL);
487	completed_waiter = NULL;
488	WARN(err, "Failed to set submit complete interrupt");
489
490	mutex_unlock(&channel->submitlock);
491
492done:
493	kfree(ctx_waiter);
494	kfree(read_waiter);
495	kfree(completed_waiter);
496	return err;
497}
498
499
500static int host1x_drain_read_fifo(struct nvhost_channel *ch,
501	u32 *ptr, unsigned int count, unsigned int *pending)
502{
503	unsigned int entries = *pending;
504	unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
505	void __iomem *chan_regs = ch->aperture;
506	while (count) {
507		unsigned int num;
508
509		while (!entries && time_before(jiffies, timeout)) {
510			/* query host for number of entries in fifo */
511			entries = host1x_channel_fifostat_outfentries_v(
512				readl(chan_regs + host1x_channel_fifostat_r()));
513			if (!entries)
514				cpu_relax();
515		}
516
517		/*  timeout -> return error */
518		if (!entries)
519			return -EIO;
520
521		num = min(entries, count);
522		entries -= num;
523		count -= num;
524
525		while (num & ~0x3) {
526			u32 arr[4];
527			arr[0] = readl(chan_regs + host1x_channel_inddata_r());
528			arr[1] = readl(chan_regs + host1x_channel_inddata_r());
529			arr[2] = readl(chan_regs + host1x_channel_inddata_r());
530			arr[3] = readl(chan_regs + host1x_channel_inddata_r());
531			memcpy(ptr, arr, 4*sizeof(u32));
532			ptr += 4;
533			num -= 4;
534		}
535		while (num--)
536			*ptr++ = readl(chan_regs + host1x_channel_inddata_r());
537	}
538	*pending = entries;
539
540	return 0;
541}
542
543static int host1x_save_context(struct nvhost_channel *ch)
544{
545	struct nvhost_device *dev = ch->dev;
546	struct nvhost_hwctx *hwctx_to_save;
547	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
548	u32 syncpt_incrs, syncpt_val;
549	int err = 0;
550	void *ref;
551	void *ctx_waiter = NULL, *wakeup_waiter = NULL;
552	struct nvhost_job *job;
553	struct nvhost_driver *drv = to_nvhost_driver(dev->dev.driver);
554	u32 syncpt_id;
555
556	ctx_waiter = nvhost_intr_alloc_waiter();
557	wakeup_waiter = nvhost_intr_alloc_waiter();
558	if (!ctx_waiter || !wakeup_waiter) {
559		err = -ENOMEM;
560		goto done;
561	}
562
563	if (drv->busy)
564		drv->busy(dev);
565
566	mutex_lock(&ch->submitlock);
567	hwctx_to_save = ch->cur_ctx;
568	if (!hwctx_to_save) {
569		mutex_unlock(&ch->submitlock);
570		goto done;
571	}
572
573	job = nvhost_job_alloc(ch, hwctx_to_save,
574			NULL,
575			nvhost_get_host(ch->dev)->memmgr, 0, 0);
576	if (IS_ERR_OR_NULL(job)) {
577		err = PTR_ERR(job);
578		mutex_unlock(&ch->submitlock);
579		goto done;
580	}
581
582	hwctx_to_save->valid = true;
583	ch->cur_ctx = NULL;
584	syncpt_id = to_host1x_hwctx_handler(hwctx_to_save->h)->syncpt;
585
586	syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs;
587	syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt,
588					syncpt_id, syncpt_incrs);
589
590	job->syncpt_id = syncpt_id;
591	job->syncpt_incrs = syncpt_incrs;
592	job->syncpt_end = syncpt_val;
593
594	err = nvhost_cdma_begin(&ch->cdma, job);
595	if (err) {
596		mutex_unlock(&ch->submitlock);
597		goto done;
598	}
599
600	ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma);
601	nvhost_cdma_end(&ch->cdma, job);
602	nvhost_job_put(job);
603	job = NULL;
604
605	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id,
606			syncpt_val - syncpt_incrs +
607				to_host1x_hwctx(hwctx_to_save)->save_thresh,
608			NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
609			ctx_waiter,
610			NULL);
611	ctx_waiter = NULL;
612	WARN(err, "Failed to set context save interrupt");
613
614	err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
615			syncpt_id, syncpt_val,
616			NVHOST_INTR_ACTION_WAKEUP, &wq,
617			wakeup_waiter,
618			&ref);
619	wakeup_waiter = NULL;
620	WARN(err, "Failed to set wakeup interrupt");
621	wait_event(wq,
622		nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt,
623				syncpt_id, syncpt_val));
624
625	nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, ref);
626
627	nvhost_cdma_update(&ch->cdma);
628
629	mutex_unlock(&ch->submitlock);
630
631done:
632	kfree(ctx_waiter);
633	kfree(wakeup_waiter);
634	return err;
635}
636
637static inline void __iomem *host1x_channel_aperture(void __iomem *p, int ndx)
638{
639	p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
640	return p;
641}
642
643static inline int host1x_hwctx_handler_init(struct nvhost_channel *ch)
644{
645	int err = 0;
646	unsigned long syncpts = ch->dev->syncpts;
647	unsigned long waitbases = ch->dev->waitbases;
648	u32 syncpt = find_first_bit(&syncpts, BITS_PER_LONG);
649	u32 waitbase = find_first_bit(&waitbases, BITS_PER_LONG);
650	struct nvhost_driver *drv = to_nvhost_driver(ch->dev->dev.driver);
651
652	if (drv->alloc_hwctx_handler) {
653		ch->ctxhandler = drv->alloc_hwctx_handler(syncpt,
654				waitbase, ch);
655		if (!ch->ctxhandler)
656			err = -ENOMEM;
657	}
658
659	return err;
660}
661
662static int host1x_channel_init(struct nvhost_channel *ch,
663	struct nvhost_master *dev, int index)
664{
665	ch->chid = index;
666	mutex_init(&ch->reflock);
667	mutex_init(&ch->submitlock);
668
669	ch->aperture = host1x_channel_aperture(dev->aperture, index);
670
671	return host1x_hwctx_handler_init(ch);
672}
673
/* Channel operations provided by the host1x backend; the core channel
 * code dispatches through this table. */
static const struct nvhost_channel_ops host1x_channel_ops = {
	.init = host1x_channel_init,
	.submit = host1x_channel_submit,
	.read3dreg = host1x_channel_read_3d_reg,
	.save_context = host1x_save_context,
	.drain_read_fifo = host1x_drain_read_fifo,
};