/drivers/video/omap2/dsscomp/queue.c

/*
 * linux/drivers/video/omap2/dsscomp/queue.c
 *
 * DSS Composition queueing support
 *
 * Copyright (C) 2011 Texas Instruments, Inc
 * Author: Lajos Molnar <molnar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include <video/omapdss.h>
#include <video/dsscomp.h>
#include <plat/dsscomp.h>
#include <linux/debugfs.h>

#include "dsscomp.h"

/* queue state */
static DEFINE_MUTEX(mtx);

/* free overlay structs */
struct maskref {
	u32 mask;
	u32 refs[MAX_OVERLAYS];
};

static struct {
	struct workqueue_struct *apply_workq;

	u32 ovl_mask;			/* overlays used on this display */
	struct maskref ovl_qmask;	/* overlays queued to this display */
	bool blanking;
} mgrq[MAX_MANAGERS];

static struct workqueue_struct *cb_wkq;	/* callback work queue */
static struct dsscomp_dev *cdev;

#ifdef CONFIG_DEBUG_FS
LIST_HEAD(dbg_comps);
DEFINE_MUTEX(dbg_mtx);
#endif

#ifdef CONFIG_DSSCOMP_DEBUG_LOG
struct dbg_event_t dbg_events[128];
u32 dbg_event_ix;
#endif

static inline void __log_state(dsscomp_t c, void *fn, u32 ev)
{
#ifdef CONFIG_DSSCOMP_DEBUG_LOG
	if (c->dbg_used < ARRAY_SIZE(c->dbg_log)) {
		u32 t = (u32) ktime_to_ms(ktime_get());
		c->dbg_log[c->dbg_used].t = t;
		c->dbg_log[c->dbg_used++].state = c->state;
		__log_event(20 * c->ix + 20, t, c, ev ? "%pf on %s" : "%pf",
				(u32) fn, (u32) log_status_str(ev));
	}
#endif
}
#define log_state(c, fn, ev) DO_IF_DEBUG_FS(__log_state(c, fn, ev))

static inline void maskref_incbit(struct maskref *om, u32 ix)
{
	om->refs[ix]++;
	om->mask |= 1 << ix;
}

static void maskref_decmask(struct maskref *om, u32 mask)
{
	while (mask) {
		u32 ix = fls(mask) - 1, m = 1 << ix;
		if (!--om->refs[ix])
			om->mask &= ~m;
		mask &= ~m;
	}
}
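
/*
 * maskref bookkeeping (summary): 'mask' is the union of overlays holding
 * at least one reference, and refs[ix] counts how many queued compositions
 * reference overlay ix.  maskref_incbit() takes one reference on a single
 * overlay; maskref_decmask() drops one reference for every overlay set in
 * 'mask' and clears the summary bit once a count reaches zero.  For
 * example, two maskref_incbit(om, 1) calls followed by one
 * maskref_decmask(om, 1 << 1) leave refs[1] == 1 with bit 1 still set.
 */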

/*
 * ===========================================================================
 *		INIT
 * ===========================================================================
 */
/* Initialize queue structures, and set up state of the displays */
int dsscomp_queue_init(struct dsscomp_dev *cdev_)
{
	u32 i, j;
	cdev = cdev_;

	if (ARRAY_SIZE(mgrq) < cdev->num_mgrs)
		return -EINVAL;

	ZERO(mgrq);
	for (i = 0; i < cdev->num_mgrs; i++) {
		struct omap_overlay_manager *mgr;
		mgrq[i].apply_workq =
			create_singlethread_workqueue("dsscomp_apply");
		if (!mgrq[i].apply_workq)
			goto error;

		/* record overlays on this display */
		mgr = cdev->mgrs[i];
		for (j = 0; j < cdev->num_ovls; j++) {
			if (cdev->ovls[j]->info.enabled &&
			    mgr &&
			    cdev->ovls[j]->manager == mgr)
				mgrq[i].ovl_mask |= 1 << j;
		}
	}

	cb_wkq = create_singlethread_workqueue("dsscomp_cb");
	if (!cb_wkq)
		goto error;

	return 0;
error:
	while (i--)
		destroy_workqueue(mgrq[i].apply_workq);
	return -ENOMEM;
}

/* get display index from manager */
static u32 get_display_ix(struct omap_overlay_manager *mgr)
{
	u32 i;

	/* handle if manager is not attached to a display */
	if (!mgr || !mgr->device)
		return cdev->num_displays;

	/* find manager's display */
	for (i = 0; i < cdev->num_displays; i++)
		if (cdev->displays[i] == mgr->device)
			break;
	return i;
}

/*
 * ===========================================================================
 *		QUEUING SETUP OPERATIONS
 * ===========================================================================
 */

/* create a new composition for a display */
dsscomp_t dsscomp_new(struct omap_overlay_manager *mgr)
{
	struct dsscomp_data *comp = NULL;
	u32 display_ix = get_display_ix(mgr);

	/* check manager */
	u32 ix = mgr ? mgr->id : cdev->num_mgrs;
	if (ix >= cdev->num_mgrs || display_ix >= cdev->num_displays)
		return ERR_PTR(-EINVAL);

	/* allocate composition */
	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return NULL;

	/* initialize new composition */
	comp->ix = ix;	/* save where this composition came from */
	comp->ovl_mask = comp->ovl_dmask = 0;
	comp->frm.sync_id = 0;
	comp->frm.mgr.ix = display_ix;
	comp->state = DSSCOMP_STATE_ACTIVE;

	DO_IF_DEBUG_FS({
		__log_state(comp, dsscomp_new, 0);
		list_add(&comp->dbg_q, &dbg_comps);
	});

	return comp;
}
EXPORT_SYMBOL(dsscomp_new);
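
/*
 * Typical client flow, as a minimal sketch built only from the exported
 * API in this file (error handling omitted; the overlay and window values
 * are illustrative placeholders, not a tested configuration; a zero-sized
 * window is expanded to the full panel in dsscomp_apply()):
 *
 *	struct dss2_ovl_info oi = { .cfg = { .ix = 0, .enabled = true } };
 *	struct dss2_rect_t win = { .x = 0, .y = 0, .w = 0, .h = 0 };
 *	dsscomp_t c = dsscomp_new(mgr);
 *
 *	dsscomp_set_ovl(c, &oi);
 *	dsscomp_setup(c, DSSCOMP_SETUP_MODE_APPLY | DSSCOMP_SETUP_MODE_DISPLAY,
 *		      win);
 *	dsscomp_delayed_apply(c);
 *
 * Once applied, the composition completes (and frees itself) through the
 * manager callback path below; clients must not touch it afterwards.
 */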

/* returns overlays used in a composition */
u32 dsscomp_get_ovls(dsscomp_t comp)
{
	u32 mask;

	mutex_lock(&mtx);
	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
	mask = comp->ovl_mask;
	mutex_unlock(&mtx);

	return mask;
}
EXPORT_SYMBOL(dsscomp_get_ovls);

/* set overlay info */
int dsscomp_set_ovl(dsscomp_t comp, struct dss2_ovl_info *ovl)
{
	int r = -EBUSY;
	u32 i, mask, oix, ix;
	struct omap_overlay *o;

	mutex_lock(&mtx);
	BUG_ON(!ovl);
	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);

	ix = comp->ix;

	if (ovl->cfg.ix >= cdev->num_ovls) {
		r = -EINVAL;
		goto done;
	}

	/* if overlay is already part of the composition */
	mask = 1 << ovl->cfg.ix;
	if (mask & comp->ovl_mask) {
		/* look up overlay */
		for (oix = 0; oix < comp->frm.num_ovls; oix++) {
			if (comp->ovls[oix].cfg.ix == ovl->cfg.ix)
				break;
		}
		BUG_ON(oix == comp->frm.num_ovls);
	} else {
		/* check if ovl is free to use */
		if (comp->frm.num_ovls >= ARRAY_SIZE(comp->ovls))
			goto done;

		/* not in any other display's queue */
		if (mask & ~mgrq[ix].ovl_qmask.mask) {
			for (i = 0; i < cdev->num_mgrs; i++) {
				if (i == ix)
					continue;
				if (mgrq[i].ovl_qmask.mask & mask)
					goto done;
			}
		}

		/* and disabled (unless forced) if on another manager */
		o = cdev->ovls[ovl->cfg.ix];
		if (o->info.enabled && (!o->manager || o->manager->id != ix))
			goto done;

		/* add overlay to composition & display */
		comp->ovl_mask |= mask;
		oix = comp->frm.num_ovls++;

		maskref_incbit(&mgrq[ix].ovl_qmask, ovl->cfg.ix);
	}

	comp->ovls[oix] = *ovl;
	r = 0;
done:
	mutex_unlock(&mtx);

	return r;
}
EXPORT_SYMBOL(dsscomp_set_ovl);

/* get overlay info */
int dsscomp_get_ovl(dsscomp_t comp, u32 ix, struct dss2_ovl_info *ovl)
{
	int r;
	u32 oix;

	mutex_lock(&mtx);
	BUG_ON(!ovl);
	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);

	if (ix >= cdev->num_ovls) {
		r = -EINVAL;
	} else if (comp->ovl_mask & (1 << ix)) {
		r = 0;
		/*
		 * look up by the requested index ('ix'), not by the caller's
		 * output structure, which need not be initialized
		 */
		for (oix = 0; oix < comp->frm.num_ovls; oix++)
			if (comp->ovls[oix].cfg.ix == ix) {
				*ovl = comp->ovls[oix];
				break;
			}
		BUG_ON(oix == comp->frm.num_ovls);
	} else {
		r = -ENOENT;
	}

	mutex_unlock(&mtx);

	return r;
}
EXPORT_SYMBOL(dsscomp_get_ovl);

/* set manager info */
int dsscomp_set_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr)
{
	mutex_lock(&mtx);
	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
	BUG_ON(mgr->ix != comp->frm.mgr.ix);

	comp->frm.mgr = *mgr;
	mutex_unlock(&mtx);

	return 0;
}
EXPORT_SYMBOL(dsscomp_set_mgr);

/* get manager info */
int dsscomp_get_mgr(dsscomp_t comp, struct dss2_mgr_info *mgr)
{
	mutex_lock(&mtx);
	BUG_ON(!mgr);
	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);

	*mgr = comp->frm.mgr;
	mutex_unlock(&mtx);

	return 0;
}
EXPORT_SYMBOL(dsscomp_get_mgr);

/* set composition setup mode and window */
int dsscomp_setup(dsscomp_t comp, enum dsscomp_setup_mode mode,
			struct dss2_rect_t win)
{
	mutex_lock(&mtx);
	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);

	comp->frm.mode = mode;
	comp->frm.win = win;
	mutex_unlock(&mtx);

	return 0;
}
EXPORT_SYMBOL(dsscomp_setup);

/*
 * ===========================================================================
 *		QUEUING COMMITTING OPERATIONS
 * ===========================================================================
 */
void dsscomp_drop(dsscomp_t comp)
{
	/* decrement unprogrammed references */
	if (comp->state < DSSCOMP_STATE_PROGRAMMED)
		maskref_decmask(&mgrq[comp->ix].ovl_qmask, comp->ovl_mask);
	comp->state = 0;

	if (debug & DEBUG_COMPOSITIONS)
		dev_info(DEV(cdev), "[%p] released\n", comp);

	DO_IF_DEBUG_FS(list_del(&comp->dbg_q));

	kfree(comp);
}
EXPORT_SYMBOL(dsscomp_drop);
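
/*
 * NOTE on locking: dsscomp_drop() does not take 'mtx' itself; its caller
 * in this file (dsscomp_mgr_delayed_cb) already holds it.  External
 * callers presumably need to provide the same serialization before
 * dropping a composition.
 */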

struct dsscomp_cb_work {
	struct work_struct work;
	struct dsscomp_data *comp;
	int status;
};

static void dsscomp_mgr_delayed_cb(struct work_struct *work)
{
	struct dsscomp_cb_work *wk = container_of(work, typeof(*wk), work);
	struct dsscomp_data *comp = wk->comp;
	int status = wk->status;
	u32 ix;

	kfree(work);

	mutex_lock(&mtx);

	BUG_ON(comp->state == DSSCOMP_STATE_ACTIVE);
	ix = comp->ix;

	/* call extra callbacks if requested */
	if (comp->extra_cb)
		comp->extra_cb(comp->extra_cb_data, status);

	/* handle programming & release */
	if (status == DSS_COMPLETION_PROGRAMMED) {
		comp->state = DSSCOMP_STATE_PROGRAMMED;
		log_state(comp, dsscomp_mgr_delayed_cb, status);

		/* update used overlay mask */
		mgrq[ix].ovl_mask = comp->ovl_mask & ~comp->ovl_dmask;
		maskref_decmask(&mgrq[ix].ovl_qmask, comp->ovl_mask);

		if (debug & DEBUG_PHASES)
			dev_info(DEV(cdev), "[%p] programmed\n", comp);
	} else if ((status == DSS_COMPLETION_DISPLAYED) &&
		   comp->state == DSSCOMP_STATE_PROGRAMMED) {
		/* composition is 1st displayed */
		comp->state = DSSCOMP_STATE_DISPLAYED;
		log_state(comp, dsscomp_mgr_delayed_cb, status);

		if (debug & DEBUG_PHASES)
			dev_info(DEV(cdev), "[%p] displayed\n", comp);
	} else if (status & DSS_COMPLETION_RELEASED) {
		/* composition is no longer displayed */
		log_event(20 * comp->ix + 20, 0, comp, "%pf on %s",
			  (u32) dsscomp_mgr_delayed_cb,
			  (u32) log_status_str(status));
		dsscomp_drop(comp);
	}
	mutex_unlock(&mtx);
}

static u32 dsscomp_mgr_callback(void *data, int id, int status)
{
	struct dsscomp_data *comp = data;

	if (status == DSS_COMPLETION_PROGRAMMED ||
	    (status == DSS_COMPLETION_DISPLAYED &&
	     comp->state != DSSCOMP_STATE_DISPLAYED) ||
	    (status & DSS_COMPLETION_RELEASED)) {
		struct dsscomp_cb_work *wk = kzalloc(sizeof(*wk), GFP_ATOMIC);
		/* atomic allocation may fail; drop the event rather than
		   dereference NULL */
		if (!wk)
			return ~status;
		wk->comp = comp;
		wk->status = status;
		INIT_WORK(&wk->work, dsscomp_mgr_delayed_cb);
		queue_work(cb_wkq, &wk->work);
	}

	/* get each callback only once */
	return ~status;
}
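
/*
 * NOTE: returning ~status presumably tells DSS2 to strip the events just
 * delivered from this callback's remaining event mask (cf. the .mask
 * field programmed in dsscomp_apply()), so each completion event is
 * received at most once -- the "get each callback only once" intent above.
 */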

static inline bool dssdev_manually_updated(struct omap_dss_device *dev)
{
	return (dev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
		dev->driver->get_update_mode(dev) != OMAP_DSS_UPDATE_AUTO;
}

/* apply composition */
/* at this point the composition is not on any queue */
static int dsscomp_apply(dsscomp_t comp)
{
	int i, r = -EFAULT;
	u32 dmask, display_ix;
	struct omap_dss_device *dssdev;
	struct omap_dss_driver *drv;
	struct omap_overlay_manager *mgr;
	struct omap_overlay *ovl;
	struct dsscomp_setup_mgr_data *d;
	u32 oix;
	bool cb_programmed = false;

	struct omapdss_ovl_cb cb = {
		.fn = dsscomp_mgr_callback,
		.data = comp,
		.mask = DSS_COMPLETION_DISPLAYED |
			DSS_COMPLETION_PROGRAMMED | DSS_COMPLETION_RELEASED,
	};

	BUG_ON(comp->state != DSSCOMP_STATE_APPLYING);

	/* check if the display is valid and used */
	r = -ENODEV;
	d = &comp->frm;
	display_ix = d->mgr.ix;
	if (display_ix >= cdev->num_displays)
		goto done;
	dssdev = cdev->displays[display_ix];
	if (!dssdev)
		goto done;

	drv = dssdev->driver;
	mgr = dssdev->manager;
	if (!mgr || !drv || mgr->id >= cdev->num_mgrs)
		goto done;

	dump_comp_info(cdev, d, "apply");

	r = 0;
	dmask = 0;
	for (oix = 0; oix < comp->frm.num_ovls; oix++) {
		struct dss2_ovl_info *oi = comp->ovls + oix;

		/* keep track of disabled overlays */
		if (!oi->cfg.enabled)
			dmask |= 1 << oi->cfg.ix;

		if (r && !comp->must_apply)
			continue;

		dump_ovl_info(cdev, oi);

		if (oi->cfg.ix >= cdev->num_ovls) {
			r = -EINVAL;
			continue;
		}
		ovl = cdev->ovls[oi->cfg.ix];

		/* set overlays' manager & info */
		if (ovl->info.enabled && ovl->manager != mgr) {
			r = -EBUSY;
			goto skip_ovl_set;
		}
		if (ovl->manager != mgr) {
			/*
			 * Ideally, we should call ovl->unset_manager(ovl),
			 * but it may block on go even though the disabling
			 * of the overlay already went through. So instead,
			 * we are just clearing the manager.
			 */
			ovl->manager = NULL;
			r = ovl->set_manager(ovl, mgr);
			if (r)
				goto skip_ovl_set;
		}

		r = set_dss_ovl_info(oi);
skip_ovl_set:
		if (r && comp->must_apply) {
			dev_err(DEV(cdev), "[%p] set ovl%d failed %d", comp,
				oi->cfg.ix, r);
			oi->cfg.enabled = false;
			dmask |= 1 << oi->cfg.ix;
			set_dss_ovl_info(oi);
		}
	}

	/*
	 * set manager's info - this also sets the completion callback,
	 * so if it succeeds, we will use the callback to complete the
	 * composition.  Otherwise, we can skip the composition now.
	 */
	if (!r || comp->must_apply) {
		r = set_dss_mgr_info(&d->mgr, &cb);
		cb_programmed = r == 0;
	}

	if (r && !comp->must_apply) {
		dev_err(DEV(cdev), "[%p] set failed %d\n", comp, r);
		goto done;
	} else {
		if (r)
			dev_warn(DEV(cdev), "[%p] ignoring set failure %d\n",
				 comp, r);
		comp->blank = dmask == comp->ovl_mask;
		comp->ovl_dmask = dmask;

		/*
		 * Check other overlays that may also use this display.
		 * NOTE: This is only needed in case someone changes
		 * overlays via sysfs.  We use comp->ovl_mask to refresh
		 * the overlays actually used on a manager when the
		 * composition is programmed.
		 */
		for (i = 0; i < cdev->num_ovls; i++) {
			u32 mask = 1 << i;
			if ((~comp->ovl_mask & mask) &&
			    cdev->ovls[i]->info.enabled &&
			    cdev->ovls[i]->manager == mgr) {
				mutex_lock(&mtx);
				comp->ovl_mask |= mask;
				maskref_incbit(&mgrq[comp->ix].ovl_qmask, i);
				mutex_unlock(&mtx);
			}
		}
	}

	/* apply changes and call update on manual panels */
	/* no need for mutex as no callbacks are scheduled yet */
	comp->state = DSSCOMP_STATE_APPLIED;
	log_state(comp, dsscomp_apply, 0);

	if (!d->win.w && !d->win.x)
		d->win.w = dssdev->panel.timings.x_res - d->win.x;
	if (!d->win.h && !d->win.y)
		d->win.h = dssdev->panel.timings.y_res - d->win.y;

	mutex_lock(&mtx);
	if (mgrq[comp->ix].blanking) {
		pr_info_ratelimited("ignoring apply mgr(%s) while blanking\n",
				    mgr->name);
		r = -ENODEV;
	} else {
		r = mgr->apply(mgr);
		if (r)
			dev_err(DEV(cdev), "failed while applying %d", r);
		/* keep error if set_mgr_info failed */
		if (!r && !cb_programmed)
			r = -EINVAL;
	}
	mutex_unlock(&mtx);

	/*
	 * TRICKY: try to unregister callback to see if callbacks have
	 * been applied (moved into DSS2 pipeline).  Unregistering also
	 * avoids having to unnecessarily kick out compositions (which
	 * would result in screen blinking).  If callbacks failed to apply,
	 * (e.g. could not set them or apply them) we will need to call
	 * them ourselves (we note this by returning an error).
	 */
	if (cb_programmed && r) {
		/* clear error if callback already registered */
		if (omap_dss_manager_unregister_callback(mgr, &cb))
			r = 0;
	}

	/* if failed to apply, kick out prior composition */
	if (comp->must_apply && r)
		mgr->blank(mgr, true);

	if (!r && (d->mode & DSSCOMP_SETUP_MODE_DISPLAY)) {
		/* cannot handle update errors, so ignore them */
		if (dssdev_manually_updated(dssdev) && drv->update)
			drv->update(dssdev, d->win.x,
					d->win.y, d->win.w, d->win.h);
		else
			/* wait for sync to do smooth animations */
			mgr->wait_for_vsync(mgr);
	}

done:
	return r;
}
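
/*
 * On error from dsscomp_apply() the completion callback either never
 * reached the DSS2 pipeline or was unregistered above, so the caller has
 * to complete the composition itself; dsscomp_do_apply() below does this
 * by invoking dsscomp_mgr_callback() with DSS_COMPLETION_ECLIPSED_SET.
 */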

struct dsscomp_apply_work {
	struct work_struct work;
	dsscomp_t comp;
};

int dsscomp_state_notifier(struct notifier_block *nb,
				unsigned long arg, void *ptr)
{
	struct omap_dss_device *dssdev = ptr;
	enum omap_dss_display_state state = arg;
	struct omap_overlay_manager *mgr = dssdev->manager;

	if (mgr) {
		mutex_lock(&mtx);
		if (state == OMAP_DSS_DISPLAY_DISABLED) {
			mgr->blank(mgr, true);
			mgrq[mgr->id].blanking = true;
		} else if (state == OMAP_DSS_DISPLAY_ACTIVE) {
			mgrq[mgr->id].blanking = false;
		}
		mutex_unlock(&mtx);
	}
	return 0;
}

static void dsscomp_do_apply(struct work_struct *work)
{
	struct dsscomp_apply_work *wk = container_of(work, typeof(*wk), work);
	/* complete compositions that failed to apply */
	if (dsscomp_apply(wk->comp))
		dsscomp_mgr_callback(wk->comp, -1, DSS_COMPLETION_ECLIPSED_SET);
	kfree(wk);
}

int dsscomp_delayed_apply(dsscomp_t comp)
{
	/* don't block in case we are called from interrupt context */
	struct dsscomp_apply_work *wk = kzalloc(sizeof(*wk), GFP_NOWAIT);
	if (!wk)
		return -ENOMEM;

	mutex_lock(&mtx);

	BUG_ON(comp->state != DSSCOMP_STATE_ACTIVE);
	comp->state = DSSCOMP_STATE_APPLYING;
	log_state(comp, dsscomp_delayed_apply, 0);

	if (debug & DEBUG_PHASES)
		dev_info(DEV(cdev), "[%p] applying\n", comp);
	mutex_unlock(&mtx);

	wk->comp = comp;
	INIT_WORK(&wk->work, dsscomp_do_apply);
	return queue_work(mgrq[comp->ix].apply_workq, &wk->work) ? 0 : -EBUSY;
}
EXPORT_SYMBOL(dsscomp_delayed_apply);
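
/*
 * Composition lifetime, as implemented above (each transition is logged
 * through log_state()):
 *
 *	ACTIVE ------- dsscomp_delayed_apply() ------> APPLYING
 *	APPLYING ----- dsscomp_apply() --------------> APPLIED
 *	APPLIED ------ PROGRAMMED callback ----------> PROGRAMMED
 *	PROGRAMMED --- first DISPLAYED callback -----> DISPLAYED
 *	applied or later - RELEASED callback --------> freed via dsscomp_drop()
 */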

/*
 * ===========================================================================
 *		DEBUGFS
 * ===========================================================================
 */
#ifdef CONFIG_DEBUG_FS
void seq_print_comp(struct seq_file *s, dsscomp_t c)
{
	struct dsscomp_setup_mgr_data *d = &c->frm;
	int i;

	seq_printf(s, " [%p]: %s%s\n", c, c->blank ? "blank " : "",
		   c->state == DSSCOMP_STATE_ACTIVE ? "ACTIVE" :
		   c->state == DSSCOMP_STATE_APPLYING ? "APPLYING" :
		   c->state == DSSCOMP_STATE_APPLIED ? "APPLIED" :
		   c->state == DSSCOMP_STATE_PROGRAMMED ? "PROGRAMMED" :
		   c->state == DSSCOMP_STATE_DISPLAYED ? "DISPLAYED" :
		   "???");
	seq_printf(s, " sync_id=%x, flags=%c%c%c\n",
		   d->sync_id,
		   (d->mode & DSSCOMP_SETUP_MODE_APPLY) ? 'A' : '-',
		   (d->mode & DSSCOMP_SETUP_MODE_DISPLAY) ? 'D' : '-',
		   (d->mode & DSSCOMP_SETUP_MODE_CAPTURE) ? 'C' : '-');
	for (i = 0; i < d->num_ovls; i++) {
		struct dss2_ovl_info *oi;
		struct dss2_ovl_cfg *g;
		oi = d->ovls + i;
		g = &oi->cfg;
		if (g->zonly) {
			seq_printf(s, " ovl%d={%s z%d}\n",
				   g->ix, g->enabled ? "ON" : "off",
				   g->zorder);
		} else {
			seq_printf(s, " ovl%d={%s z%d %s%s *%d%%"
				   " %d*%d:%d,%d+%d,%d rot%d%s"
				   " => %d,%d+%d,%d %p/%p|%d}\n",
				   g->ix, g->enabled ? "ON" : "off", g->zorder,
				   dsscomp_get_color_name(g->color_mode) ? :
				   "N/A",
				   g->pre_mult_alpha ? " premult" : "",
				   (g->global_alpha * 100 + 128) / 255,
				   g->width, g->height, g->crop.x, g->crop.y,
				   g->crop.w, g->crop.h,
				   g->rotation, g->mirror ? "+mir" : "",
				   g->win.x, g->win.y, g->win.w, g->win.h,
				   (void *) oi->ba, (void *) oi->uv,
				   g->stride);
		}
	}
	if (c->extra_cb)
		seq_printf(s, " gsync=[%p] %pf\n\n", c->extra_cb_data,
			   c->extra_cb);
	else
		seq_printf(s, " gsync=[%p] (called)\n\n", c->extra_cb_data);
}
#endif

void dsscomp_dbg_comps(struct seq_file *s)
{
#ifdef CONFIG_DEBUG_FS
	dsscomp_t c;
	u32 i;

	mutex_lock(&dbg_mtx);
	for (i = 0; i < cdev->num_mgrs; i++) {
		struct omap_overlay_manager *mgr = cdev->mgrs[i];
		seq_printf(s, "ACTIVE COMPOSITIONS on %s\n\n", mgr->name);
		list_for_each_entry(c, &dbg_comps, dbg_q) {
			struct dss2_mgr_info *mi = &c->frm.mgr;
			if (mi->ix < cdev->num_displays &&
			    cdev->displays[mi->ix]->manager == mgr)
				seq_print_comp(s, c);
		}

		/* print manager cache */
		mgr->dump_cb(mgr, s);
	}
	mutex_unlock(&dbg_mtx);
#endif
}

void dsscomp_dbg_events(struct seq_file *s)
{
#ifdef CONFIG_DSSCOMP_DEBUG_LOG
	u32 i;
	struct dbg_event_t *d;

	mutex_lock(&dbg_mtx);
	for (i = dbg_event_ix; i < dbg_event_ix + ARRAY_SIZE(dbg_events); i++) {
		d = dbg_events + (i % ARRAY_SIZE(dbg_events));
		if (!d->ms)
			continue;
		seq_printf(s, "[% 5d.%03d] %*s[%08x] ",
			   d->ms / 1000, d->ms % 1000,
			   d->ix + ((u32) d->data) % 7,
			   "", (u32) d->data);
		seq_printf(s, d->fmt, d->a1, d->a2);
		seq_printf(s, "\n");
	}
	mutex_unlock(&dbg_mtx);
#endif
}

/*
 * ===========================================================================
 *		EXIT
 * ===========================================================================
 */
void dsscomp_queue_exit(void)
{
	if (cdev) {
		int i;
		/* workqueues were created per manager in dsscomp_queue_init */
		for (i = 0; i < cdev->num_mgrs; i++)
			destroy_workqueue(mgrq[i].apply_workq);
		destroy_workqueue(cb_wkq);
		cdev = NULL;
	}
}
EXPORT_SYMBOL(dsscomp_queue_exit);