PageRenderTime 287ms CodeModel.GetById 41ms app.highlight 183ms RepoModel.GetById 1ms app.codeStats 2ms

/drivers/video/omap2/dss/dsi.c

https://bitbucket.org/wisechild/galaxy-nexus
C | 4892 lines | 3688 code | 975 blank | 229 comment | 517 complexity | 651627daad8025e184a206d72d55e37f MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated, but you can click here to view the full file

   1/*
   2 * linux/drivers/video/omap2/dss/dsi.c
   3 *
   4 * Copyright (C) 2009 Nokia Corporation
   5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms of the GNU General Public License version 2 as published by
   9 * the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#define DSS_SUBSYS_NAME "DSI"
  21
  22#include <linux/kernel.h>
  23#include <linux/io.h>
  24#include <linux/clk.h>
  25#include <linux/device.h>
  26#include <linux/err.h>
  27#include <linux/interrupt.h>
  28#include <linux/delay.h>
  29#include <linux/mutex.h>
  30#include <linux/semaphore.h>
  31#include <linux/seq_file.h>
  32#include <linux/platform_device.h>
  33#include <linux/regulator/consumer.h>
  34#include <linux/wait.h>
  35#include <linux/workqueue.h>
  36#include <linux/sched.h>
  37#include <linux/slab.h>
  38#include <linux/debugfs.h>
  39#include <linux/pm_runtime.h>
  40
  41#include <video/omapdss.h>
  42#include <plat/clock.h>
  43
  44#include "dss.h"
  45#include "dss_features.h"
  46
  47/*#define VERBOSE_IRQ*/
  48#define DSI_CATCH_MISSING_TE
  49
  50struct dsi_reg { u16 idx; };
  51
  52#define DSI_REG(idx)		((const struct dsi_reg) { idx })
  53
  54#define DSI_SZ_REGS		SZ_1K
  55/* DSI Protocol Engine */
  56
  57#define DSI_REVISION			DSI_REG(0x0000)
  58#define DSI_SYSCONFIG			DSI_REG(0x0010)
  59#define DSI_SYSSTATUS			DSI_REG(0x0014)
  60#define DSI_IRQSTATUS			DSI_REG(0x0018)
  61#define DSI_IRQENABLE			DSI_REG(0x001C)
  62#define DSI_CTRL			DSI_REG(0x0040)
  63#define DSI_GNQ				DSI_REG(0x0044)
  64#define DSI_COMPLEXIO_CFG1		DSI_REG(0x0048)
  65#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(0x004C)
  66#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(0x0050)
  67#define DSI_CLK_CTRL			DSI_REG(0x0054)
  68#define DSI_TIMING1			DSI_REG(0x0058)
  69#define DSI_TIMING2			DSI_REG(0x005C)
  70#define DSI_VM_TIMING1			DSI_REG(0x0060)
  71#define DSI_VM_TIMING2			DSI_REG(0x0064)
  72#define DSI_VM_TIMING3			DSI_REG(0x0068)
  73#define DSI_CLK_TIMING			DSI_REG(0x006C)
  74#define DSI_TX_FIFO_VC_SIZE		DSI_REG(0x0070)
  75#define DSI_RX_FIFO_VC_SIZE		DSI_REG(0x0074)
  76#define DSI_COMPLEXIO_CFG2		DSI_REG(0x0078)
  77#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(0x007C)
  78#define DSI_VM_TIMING4			DSI_REG(0x0080)
  79#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(0x0084)
  80#define DSI_VM_TIMING5			DSI_REG(0x0088)
  81#define DSI_VM_TIMING6			DSI_REG(0x008C)
  82#define DSI_VM_TIMING7			DSI_REG(0x0090)
  83#define DSI_STOPCLK_TIMING		DSI_REG(0x0094)
  84#define DSI_VC_CTRL(n)			DSI_REG(0x0100 + (n * 0x20))
  85#define DSI_VC_TE(n)			DSI_REG(0x0104 + (n * 0x20))
  86#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(0x0108 + (n * 0x20))
  87#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(0x010C + (n * 0x20))
  88#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(0x0110 + (n * 0x20))
  89#define DSI_VC_IRQSTATUS(n)		DSI_REG(0x0118 + (n * 0x20))
  90#define DSI_VC_IRQENABLE(n)		DSI_REG(0x011C + (n * 0x20))
  91
  92/* DSIPHY_SCP */
  93
  94#define DSI_DSIPHY_CFG0			DSI_REG(0x200 + 0x0000)
  95#define DSI_DSIPHY_CFG1			DSI_REG(0x200 + 0x0004)
  96#define DSI_DSIPHY_CFG2			DSI_REG(0x200 + 0x0008)
  97#define DSI_DSIPHY_CFG5			DSI_REG(0x200 + 0x0014)
  98#define DSI_DSIPHY_CFG10		DSI_REG(0x200 + 0x0028)
  99
 100/* DSI_PLL_CTRL_SCP */
 101
 102#define DSI_PLL_CONTROL			DSI_REG(0x300 + 0x0000)
 103#define DSI_PLL_STATUS			DSI_REG(0x300 + 0x0004)
 104#define DSI_PLL_GO			DSI_REG(0x300 + 0x0008)
 105#define DSI_PLL_CONFIGURATION1		DSI_REG(0x300 + 0x000C)
 106#define DSI_PLL_CONFIGURATION2		DSI_REG(0x300 + 0x0010)
 107
 108#define REG_GET(dsidev, idx, start, end) \
 109	FLD_GET(dsi_read_reg(dsidev, idx), start, end)
 110
 111#define REG_FLD_MOD(dsidev, idx, val, start, end) \
 112	dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
 113
 114/* Global interrupts */
 115#define DSI_IRQ_VC0		(1 << 0)
 116#define DSI_IRQ_VC1		(1 << 1)
 117#define DSI_IRQ_VC2		(1 << 2)
 118#define DSI_IRQ_VC3		(1 << 3)
 119#define DSI_IRQ_WAKEUP		(1 << 4)
 120#define DSI_IRQ_RESYNC		(1 << 5)
 121#define DSI_IRQ_PLL_LOCK	(1 << 7)
 122#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
 123#define DSI_IRQ_PLL_RECALL	(1 << 9)
 124#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
 125#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
 126#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
 127#define DSI_IRQ_TE_TRIGGER	(1 << 16)
 128#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
 129#define DSI_IRQ_SYNC_LOST	(1 << 18)
 130#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
 131#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
 132#define DSI_IRQ_ERROR_MASK \
 133	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
 134	DSI_IRQ_TA_TIMEOUT)
 135#define DSI_IRQ_CHANNEL_MASK	0xf
 136
 137/* Virtual channel interrupts */
 138#define DSI_VC_IRQ_CS		(1 << 0)
 139#define DSI_VC_IRQ_ECC_CORR	(1 << 1)
 140#define DSI_VC_IRQ_PACKET_SENT	(1 << 2)
 141#define DSI_VC_IRQ_FIFO_TX_OVF	(1 << 3)
 142#define DSI_VC_IRQ_FIFO_RX_OVF	(1 << 4)
 143#define DSI_VC_IRQ_BTA		(1 << 5)
 144#define DSI_VC_IRQ_ECC_NO_CORR	(1 << 6)
 145#define DSI_VC_IRQ_FIFO_TX_UDF	(1 << 7)
 146#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
 147#define DSI_VC_IRQ_ERROR_MASK \
 148	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
 149	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
 150	DSI_VC_IRQ_FIFO_TX_UDF)
 151
 152/* ComplexIO interrupts */
 153#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
 154#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
 155#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
 156#define DSI_CIO_IRQ_ERRSYNCESC4		(1 << 3)
 157#define DSI_CIO_IRQ_ERRSYNCESC5		(1 << 4)
 158#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
 159#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
 160#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
 161#define DSI_CIO_IRQ_ERRESC4		(1 << 8)
 162#define DSI_CIO_IRQ_ERRESC5		(1 << 9)
 163#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
 164#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
 165#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
 166#define DSI_CIO_IRQ_ERRCONTROL4		(1 << 13)
 167#define DSI_CIO_IRQ_ERRCONTROL5		(1 << 14)
 168#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
 169#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
 170#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
 171#define DSI_CIO_IRQ_STATEULPS4		(1 << 18)
 172#define DSI_CIO_IRQ_STATEULPS5		(1 << 19)
 173#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
 174#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
 175#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
 176#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
 177#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
 178#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
 179#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4	(1 << 26)
 180#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4	(1 << 27)
 181#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5	(1 << 28)
 182#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5	(1 << 29)
 183#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
 184#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
 185#define DSI_CIO_IRQ_ERROR_MASK \
 186	(DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
 187	 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
 188	 DSI_CIO_IRQ_ERRSYNCESC5 | \
 189	 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
 190	 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
 191	 DSI_CIO_IRQ_ERRESC5 | \
 192	 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
 193	 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
 194	 DSI_CIO_IRQ_ERRCONTROL5 | \
 195	 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
 196	 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
 197	 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
 198	 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
 199	 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
 200
 201#define DSI_DT_DCS_SHORT_WRITE_0	0x05
 202#define DSI_DT_DCS_SHORT_WRITE_1	0x15
 203#define DSI_DT_DCS_READ			0x06
 204#define DSI_DT_SET_MAX_RET_PKG_SIZE	0x37
 205#define DSI_DT_NULL_PACKET		0x09
 206#define DSI_DT_DCS_LONG_WRITE		0x39
 207
 208#define DSI_DT_RX_ACK_WITH_ERR		0x02
 209#define DSI_DT_RX_LONG_READ		0x1a
 210#define DSI_DT_RX_DCS_LONG_READ		0x1c
 211#define DSI_DT_RX_SHORT_READ_1		0x21
 212#define DSI_DT_RX_SHORT_READ_2		0x22
 213
/* Signature of a DSI interrupt callback; @mask is the raw IRQ status. */
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);

/* Max number of callbacks per interrupt table (global, per-VC, CIO). */
#define DSI_MAX_NR_ISRS                2

/* One registered interrupt callback: handler, its argument, and the
 * status bits it wants to be called for. */
struct dsi_isr_data {
	omap_dsi_isr_t	isr;
	void		*arg;
	u32		mask;
};
 223
/* TX/RX FIFO size selector per virtual channel; the value written to
 * the hardware maps to 0/32/64/96/128 bytes. */
enum fifo_size {
	DSI_FIFO_SIZE_0		= 0,
	DSI_FIFO_SIZE_32	= 1,
	DSI_FIFO_SIZE_64	= 2,
	DSI_FIFO_SIZE_96	= 3,
	DSI_FIFO_SIZE_128	= 4,
};
 231
/* Virtual channel source: L4 = CPU/slave port, VP = video port. */
enum dsi_vc_mode {
	DSI_VC_MODE_L4 = 0,
	DSI_VC_MODE_VP,
};
 236
/* Bitmask identifiers for the DSI clock and data lane pins (P/N pair
 * of the clock lane and of up to four data lanes). */
enum dsi_lane {
	DSI_CLK_P	= 1 << 0,
	DSI_CLK_N	= 1 << 1,
	DSI_DATA1_P	= 1 << 2,
	DSI_DATA1_N	= 1 << 3,
	DSI_DATA2_P	= 1 << 4,
	DSI_DATA2_N	= 1 << 5,
	DSI_DATA3_P	= 1 << 6,
	DSI_DATA3_N	= 1 << 7,
	DSI_DATA4_P	= 1 << 8,
	DSI_DATA4_N	= 1 << 9,
};
 249
/* Rectangle of the panel being updated, plus the display it belongs to. */
struct dsi_update_region {
	u16 x, y, w, h;
	struct omap_dss_device *device;
};
 254
/* Per-bit interrupt counters (global, per-VC, ComplexIO) gathered when
 * CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is enabled. */
struct dsi_irq_stats {
	unsigned long last_reset;	/* jiffies of the last counter reset */
	unsigned irq_count;		/* total IRQs handled since reset */
	unsigned dsi_irqs[32];
	unsigned vc_irqs[4][32];
	unsigned cio_irqs[32];
};
 262
/* All registered interrupt callbacks: one table for the global DSI
 * interrupts, one per virtual channel, one for ComplexIO. */
struct dsi_isr_tables {
	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
};
 268
/*
 * Per-DSI-module driver state, stored as the platform device's drvdata
 * (retrieved via dsi_get_dsidrv_data()).
 */
struct dsi_data {
	struct platform_device *pdev;
	void __iomem	*base;		/* mapped DSI register window */

	struct mutex	runtime_lock;	/* protects runtime_count */
	int		runtime_count;	/* nested dsi_runtime_get() references */

	int irq;

	struct clk *dss_clk;
	struct clk *sys_clk;

	/* board-provided hook to (un)mux the DSI pads; may be NULL */
	void (*dsi_mux_pads)(bool enable);

	struct dsi_clock_info current_cinfo;

	bool vdds_dsi_enabled;
	struct regulator *vdds_dsi_reg;

	/* per-virtual-channel configuration and owner display */
	struct {
		enum dsi_vc_mode mode;
		struct omap_dss_device *dssdev;
		enum fifo_size fifo_size;
		int vc_id;
	} vc[4];

	struct mutex lock;
	struct semaphore bus_lock;	/* serializes DSI bus transactions */

	unsigned pll_locked;		/* nonzero once the DSI PLL has locked */

	spinlock_t irq_lock;		/* protects isr_tables */
	struct dsi_isr_tables isr_tables;
	/* space for a copy used by the interrupt handler */
	struct dsi_isr_tables isr_tables_copy;

	int update_channel;
	struct dsi_update_region update_region;

	bool te_enabled;
	bool ulps_enabled;

	void (*framedone_callback)(int, void *);
	void *framedone_data;

	struct delayed_work framedone_timeout_work;

#ifdef DSI_CATCH_MISSING_TE
	struct timer_list te_timer;	/* fires if an awaited TE never arrives */
#endif

	/* cached PLL settings for the last requested pixel clock */
	unsigned long cache_req_pck;
	unsigned long cache_clk_freq;
	struct dsi_clock_info cache_cinfo;

	u32		errors;		/* accumulated DSI_IRQ_ERROR_MASK bits */
	spinlock_t	errors_lock;	/* protects errors */
#ifdef DEBUG
	ktime_t perf_setup_time;	/* timestamps for dsi_perf_show() */
	ktime_t perf_start_time;
#endif
	int debug_read;
	int debug_write;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spinlock_t irq_stats_lock;	/* protects irq_stats */
	struct dsi_irq_stats irq_stats;
#endif
	/* DSI PLL Parameter Ranges */
	unsigned long regm_max, regn_max;
	unsigned long  regm_dispc_max, regm_dsi_max;
	unsigned long  fint_min, fint_max;
	unsigned long lpdiv_max;

	int num_data_lanes;

	unsigned scp_clk_refcount;
};
 347
/* Context handed to the PACKET_SENT ISR: which controller, and the
 * completion to signal when the packet has gone out. */
struct dsi_packet_sent_handler_data {
	struct platform_device *dsidev;
	struct completion *completion;
};
 352
 353static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
 354
 355#ifdef DEBUG
 356static unsigned int dsi_perf;
 357module_param_named(dsi_perf, dsi_perf, bool, 0644);
 358#endif
 359
 360static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
 361{
 362	return dev_get_drvdata(&dsidev->dev);
 363}
 364
 365static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
 366{
 367	return dsi_pdev_map[dssdev->phy.dsi.module];
 368}
 369
 370struct platform_device *dsi_get_dsidev_from_id(int module)
 371{
 372	return dsi_pdev_map[module];
 373}
 374
/*
 * Return the index of the given DSI module.
 *
 * TEMP: Pass 0 as the dsi module index till the time the dsi platform
 * device names aren't changed to the form "omapdss_dsi.0",
 * "omapdss_dsi.1" and so on.  The BUG_ON catches the day a real id is
 * assigned so this stub gets updated.
 */
static int dsi_get_dsidev_id(struct platform_device *dsidev)
{
	BUG_ON(dsidev->id != -1);

	return 0;
}
 384
/* Write @val to DSI register @idx (raw store; writes are posted, so
 * callers that need ordering read the register back afterwards). */
static inline void dsi_write_reg(struct platform_device *dsidev,
		const struct dsi_reg idx, u32 val)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	__raw_writel(val, dsi->base + idx.idx);
}
 392
/* Read and return DSI register @idx. */
static inline u32 dsi_read_reg(struct platform_device *dsidev,
		const struct dsi_reg idx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return __raw_readl(dsi->base + idx.idx);
}
 400
/* Take the DSI bus semaphore for @dssdev's controller; sleeps until
 * the bus is free.  Paired with dsi_bus_unlock(). */
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	down(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_lock);
 409
/* Release the DSI bus semaphore taken by dsi_bus_lock(). */
void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	up(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_unlock);
 418
/*
 * Report whether the DSI bus lock is currently held.
 *
 * NOTE(review): this peeks at the semaphore's internal count without
 * taking its spinlock, so it is only a best-effort check — confirm no
 * caller relies on it being race-free.
 */
static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->bus_lock.count == 0;
}
 425
 426static void dsi_completion_handler(void *data, u32 mask)
 427{
 428	complete((struct completion *)data);
 429}
 430
/*
 * Busy-wait until bit @bitnum of register @idx reads as @value.
 *
 * Returns @value on success and !value after ~100000 polls without the
 * bit changing, so callers detect a timeout by comparing the return
 * against the value they waited for.
 */
static inline int wait_for_bit_change(struct platform_device *dsidev,
		const struct dsi_reg idx, int bitnum, int value)
{
	int t = 100000;

	while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
		if (--t == 0)
			return !value;
	}

	return value;
}
 443
 444#ifdef DEBUG
/* Timestamp the start of the setup phase for dsi_perf_show(). */
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}
 450
/* Timestamp the start of the transfer phase for dsi_perf_show(). */
static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}
 456
/*
 * Print setup/transfer timings and throughput for the last update,
 * gated on the dsi_perf module parameter.  Uses the timestamps taken
 * by dsi_perf_mark_setup()/dsi_perf_mark_start() and the current
 * update_region to compute byte counts.
 */
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	/* clamp both durations to >= 1 us to avoid division by zero below */
	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;

	total_us = setup_us + trans_us;

	/* bytes in the updated rectangle at the panel's pixel depth */
	total_bytes = dsi->update_region.w *
		dsi->update_region.h *
		dsi->update_region.device->ctrl.pixel_size / 8;

	/* NOTE(review): total_bytes * 1000 can overflow u32 for frames
	 * over ~4 MB — acceptable for a debug printout, but verify. */
	printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
			"%u bytes, %u kbytes/sec\n",
			name,
			setup_us,
			trans_us,
			total_us,
			1000*1000 / total_us,
			total_bytes,
			total_bytes * 1000 / total_us);
}
 495#else
 496static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
 497{
 498}
 499
 500static inline void dsi_perf_mark_start(struct platform_device *dsidev)
 501{
 502}
 503
 504static inline void dsi_perf_show(struct platform_device *dsidev,
 505		const char *name)
 506{
 507}
 508#endif
 509
/* Decode and print the global DSI IRQ status bits.  Without
 * VERBOSE_IRQ, pure virtual-channel notifications (the low 4 bits)
 * are suppressed to keep the log quiet. */
static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);

/* print the name of each status bit that is set */
#define PIS(x) \
	if (status & DSI_IRQ_##x) \
		printk(#x " ");
#ifdef VERBOSE_IRQ
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
#endif
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

	printk("\n");
}
 547
/* Decode and print a virtual channel's IRQ status bits.  Without
 * VERBOSE_IRQ, a bare PACKET_SENT notification is suppressed. */
static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);

/* print the name of each status bit that is set */
#define PIS(x) \
	if (status & DSI_VC_IRQ_##x) \
		printk(#x " ");
	PIS(CS);
	PIS(ECC_CORR);
#ifdef VERBOSE_IRQ
	PIS(PACKET_SENT);
#endif
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS
	printk("\n");
}
 576
/* Decode and print the ComplexIO IRQ status bits.  Only lanes 1-3 are
 * listed here; the lane-4/5 bits defined above are not printed. */
static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

	printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);

/* print the name of each status bit that is set */
#define PIS(x) \
	if (status & DSI_CIO_IRQ_##x) \
		printk(#x " ");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS

	printk("\n");
}
 611
 612#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
/* Fold one interrupt's worth of global/VC/CIO status bits into the
 * per-bit counters under irq_stats_lock (called from the ISR). */
static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	spin_lock(&dsi->irq_stats_lock);

	dsi->irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);

	for (i = 0; i < 4; ++i)
		dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);

	dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);

	spin_unlock(&dsi->irq_stats_lock);
}
 631#else
 632#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
 633#endif
 634
 635static int debug_irq;
 636
/*
 * Log any error bits in the global/VC/CIO status words and accumulate
 * the global error bits into dsi->errors (read back and cleared by
 * dsi_get_errors()).  With debug_irq set, non-error statuses are
 * printed too.  Called from the interrupt handler.
 */
static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	if (irqstatus & DSI_IRQ_ERROR_MASK) {
		DSSERR("DSI error, irqstatus %x\n", irqstatus);
		print_irq_status(irqstatus);
		spin_lock(&dsi->errors_lock);
		dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
		spin_unlock(&dsi->errors_lock);
	} else if (debug_irq) {
		print_irq_status(irqstatus);
	}

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
			DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
				       i, vcstatus[i]);
			print_irq_status_vc(i, vcstatus[i]);
		} else if (debug_irq) {
			print_irq_status_vc(i, vcstatus[i]);
		}
	}

	if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
		DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
		print_irq_status_cio(ciostatus);
	} else if (debug_irq) {
		print_irq_status_cio(ciostatus);
	}
}
 670
 671static void dsi_call_isrs(struct dsi_isr_data *isr_array,
 672		unsigned isr_array_size, u32 irqstatus)
 673{
 674	struct dsi_isr_data *isr_data;
 675	int i;
 676
 677	for (i = 0; i < isr_array_size; i++) {
 678		isr_data = &isr_array[i];
 679		if (isr_data->isr && isr_data->mask & irqstatus)
 680			isr_data->isr(isr_data->arg, irqstatus);
 681	}
 682}
 683
/* Dispatch the global, per-VC and CIO status words to the handlers in
 * @isr_tables (a snapshot copy — see omap_dsi_irq_handler()). */
static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
		u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
	int i;

	dsi_call_isrs(isr_tables->isr_table,
			ARRAY_SIZE(isr_tables->isr_table),
			irqstatus);

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] == 0)
			continue;
		dsi_call_isrs(isr_tables->isr_table_vc[i],
				ARRAY_SIZE(isr_tables->isr_table_vc[i]),
				vcstatus[i]);
	}

	if (ciostatus != 0)
		dsi_call_isrs(isr_tables->isr_table_cio,
				ARRAY_SIZE(isr_tables->isr_table_cio),
				ciostatus);
}
 706
/*
 * Top-level DSI interrupt handler: read and acknowledge the global,
 * per-virtual-channel and ComplexIO status registers, then dispatch to
 * the registered callbacks, error logging and statistics collection.
 */
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	/* ack everything except the VC bits, which are cleared by
	 * acking the per-VC status registers below */
	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	/* the TE arrived, so cancel the missing-TE watchdog */
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}
 774
 775/* dsi->irq_lock has to be locked by the caller */
/*
 * Program an IRQ enable register from @default_mask plus the masks of
 * all registered handlers in @isr_array.  Status bits for newly
 * enabled interrupts are cleared first so stale events don't fire
 * immediately.  dsi->irq_lock has to be locked by the caller.
 */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}
 807
 808/* dsi->irq_lock has to be locked by the caller */
/* Reprogram the global DSI interrupt enables (errors, TE watchdog, and
 * all registered handler masks).  dsi->irq_lock has to be locked by
 * the caller. */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}
 820
 821/* dsi->irq_lock has to be locked by the caller */
/* Reprogram virtual channel @vc's interrupt enables.  dsi->irq_lock
 * has to be locked by the caller. */
static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
			DSI_VC_IRQ_ERROR_MASK,
			DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
 831
 832/* dsi->irq_lock has to be locked by the caller */
/* Reprogram the ComplexIO interrupt enables.  dsi->irq_lock has to be
 * locked by the caller. */
static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
			DSI_CIO_IRQ_ERROR_MASK,
			DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
 842
/* Clear all registered handlers and program the default (error-only)
 * interrupt enables for the global, per-VC and CIO registers. */
static void _dsi_initialize_irq(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int vc;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));

	_omap_dsi_set_irqs(dsidev);
	for (vc = 0; vc < 4; ++vc)
		_omap_dsi_set_irqs_vc(dsidev, vc);
	_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
 860
 861static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
 862		struct dsi_isr_data *isr_array, unsigned isr_array_size)
 863{
 864	struct dsi_isr_data *isr_data;
 865	int free_idx;
 866	int i;
 867
 868	BUG_ON(isr == NULL);
 869
 870	/* check for duplicate entry and find a free slot */
 871	free_idx = -1;
 872	for (i = 0; i < isr_array_size; i++) {
 873		isr_data = &isr_array[i];
 874
 875		if (isr_data->isr == isr && isr_data->arg == arg &&
 876				isr_data->mask == mask) {
 877			return -EINVAL;
 878		}
 879
 880		if (isr_data->isr == NULL && free_idx == -1)
 881			free_idx = i;
 882	}
 883
 884	if (free_idx == -1)
 885		return -EBUSY;
 886
 887	isr_data = &isr_array[free_idx];
 888	isr_data->isr = isr;
 889	isr_data->arg = arg;
 890	isr_data->mask = mask;
 891
 892	return 0;
 893}
 894
 895static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
 896		struct dsi_isr_data *isr_array, unsigned isr_array_size)
 897{
 898	struct dsi_isr_data *isr_data;
 899	int i;
 900
 901	for (i = 0; i < isr_array_size; i++) {
 902		isr_data = &isr_array[i];
 903		if (isr_data->isr != isr || isr_data->arg != arg ||
 904				isr_data->mask != mask)
 905			continue;
 906
 907		isr_data->isr = NULL;
 908		isr_data->arg = NULL;
 909		isr_data->mask = 0;
 910
 911		return 0;
 912	}
 913
 914	return -EINVAL;
 915}
 916
 917static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
 918		void *arg, u32 mask)
 919{
 920	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 921	unsigned long flags;
 922	int r;
 923
 924	spin_lock_irqsave(&dsi->irq_lock, flags);
 925
 926	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
 927			ARRAY_SIZE(dsi->isr_tables.isr_table));
 928
 929	if (r == 0)
 930		_omap_dsi_set_irqs(dsidev);
 931
 932	spin_unlock_irqrestore(&dsi->irq_lock, flags);
 933
 934	return r;
 935}
 936
 937static int dsi_unregister_isr(struct platform_device *dsidev,
 938		omap_dsi_isr_t isr, void *arg, u32 mask)
 939{
 940	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 941	unsigned long flags;
 942	int r;
 943
 944	spin_lock_irqsave(&dsi->irq_lock, flags);
 945
 946	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
 947			ARRAY_SIZE(dsi->isr_tables.isr_table));
 948
 949	if (r == 0)
 950		_omap_dsi_set_irqs(dsidev);
 951
 952	spin_unlock_irqrestore(&dsi->irq_lock, flags);
 953
 954	return r;
 955}
 956
 957static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
 958		omap_dsi_isr_t isr, void *arg, u32 mask)
 959{
 960	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 961	unsigned long flags;
 962	int r;
 963
 964	spin_lock_irqsave(&dsi->irq_lock, flags);
 965
 966	r = _dsi_register_isr(isr, arg, mask,
 967			dsi->isr_tables.isr_table_vc[channel],
 968			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
 969
 970	if (r == 0)
 971		_omap_dsi_set_irqs_vc(dsidev, channel);
 972
 973	spin_unlock_irqrestore(&dsi->irq_lock, flags);
 974
 975	return r;
 976}
 977
 978static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
 979		omap_dsi_isr_t isr, void *arg, u32 mask)
 980{
 981	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 982	unsigned long flags;
 983	int r;
 984
 985	spin_lock_irqsave(&dsi->irq_lock, flags);
 986
 987	r = _dsi_unregister_isr(isr, arg, mask,
 988			dsi->isr_tables.isr_table_vc[channel],
 989			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
 990
 991	if (r == 0)
 992		_omap_dsi_set_irqs_vc(dsidev, channel);
 993
 994	spin_unlock_irqrestore(&dsi->irq_lock, flags);
 995
 996	return r;
 997}
 998
 999static int dsi_register_isr_cio(struct platform_device *dsidev,
1000		omap_dsi_isr_t isr, void *arg, u32 mask)
1001{
1002	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1003	unsigned long flags;
1004	int r;
1005
1006	spin_lock_irqsave(&dsi->irq_lock, flags);
1007
1008	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1009			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1010
1011	if (r == 0)
1012		_omap_dsi_set_irqs_cio(dsidev);
1013
1014	spin_unlock_irqrestore(&dsi->irq_lock, flags);
1015
1016	return r;
1017}
1018
1019static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1020		omap_dsi_isr_t isr, void *arg, u32 mask)
1021{
1022	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1023	unsigned long flags;
1024	int r;
1025
1026	spin_lock_irqsave(&dsi->irq_lock, flags);
1027
1028	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
1029			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
1030
1031	if (r == 0)
1032		_omap_dsi_set_irqs_cio(dsidev);
1033
1034	spin_unlock_irqrestore(&dsi->irq_lock, flags);
1035
1036	return r;
1037}
1038
1039static u32 dsi_get_errors(struct platform_device *dsidev)
1040{
1041	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1042	unsigned long flags;
1043	u32 e;
1044	spin_lock_irqsave(&dsi->errors_lock, flags);
1045	e = dsi->errors;
1046	dsi->errors = 0;
1047	spin_unlock_irqrestore(&dsi->errors_lock, flags);
1048	return e;
1049}
1050
1051int dsi_runtime_get(struct platform_device *dsidev)
1052{
1053	int r;
1054	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1055
1056	mutex_lock(&dsi->runtime_lock);
1057
1058	if (dsi->runtime_count++ == 0) {
1059		DSSDBG("dsi_runtime_get\n");
1060
1061		r = dss_runtime_get();
1062		if (r)
1063			goto err_get_dss;
1064
1065		r = dispc_runtime_get();
1066		if (r)
1067			goto err_get_dispc;
1068
1069		/* XXX dsi fclk can also come from DSI PLL */
1070		clk_enable(dsi->dss_clk);
1071
1072		r = pm_runtime_get_sync(&dsi->pdev->dev);
1073		WARN_ON(r);
1074		if (r < 0)
1075			goto err_runtime_get;
1076	}
1077
1078	mutex_unlock(&dsi->runtime_lock);
1079
1080	return 0;
1081
1082err_runtime_get:
1083	clk_disable(dsi->dss_clk);
1084	dispc_runtime_put();
1085err_get_dispc:
1086	dss_runtime_put();
1087err_get_dss:
1088	mutex_unlock(&dsi->runtime_lock);
1089
1090	return r;
1091}
1092
/*
 * Drop one reference taken by dsi_runtime_get(). The last caller
 * releases the resources in reverse order of acquisition: DSI hwmod
 * (runtime PM), DSS functional clock, DISPC, DSS core.
 */
void dsi_runtime_put(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	mutex_lock(&dsi->runtime_lock);

	if (--dsi->runtime_count == 0) {
		int r;

		DSSDBG("dsi_runtime_put\n");

		r = pm_runtime_put_sync(&dsi->pdev->dev);
		WARN_ON(r);

		clk_disable(dsi->dss_clk);

		dispc_runtime_put();
		dss_runtime_put();
	}

	mutex_unlock(&dsi->runtime_lock);
}
1115
1116/* source clock for DSI PLL. this could also be PCLKFREE */
1117static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1118		bool enable)
1119{
1120	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1121
1122	if (enable)
1123		clk_enable(dsi->sys_clk);
1124	else
1125		clk_disable(dsi->sys_clk);
1126
1127	if (enable && dsi->pll_locked) {
1128		if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
1129			DSSERR("cannot lock PLL when enabling clocks\n");
1130	}
1131}
1132
#ifdef DEBUG
/* Debug-only: print the DSI PLL / ComplexIO / DSIPHY reset-done bits.
 * Compiled out (empty macro below) unless DEBUG is defined, and a
 * no-op at runtime unless dss_debug is set. */
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
	u32 l;
	int b0, b1, b2;

	if (!dss_debug)
		return;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	printk(KERN_DEBUG "DSI resets: ");

	l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
	printk("PLL (%d) ", FLD_GET(l, 0, 0));

	l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
	printk("CIO (%d) ", FLD_GET(l, 29, 29));

	/* The PHY status bit positions depend on whether this SoC has the
	 * reversed TXCLKESC bit layout (see FEAT_DSI_REVERSE_TXCLKESC). */
	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
	printk("PHY (%x%x%x, %d, %d, %d)\n",
			FLD_GET(l, b0, b0),
			FLD_GET(l, b1, b1),
			FLD_GET(l, b2, b2),
			FLD_GET(l, 29, 29),
			FLD_GET(l, 30, 30),
			FLD_GET(l, 31, 31));
}
#else
#define _dsi_print_reset_status(x)
#endif
1177
1178static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
1179{
1180	DSSDBG("dsi_if_enable(%d)\n", enable);
1181
1182	enable = enable ? 1 : 0;
1183	REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
1184
1185	if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
1186			DSSERR("Failed to set dsi_if_enable to %d\n", enable);
1187			return -EIO;
1188	}
1189
1190	return 0;
1191}
1192
1193unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
1194{
1195	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1196
1197	return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
1198}
1199
1200static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
1201{
1202	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1203
1204	return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1205}
1206
1207static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1208{
1209	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1210
1211	return dsi->current_cinfo.clkin4ddr / 16;
1212}
1213
1214static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1215{
1216	unsigned long r;
1217	int dsi_module = dsi_get_dsidev_id(dsidev);
1218	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1219
1220	if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1221		/* DSI FCLK source is DSS_CLK_FCK */
1222		r = clk_get_rate(dsi->dss_clk);
1223	} else {
1224		/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1225		r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1226	}
1227
1228	return r;
1229}
1230
/*
 * Program the LP (low-power) clock divisor from the board-supplied
 * dssdev->clocks.dsi.lp_clk_div. LP_CLK = DSI_FCLK / 2 / lp_clk_div.
 * Returns -EINVAL if the divisor is zero or above the SoC maximum.
 */
static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long dsi_fclk;
	unsigned lp_clk_div;
	unsigned long lp_clk;

	lp_clk_div = dssdev->clocks.dsi.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsidev);

	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	/* Cache the resulting rates for later queries/dumps. */
	dsi->current_cinfo.lp_clk = lp_clk;
	dsi->current_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}
1260
/* Refcounted enable of the SCP interface clock (CIO_CLK_ICG); the
 * first user gates the clock on. Pair with dsi_disable_scp_clk(). */
static void dsi_enable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->scp_clk_refcount++ == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}
1268
/* Refcounted disable of the SCP interface clock; the last user gates
 * the clock off. Warns on an unbalanced disable. */
static void dsi_disable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(dsi->scp_clk_refcount == 0);
	if (--dsi->scp_clk_refcount == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
1277
/* Power states written to the DSI_CLK_CTRL PLL_PWR_CMD field (bits
 * 31:30) by dsi_pll_power(); the matching status is polled from the
 * PLL_PWR_STATUS field (bits 29:28). */
enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,
	DSI_PLL_POWER_ON_ALL	= 0x2,
	DSI_PLL_POWER_ON_DIV	= 0x3,
};
1284
/*
 * Request a DSI PLL power state and poll until the hardware reports
 * it. Times out after ~1000us and returns -ENODEV; returns 0 on
 * success.
 */
static int dsi_pll_power(struct platform_device *dsidev,
		enum dsi_pll_power_state state)
{
	int t = 0;

	/* DSI-PLL power command 0x3 is not working */
	if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
			state == DSI_PLL_POWER_ON_DIV)
		state = DSI_PLL_POWER_ON_ALL;

	/* PLL_PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);

	/* PLL_PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
		if (++t > 1000) {
			DSSERR("Failed to set DSI PLL power mode to %d\n",
					state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}
1310
/* calculate clock rates using dividers in cinfo */
/*
 * Validate the regn/regm/regm_dispc/regm_dsi dividers in *cinfo
 * against the SoC limits, fill in clkin/highfreq from the selected
 * source, and derive fint, clkin4ddr and the two HSDIV output rates.
 * Returns 0 on success, -EINVAL if any divider or derived rate is out
 * of range.
 */
static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
		struct dsi_clock_info *cinfo)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
		return -EINVAL;

	if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
		return -EINVAL;

	if (cinfo->regm_dispc > dsi->regm_dispc_max)
		return -EINVAL;

	if (cinfo->regm_dsi > dsi->regm_dsi_max)
		return -EINVAL;

	if (cinfo->use_sys_clk) {
		cinfo->clkin = clk_get_rate(dsi->sys_clk);
		/* XXX it is unclear if highfreq should be used
		 * with DSS_SYS_CLK source also */
		cinfo->highfreq = 0;
	} else {
		cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);

		/* highfreq divides the input by 2 for clkin >= 32 MHz */
		if (cinfo->clkin < 32000000)
			cinfo->highfreq = 0;
		else
			cinfo->highfreq = 1;
	}

	cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));

	if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
		return -EINVAL;

	cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;

	/* 1.8 GHz is the upper bound enforced on the 4x DDR clock */
	if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
		return -EINVAL;

	if (cinfo->regm_dispc > 0)
		cinfo->dsi_pll_hsdiv_dispc_clk =
			cinfo->clkin4ddr / cinfo->regm_dispc;
	else
		cinfo->dsi_pll_hsdiv_dispc_clk = 0;

	if (cinfo->regm_dsi > 0)
		cinfo->dsi_pll_hsdiv_dsi_clk =
			cinfo->clkin4ddr / cinfo->regm_dsi;
	else
		cinfo->dsi_pll_hsdiv_dsi_clk = 0;

	return 0;
}
1368
/*
 * Search the regn/regm/regm_dispc divider space for the PLL settings
 * whose resulting DISPC pixel clock is closest to req_pck. Results
 * are cached per (req_pck, sys_clk rate) to avoid repeating the
 * exhaustive search. If no configuration satisfies the
 * OMAP2_DSS_MIN_FCK_PER_PCK constraint, the search is retried once
 * with the constraint disabled. The regm_dsi output is not used by
 * this path and is forced to 0. Returns 0 on success, -EINVAL if no
 * settings are found.
 */
int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
		unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
		struct dispc_clock_info *dispc_cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info cur, best;
	struct dispc_clock_info best_dispc;
	int min_fck_per_pck;
	int match = 0;
	unsigned long dss_sys_clk, max_dss_fck;

	dss_sys_clk = clk_get_rate(dsi->sys_clk);

	max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);

	/* Fast path: reuse the cached result if the request is unchanged. */
	if (req_pck == dsi->cache_req_pck &&
			dsi->cache_cinfo.clkin == dss_sys_clk) {
		DSSDBG("DSI clock info found from cache\n");
		*dsi_cinfo = dsi->cache_cinfo;
		dispc_find_clk_divs(is_tft, req_pck,
			dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
		return 0;
	}

	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;

	if (min_fck_per_pck &&
		req_pck * min_fck_per_pck > max_dss_fck) {
		DSSERR("Requested pixel clock not possible with the current "
				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
				"the constraint off.\n");
		min_fck_per_pck = 0;
	}

	DSSDBG("dsi_pll_calc\n");

retry:
	memset(&best, 0, sizeof(best));
	memset(&best_dispc, 0, sizeof(best_dispc));

	memset(&cur, 0, sizeof(cur));
	cur.clkin = dss_sys_clk;
	cur.use_sys_clk = 1;
	cur.highfreq = 0;

	/* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
	/* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
	/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
	for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
		if (cur.highfreq == 0)
			cur.fint = cur.clkin / cur.regn;
		else
			cur.fint = cur.clkin / (2 * cur.regn);

		if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
			continue;

		/* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
		for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
			unsigned long a, b;

			/* compute in kHz steps to avoid 32-bit overflow */
			a = 2 * cur.regm * (cur.clkin/1000);
			b = cur.regn * (cur.highfreq + 1);
			cur.clkin4ddr = a / b * 1000;

			if (cur.clkin4ddr > 1800 * 1000 * 1000)
				break;

			/* dsi_pll_hsdiv_dispc_clk(MHz) =
			 * DSIPHY(MHz) / regm_dispc  < 173MHz/186Mhz */
			for (cur.regm_dispc = 1; cur.regm_dispc <
					dsi->regm_dispc_max; ++cur.regm_dispc) {
				struct dispc_clock_info cur_dispc;
				cur.dsi_pll_hsdiv_dispc_clk =
					cur.clkin4ddr / cur.regm_dispc;

				/* this will narrow down the search a bit,
				 * but still give pixclocks below what was
				 * requested */
				if (cur.dsi_pll_hsdiv_dispc_clk  < req_pck)
					break;

				if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
					continue;

				if (min_fck_per_pck &&
					cur.dsi_pll_hsdiv_dispc_clk <
						req_pck * min_fck_per_pck)
					continue;

				match = 1;

				dispc_find_clk_divs(is_tft, req_pck,
						cur.dsi_pll_hsdiv_dispc_clk,
						&cur_dispc);

				/* keep whichever candidate gets closest to
				 * the requested pixel clock */
				if (abs(cur_dispc.pck - req_pck) <
						abs(best_dispc.pck - req_pck)) {
					best = cur;
					best_dispc = cur_dispc;

					if (cur_dispc.pck == req_pck)
						goto found;
				}
			}
		}
	}
found:
	if (!match) {
		if (min_fck_per_pck) {
			DSSERR("Could not find suitable clock settings.\n"
					"Turning FCK/PCK constraint off and"
					"trying again.\n");
			min_fck_per_pck = 0;
			goto retry;
		}

		DSSERR("Could not find suitable clock settings.\n");

		return -EINVAL;
	}

	/* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
	best.regm_dsi = 0;
	best.dsi_pll_hsdiv_dsi_clk = 0;

	if (dsi_cinfo)
		*dsi_cinfo = best;
	if (dispc_cinfo)
		*dispc_cinfo = best_dispc;

	dsi->cache_req_pck = req_pck;
	dsi->cache_clk_freq = 0;
	dsi->cache_cinfo = best;

	return 0;
}
1506
/*
 * Program the DSI PLL with the dividers in *cinfo: write the regn/
 * regm/regm_dispc/regm_dsi fields, select the reference clock and
 * FREQSEL band, start the lock sequence via DSI_PLL_GO, and wait for
 * lock. The chosen rates are cached in dsi->current_cinfo. Returns 0
 * on success or -EIO if the GO bit does not clear or the PLL fails to
 * lock.
 */
int dsi_pll_set_clock_div(struct platform_device *dsidev,
		struct dsi_clock_info *cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	u32 l;
	int f = 0;
	u8 regn_start, regn_end, regm_start, regm_end;
	u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;

	DSSDBGF();

	/* Cache the configuration being programmed for later queries. */
	dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
	dsi->current_cinfo.highfreq = cinfo->highfreq;

	dsi->current_cinfo.fint = cinfo->fint;
	dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
	dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
			cinfo->dsi_pll_hsdiv_dispc_clk;
	dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
			cinfo->dsi_pll_hsdiv_dsi_clk;

	dsi->current_cinfo.regn = cinfo->regn;
	dsi->current_cinfo.regm = cinfo->regm;
	dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
	dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;

	DSSDBG("DSI Fint %ld\n", cinfo->fint);

	DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
			cinfo->clkin,
			cinfo->highfreq);

	/* DSIPHY == CLKIN4DDR */
	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
			cinfo->regm,
			cinfo->regn,
			cinfo->clkin,
			cinfo->highfreq + 1,
			cinfo->clkin4ddr);

	DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
			cinfo->clkin4ddr / 1000 / 1000 / 2);

	DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);

	DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		cinfo->dsi_pll_hsdiv_dispc_clk);
	DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		cinfo->dsi_pll_hsdiv_dsi_clk);

	/* The divider field positions are SoC-specific; query them. */
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
			&regm_dispc_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
			&regm_dsi_end);

	/* DSI_PLL_AUTOMODE = manual */
	REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
	l = FLD_MOD(l, 1, 0, 0);		/* DSI_PLL_STOPMODE */
	/* DSI_PLL_REGN */
	l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
	/* DSI_PLL_REGM */
	l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
	/* DSI_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
			regm_dispc_start, regm_dispc_end);
	/* DSIPROTO_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
			regm_dsi_start, regm_dsi_end);
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);

	BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);

	/* Select the FREQSEL band from the internal reference frequency. */
	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
		f = cinfo->fint < 1000000 ? 0x3 :
			cinfo->fint < 1250000 ? 0x4 :
			cinfo->fint < 1500000 ? 0x5 :
			cinfo->fint < 1750000 ? 0x6 :
			0x7;
	}

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);

	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
		l = FLD_MOD(l, f, 4, 1);	/* DSI_PLL_FREQSEL */
	l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
			11, 11);		/* DSI_PLL_CLKSEL */
	l = FLD_MOD(l, cinfo->highfreq,
			12, 12);		/* DSI_PLL_HIGHFREQ */
	l = FLD_MOD(l, 1, 13, 13);		/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 0, 14, 14);		/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 1, 20, 20);		/* DSI_HSDIVBYPASS */

	if (cpu_is_omap44xx())
		l = FLD_MOD(l, 3, 22, 21);	/* DSI_REF_SEL */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0);	/* DSI_PLL_GO */

	if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
		DSSERR("dsi pll go bit not going down.\n");
		r = -EIO;
		goto err;
	}

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
		DSSERR("cannot lock PLL\n");
		r = -EIO;
		goto err;
	}

	dsi->pll_locked = 1;

	/* Final configuration now that the PLL is locked: enable the PHY
	 * clock input and the HSDIV outputs, drop the bypass. */
	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
	l = FLD_MOD(l, 0, 0, 0);	/* DSI_PLL_IDLE */
	l = FLD_MOD(l, 0, 5, 5);	/* DSI_PLL_PLLLPMODE */
	l = FLD_MOD(l, 0, 6, 6);	/* DSI_PLL_LOWCURRSTBY */
	l = FLD_MOD(l, 0, 7, 7);	/* DSI_PLL_TIGHTPHASELOCK */
	l = FLD_MOD(l, 0, 8, 8);	/* DSI_PLL_DRIFTGUARDEN */
	l = FLD_MOD(l, 0, 10, 9);	/* DSI_PLL_LOCKSEL */
	l = FLD_MOD(l, 1, 13, 13);	/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 1, 14, 14);	/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 0, 15, 15);	/* DSI_BYPASSEN */
	l = FLD_MOD(l, 1, 16, 16);	/* DSS_CLOCK_EN */
	l = FLD_MOD(l, 0, 17, 17);	/* DSS_CLOCK_PWDN */
	l = FLD_MOD(l, 1, 18, 18);	/* DSI_PROTO_CLOCK_EN */
	l = FLD_MOD(l, 0, 19, 19);	/* DSI_PROTO_CLOCK_PWDN */
	l = FLD_MOD(l, 0, 20, 20);	/* DSI_HSDIVBYPASS */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	DSSDBG("PLL config done\n");
err:
	return r;
}
1650
/*
 * Bring the DSI PLL out of reset and into the requested power state.
 * Acquires the VDDS_DSI regulator (lazily, on first use), enables the
 * PLL and SCP clocks, and powers the PLL HSCLK/HSDIV outputs as
 * requested. On failure everything is unwound in reverse order.
 * Returns 0 on success or a negative error code.
 */
int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
		bool enable_hsdiv)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	enum dsi_pll_power_state pwstate;

	DSSDBG("PLL init\n");

	/* Lazily acquire the regulator; it is kept for the device's
	 * lifetime once obtained. */
	if (dsi->vdds_dsi_reg == NULL) {
		struct regulator *vdds_dsi;

		vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");

		if (IS_ERR(vdds_dsi)) {
			DSSERR("can't get VDDS_DSI regulator\n");
			return PTR_ERR(vdds_dsi);
		}

		dsi->vdds_dsi_reg = vdds_dsi;
	}

	dsi_enable_pll_clock(dsidev, 1);
	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsidev);

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err0;
		dsi->vdds_dsi_enabled = true;
	}

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(1);

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(0);

	/* Map the two enable flags onto the PLL power-state command. */
	if (enable_hsclk && enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_ALL;
	else if (enable_hsclk)
		pwstate = DSI_PLL_POWER_ON_HSCLK;
	else if (enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_DIV;
	else
		pwstate = DSI_PLL_POWER_OFF;

	r = dsi_pll_power(dsidev, pwstate);

	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	if (dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
err0:
	dsi_disable_scp_clk(dsidev);
	dsi_enable_pll_clock(dsidev, 0);
	return r;
}
1727
/*
 * Power the DSI PLL off and release the clocks taken by
 * dsi_pll_init(). When disconnect_lanes is set, the VDDS_DSI
 * regulator is disabled as well.
 */
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->pll_locked = 0;
	dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
	if (disconnect_lanes) {
		WARN_ON(!dsi->vdds_dsi_enabled);
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}

	dsi_disable_scp_clk(dsidev);
	dsi_enable_pll_clock(dsidev, 0);

	DSSDBG("PLL uninit done\n");
}
1745
/*
 * Dump the cached PLL/clock configuration of one DSI module into a
 * seq_file (debugfs). Takes a runtime PM reference for the duration
 * of the dump; silently bails out if the module cannot be powered.
 */
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info *cinfo = &dsi->current_cinfo;
	enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi_get_dsidev_id(dsidev);

	dispc_clk_src = dss_get_dispc_clk_source();
	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);

	if (dsi_runtime_get(dsidev))
		return;

	seq_printf(s,	"- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s,	"dsi pll source = %s\n",
			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");

	seq_printf(s,	"Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);

	seq_printf(s,	"CLKIN4DDR\t%-16luregm %u\n",
			cinfo->clkin4ddr, cinfo->regm);

	seq_printf(s,	"%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
			dss_get_generic_clk_source_name(dispc_clk_src),
			dss_feat_get_clk_source_name(dispc_clk_src),
			cinfo->dsi_pll_hsdiv_dispc_clk,
			cinfo->regm_dispc,
			dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src),
			cinfo->dsi_pll_hsdiv_dsi_clk,
			cinfo->regm_dsi,
			dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"- DSI%d -\n", dsi_module + 1);

	seq_printf(s,	"dsi fclk source = %s (%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src));

	seq_printf(s,	"DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));

	seq_printf(s,	"DDR_CLK\t\t%lu\n",
			cinfo->clkin4ddr / 4);

	seq_printf(s,	"TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));

	seq_printf(s,	"LP_CLK\t\t%lu\n", cinfo->lp_clk);

	dsi_runtime_put(dsidev);
}
1803
1804void dsi_dump_clocks(struct seq_file *s)
1805{
1806	struct platform_device *dsidev;
1807	int i;
1808
1809	for  (i = 0; i < MAX_NUM_DSI; i++) {
1810		dsidev = dsi_get_dsidev_from_id(i);
1811		if (dsidev)
1812			dsi_dump_dsidev_clocks(dsidev, s);
1813	}
1814}
1815
1816#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
/*
 * Dump and reset the interrupt statistics of one DSI module into a
 * seq_file (debugfs). A snapshot of the counters is taken under the
 * stats lock and the live counters are zeroed, so each read reports
 * the activity since the previous read.
 */
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;
	int dsi_module = dsi_get_dsidev_id(dsidev);

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
/* PIS: print one interrupt-statistic line; indexes the counter array
 * by the bit position of the corresponding DSI_IRQ_* flag. */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

/* PIS (redefined): one line per VC interrupt, with a column for each
 * of the four virtual channels. */
#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

/* PIS (redefined): one line per ComplexIO interrupt. */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}
1906
/* debugfs callback: dump the IRQ statistics of the first DSI module. */
static void dsi1_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(0), s);
}
1913
/* debugfs callback: dump the IRQ statistics of the second DSI module. */
static void dsi2_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(1), s);
}
1920
1921void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1922		const struct file_operations *debug_fops)
1923{
1924	struct platform_device *dsidev;
1925
1926	dsidev = dsi_get_dsidev_from_id(0);
1927	if (dsidev)
1928		debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1929			&dsi1_dump_irqs, debug_fops);
1930
1931	dsidev = dsi_get_dsidev_from_id(1);
1932	if (dsidev)
1933		debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1934			&dsi2_dump_irqs, debug_fops);
1935}
1936#endif
1937
1938static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1939		struct seq_file *s)
1940{
1941#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1942
1943	if (dsi_runtime_get(dsidev))
1944		return;
1945	dsi_enable_scp_clk(dsidev);
1946
1947	DUMPREG(DSI_REVISION);
1948	DUMPREG(DSI_SYSCONFIG);
1949	DUMPREG(DSI_SYSSTATUS);
1950	DUMPREG(DSI_IRQSTATUS);
1951	DUMPREG(DSI_IRQENABLE);
1952	DUMPREG(DSI_CTRL);
1953	DUMPREG(DSI_COMPLEXIO_CFG1);
1954	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1955	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1956	DUMPREG(DSI_CLK_CTRL);
1957	DUMPREG(DSI_TIMING1);
1958	DUMPREG(DSI_TIMING2);
1959	DUMPREG(DSI_VM_TIMING1);
1960	DUMPREG(DSI_VM_TIMING2);
1961	DUMPREG(DSI_VM_TIMING3);
1962	DUMPREG(DSI_CLK_TIMING);
1963	DUMPREG(DSI_TX_FIFO_VC_SIZE);
1964	DUMPREG(DSI_RX_FIFO_VC_SIZE);
1965	DUMPREG(DSI_COMPLEXIO_CFG2);
1966	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1967	DUMPREG(DSI_VM_TIMING4);
1968	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS

Large files files are truncated, but you can click here to view the full file