PageRenderTime 51ms CodeModel.GetById 21ms RepoModel.GetById 0ms app.codeStats 0ms

/sound/soc/sh/rcar/dma.c

https://bitbucket.org/mirror/linux
C | 875 lines | 600 code | 131 blank | 144 comment | 87 complexity | 765788fc0e2e1b73b680a05d31337fa5 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Renesas R-Car Audio DMAC support
  4. //
  5. // Copyright (C) 2015 Renesas Electronics Corp.
  6. // Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
  7. #include <linux/delay.h>
  8. #include <linux/of_dma.h>
  9. #include "rsnd.h"
  10. /*
  11. * Audio DMAC peri peri register
  12. */
  13. #define PDMASAR 0x00
  14. #define PDMADAR 0x04
  15. #define PDMACHCR 0x0c
  16. /* PDMACHCR */
  17. #define PDMACHCR_DE (1 << 0)
/* per-stream state for the generic Audio DMAC (DMAEngine) path */
struct rsnd_dmaen {
	struct dma_chan		*chan;		/* DMAEngine channel; NULL when released */
	dma_cookie_t		cookie;		/* cookie of the submitted cyclic descriptor */
	unsigned int		dma_len;	/* total ring-buffer length in bytes */
};
/* per-stream state for the Audio DMAC peri-peri path */
struct rsnd_dmapp {
	int	dmapp_id;	/* index of this stream's PDMA register bank */
	u32	chcr;		/* cached PDMACHCR value, includes PDMACHCR_DE */
};
/*
 * One DMA path between two endpoints; behaves as a rsnd_mod so it can
 * be connected into the stream like any other module.
 */
struct rsnd_dma {
	struct rsnd_mod		mod;		/* embedded mod (see rsnd_mod_to_dma) */
	struct rsnd_mod		*mod_from;	/* source module, NULL = memory */
	struct rsnd_mod		*mod_to;	/* destination module, NULL = memory */
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;	/* used with rsnd_dmaen_ops */
		struct rsnd_dmapp pp;	/* used with rsnd_dmapp_ops */
	} dma;
};
/* controller-wide DMA state, stored in priv->dma */
struct rsnd_dma_ctrl {
	void __iomem *base;	/* mapped "audmapp" register region */
	int dmaen_num;		/* number of allocated DMAEngine paths */
	int dmapp_num;		/* number of allocated peri-peri channels */
};
/* container/accessor helpers */
#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)
/* for DEBUG */
/* dummy mod standing in for the memory endpoint in debug output */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};
  53. /*
  54. * Audio DMAC
  55. */
  56. static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
  57. struct rsnd_dai_stream *io)
  58. {
  59. if (rsnd_io_is_working(io))
  60. rsnd_dai_period_elapsed(io);
  61. }
  62. static void rsnd_dmaen_complete(void *data)
  63. {
  64. struct rsnd_mod *mod = data;
  65. rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
  66. }
  67. static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
  68. struct rsnd_mod *mod_from,
  69. struct rsnd_mod *mod_to)
  70. {
  71. if ((!mod_from && !mod_to) ||
  72. (mod_from && mod_to))
  73. return NULL;
  74. if (mod_from)
  75. return rsnd_mod_dma_req(io, mod_from);
  76. else
  77. return rsnd_mod_dma_req(io, mod_to);
  78. }
  79. static int rsnd_dmaen_stop(struct rsnd_mod *mod,
  80. struct rsnd_dai_stream *io,
  81. struct rsnd_priv *priv)
  82. {
  83. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  84. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  85. if (dmaen->chan)
  86. dmaengine_terminate_all(dmaen->chan);
  87. return 0;
  88. }
/* release the channel obtained in rsnd_dmaen_prepare() */
static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under prepare
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	/* mark released so a later prepare re-requests the channel */
	dmaen->chan = NULL;

	return 0;
}
/* (re)acquire the DMAEngine channel for this path */
static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	/* maybe suspended */
	if (dmaen->chan)
		return 0;

	/*
	 * DMAEngine request uses mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under prepare
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		/* never leave an ERR_PTR behind in dmaen->chan */
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}
  130. static int rsnd_dmaen_start(struct rsnd_mod *mod,
  131. struct rsnd_dai_stream *io,
  132. struct rsnd_priv *priv)
  133. {
  134. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  135. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  136. struct snd_pcm_substream *substream = io->substream;
  137. struct device *dev = rsnd_priv_to_dev(priv);
  138. struct dma_async_tx_descriptor *desc;
  139. struct dma_slave_config cfg = {};
  140. enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
  141. int is_play = rsnd_io_is_play(io);
  142. int ret;
  143. /*
  144. * in case of monaural data writing or reading through Audio-DMAC
  145. * data is always in Left Justified format, so both src and dst
  146. * DMA Bus width need to be set equal to physical data width.
  147. */
  148. if (rsnd_runtime_channel_original(io) == 1) {
  149. struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
  150. int bits = snd_pcm_format_physical_width(runtime->format);
  151. switch (bits) {
  152. case 8:
  153. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  154. break;
  155. case 16:
  156. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  157. break;
  158. case 32:
  159. buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
  160. break;
  161. default:
  162. dev_err(dev, "invalid format width %d\n", bits);
  163. return -EINVAL;
  164. }
  165. }
  166. cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
  167. cfg.src_addr = dma->src_addr;
  168. cfg.dst_addr = dma->dst_addr;
  169. cfg.src_addr_width = buswidth;
  170. cfg.dst_addr_width = buswidth;
  171. dev_dbg(dev, "%s %pad -> %pad\n",
  172. rsnd_mod_name(mod),
  173. &cfg.src_addr, &cfg.dst_addr);
  174. ret = dmaengine_slave_config(dmaen->chan, &cfg);
  175. if (ret < 0)
  176. return ret;
  177. desc = dmaengine_prep_dma_cyclic(dmaen->chan,
  178. substream->runtime->dma_addr,
  179. snd_pcm_lib_buffer_bytes(substream),
  180. snd_pcm_lib_period_bytes(substream),
  181. is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
  182. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  183. if (!desc) {
  184. dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
  185. return -EIO;
  186. }
  187. desc->callback = rsnd_dmaen_complete;
  188. desc->callback_param = rsnd_mod_get(dma);
  189. dmaen->dma_len = snd_pcm_lib_buffer_bytes(substream);
  190. dmaen->cookie = dmaengine_submit(desc);
  191. if (dmaen->cookie < 0) {
  192. dev_err(dev, "dmaengine_submit() fail\n");
  193. return -EIO;
  194. }
  195. dma_async_issue_pending(dmaen->chan);
  196. return 0;
  197. }
  198. struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
  199. struct rsnd_mod *mod, char *name)
  200. {
  201. struct dma_chan *chan = NULL;
  202. struct device_node *np;
  203. int i = 0;
  204. for_each_child_of_node(of_node, np) {
  205. if (i == rsnd_mod_id_raw(mod) && (!chan))
  206. chan = of_dma_request_slave_channel(np, name);
  207. i++;
  208. }
  209. /* It should call of_node_put(), since, it is rsnd_xxx_of_node() */
  210. of_node_put(of_node);
  211. return chan;
  212. }
/*
 * Probe-time check that a DMAEngine channel exists for this path.
 * The channel is requested once, its device recorded for IPMMU use,
 * then released; the working channel is acquired in rsnd_dmaen_prepare().
 */
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* Let's follow when -EPROBE_DEFER case */
		/* (PTR_ERR(NULL) is 0, so a NULL chan falls through to -EAGAIN) */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA failed. try to PIO mode
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}
  244. static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
  245. struct rsnd_dai_stream *io,
  246. snd_pcm_uframes_t *pointer)
  247. {
  248. struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
  249. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  250. struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
  251. struct dma_tx_state state;
  252. enum dma_status status;
  253. unsigned int pos = 0;
  254. status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
  255. if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
  256. if (state.residue > 0 && state.residue <= dmaen->dma_len)
  257. pos = dmaen->dma_len - state.residue;
  258. }
  259. *pointer = bytes_to_frames(runtime, pos);
  260. return 0;
  261. }
/* mod operations for transfers handled by the generic Audio DMAC */
static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.prepare	= rsnd_dmaen_prepare,
	.cleanup	= rsnd_dmaen_cleanup,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
	.get_status	= rsnd_mod_get_status,
};
  271. /*
  272. * Audio DMAC peri peri
  273. */
/*
 * Gen2 Audio DMAC peri-peri resource id tables.
 * Indexed by rsnd_dmapp_get_id(); the looked-up byte goes into PDMACHCR.
 * The 0x00 entries after SSI5..SSI8 appear to be placeholders for
 * BUSIFs those SSIs don't provide — only index 0 is ever valid there.
 */
static const u8 gen2_id_table_ssiu[] = {
	/* SSI00 ~ SSI07 */
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	/* SSI10 ~ SSI17 */
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	/* SSI20 ~ SSI27 */
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	/* SSI30 ~ SSI37 */
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	/* SSI40 ~ SSI47 */
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	/* SSI5 */
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI6 */
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI7 */
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI8 */
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI90 ~ SSI97 */
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};
/*
 * Map @mod to its peri-peri resource id.
 *
 * Which table applies depends on whether @mod is the stream's SSI/SSIU
 * (8 ids per SSI, one per BUSIF), SRC, or DVC (CMD).  Unknown or
 * out-of-range connections log an error and fall back to 0x00 (SSI00).
 */
static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;	/* sentinel: larger than any table size */
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		/* 8 table slots per SSI, selected by BUSIF sub-id */
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}
  345. static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
  346. struct rsnd_mod *mod_from,
  347. struct rsnd_mod *mod_to)
  348. {
  349. return (rsnd_dmapp_get_id(io, mod_from) << 24) +
  350. (rsnd_dmapp_get_id(io, mod_to) << 16);
  351. }
/* register address for this stream's channel: base + 0x20 + 0x10 * id + reg */
#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->base + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
/* write one peri-peri channel register */
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}
  364. static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
  365. {
  366. struct rsnd_mod *mod = rsnd_mod_get(dma);
  367. struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
  368. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  369. return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
  370. }
  371. static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
  372. {
  373. struct rsnd_mod *mod = rsnd_mod_get(dma);
  374. struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
  375. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  376. void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
  377. u32 val = ioread32(addr);
  378. val &= ~mask;
  379. val |= (data & mask);
  380. iowrite32(val, addr);
  381. }
  382. static int rsnd_dmapp_stop(struct rsnd_mod *mod,
  383. struct rsnd_dai_stream *io,
  384. struct rsnd_priv *priv)
  385. {
  386. struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
  387. int i;
  388. rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
  389. for (i = 0; i < 1024; i++) {
  390. if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
  391. return 0;
  392. udelay(1);
  393. }
  394. return -EIO;
  395. }
static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	/*
	 * Program src/dst first: dmapp->chcr already includes PDMACHCR_DE
	 * (set in rsnd_dmapp_attach()), so the final write enables the
	 * channel.
	 */
	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);

	return 0;
}
/*
 * Claim the next free peri-peri channel for this path and precompute
 * the PDMACHCR value (routing ids + enable bit) used by rsnd_dmapp_start().
 */
static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}
/* mod operations for transfers handled by Audio DMAC peri-peri */
static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name		= "audmac-pp",
	.start		= rsnd_dmapp_start,
	.stop		= rsnd_dmapp_stop,
	.quit		= rsnd_dmapp_stop,	/* quit also just stops the channel */
	.get_status	= rsnd_mod_get_status,
};
  429. /*
  430. * Common DMAC Interface
  431. */
  432. /*
  433. * DMA read/write register offset
  434. *
  435. * RSND_xxx_I_N for Audio DMAC input
  436. * RSND_xxx_O_N for Audio DMAC output
  437. * RSND_xxx_I_P for Audio DMAC peri peri input
  438. * RSND_xxx_O_P for Audio DMAC peri peri output
  439. *
  440. * ex) R-Car H2 case
  441. * mod / DMAC in / DMAC out / DMAC PP in / DMAC pp out
  442. * SSI : 0xec541000 / 0xec241008 / 0xec24100c
  443. * SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
  444. * SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
  445. * CMD : 0xec500000 / / 0xec008000 0xec308000
  446. */
/* SSI data-register offsets, Audio DMAC (native) side */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
/* SSIU offsets; (j / 4) / (j % 4) terms select the BUSIF bank within an SSI */
#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
/* SSIU offsets, peri-peri side */
#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
/* SRC offsets: _N = Audio DMAC, _P = Audio DMAC peri-peri */
#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))
#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))
/* CMD output offsets */
#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))
/*
 * Compute the Gen2 DMA register address for one end of a transfer.
 *
 * @is_play: playback vs capture direction
 * @is_from: select the source (out_addr) or destination (in_addr) side
 *
 * dma_addrs is indexed [is_ssi][is_play][use_src + use_cmd] where
 * is_ssi is 0 = SRC, 1 = SSI, 2 = SSIU (bumped below when BUSIF is used).
 * Note: ssi_reg/src_reg look unused but are referenced by the RDMA_*
 * macros via token pasting (addr ## _reg).
 */
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
		     !!(rsnd_io_to_mod_ssiu(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),		0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,		RDMA_SSI_I_N(ssi, id) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,		RDMA_SSIU_I_N(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) } } },
	};

	/*
	 * FIXME
	 *
	 * We can't support SSI9-4/5/6/7, because its address is
	 * out of calculation rule
	 */
	if ((id == 9) && (busif >= 4))
		dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
			id, busif);

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}
  529. static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
  530. struct rsnd_mod *mod,
  531. int is_play, int is_from)
  532. {
  533. struct rsnd_priv *priv = rsnd_io_to_priv(io);
  534. /*
  535. * gen1 uses default DMA addr
  536. */
  537. if (rsnd_is_gen1(priv))
  538. return 0;
  539. if (!mod)
  540. return 0;
  541. return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
  542. }
#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
/*
 * Work out which two modules the DMA owned by @this sits between.
 *
 * Builds the stream's module chain (memory/SSI at the ends, then
 * SRC -> CTU -> MIX -> DVC in the middle as present) and returns the
 * pair adjacent to @this via @mod_from / @mod_to.  A NULL entry stands
 * for the memory endpoint.
 */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi;
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	/*
	 * It should use "rcar_sound,ssiu" on DT.
	 * But, we need to keep compatibility for old version.
	 *
	 * If it has "rcar_sound.ssiu", it will be used.
	 * If not, "rcar_sound.ssi" will be used.
	 * see
	 *	rsnd_ssiu_dma_req()
	 *	rsnd_ssi_dma_req()
	 */
	if (rsnd_ssiu_of_node(priv)) {
		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);

		/* use SSIU */
		ssi = ssiu;
		if (this == rsnd_io_to_mod_ssi(io))
			this = ssiu;
	} else {
		/* keep compatible, use SSI */
		ssi = rsnd_io_to_mod_ssi(io);
	}

	if (!ssi)
		return;

	/* count the modules actually attached to this stream */
	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	/* fill the middle of the chain in fixed SRC->CTU->MIX->DVC order */
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 * chain end that @this's DMA touches:
	 *		 | SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, " %s%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}
  643. static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
  644. struct rsnd_mod **dma_mod)
  645. {
  646. struct rsnd_mod *mod_from = NULL;
  647. struct rsnd_mod *mod_to = NULL;
  648. struct rsnd_priv *priv = rsnd_io_to_priv(io);
  649. struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
  650. struct device *dev = rsnd_priv_to_dev(priv);
  651. struct rsnd_dma *dma;
  652. struct rsnd_mod_ops *ops;
  653. enum rsnd_mod_type type;
  654. int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
  655. struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
  656. int is_play = rsnd_io_is_play(io);
  657. int ret, dma_id;
  658. /*
  659. * DMA failed. try to PIO mode
  660. * see
  661. * rsnd_ssi_fallback()
  662. * rsnd_rdai_continuance_probe()
  663. */
  664. if (!dmac)
  665. return -EAGAIN;
  666. rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);
  667. /* for Gen2 or later */
  668. if (mod_from && mod_to) {
  669. ops = &rsnd_dmapp_ops;
  670. attach = rsnd_dmapp_attach;
  671. dma_id = dmac->dmapp_num;
  672. type = RSND_MOD_AUDMAPP;
  673. } else {
  674. ops = &rsnd_dmaen_ops;
  675. attach = rsnd_dmaen_attach;
  676. dma_id = dmac->dmaen_num;
  677. type = RSND_MOD_AUDMA;
  678. }
  679. /* for Gen1, overwrite */
  680. if (rsnd_is_gen1(priv)) {
  681. ops = &rsnd_dmaen_ops;
  682. attach = rsnd_dmaen_attach;
  683. dma_id = dmac->dmaen_num;
  684. type = RSND_MOD_AUDMA;
  685. }
  686. dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
  687. if (!dma)
  688. return -ENOMEM;
  689. *dma_mod = rsnd_mod_get(dma);
  690. ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
  691. type, dma_id);
  692. if (ret < 0)
  693. return ret;
  694. dev_dbg(dev, "%s %s -> %s\n",
  695. rsnd_mod_name(*dma_mod),
  696. rsnd_mod_name(mod_from ? mod_from : &mem),
  697. rsnd_mod_name(mod_to ? mod_to : &mem));
  698. ret = attach(io, dma, mod_from, mod_to);
  699. if (ret < 0)
  700. return ret;
  701. dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
  702. dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
  703. dma->mod_from = mod_from;
  704. dma->mod_to = mod_to;
  705. return 0;
  706. }
  707. int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
  708. struct rsnd_mod **dma_mod)
  709. {
  710. if (!(*dma_mod)) {
  711. int ret = rsnd_dma_alloc(io, mod, dma_mod);
  712. if (ret < 0)
  713. return ret;
  714. }
  715. return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
  716. }
/*
 * Probe the Audio DMAC peri-peri controller ("audmapp" MMIO resource).
 *
 * Missing resource or allocation failure is not fatal: the function
 * returns 0 and priv->dma is left unset, so streams fall back to
 * PIO / generic DMA paths.
 */
int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;	/* redundant after kzalloc, kept for clarity */
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}