
/drivers/thunderbolt/dma_port.c

https://github.com/acmel/linux

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3
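
/*
 * Illustrative sketch (not part of the driver): this is roughly how a
 * MAIL_IN command word for a flash read of 8 dwords starting at dword
 * address 0x100 would be packed from the fields above. See
 * dma_port_flash_read_block() below for the real code path.
 *
 *	u32 in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
 *	in |= (8 << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
 *	in |= (0x100 << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
 *	in |= MAIL_IN_OP_REQUEST;
 */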

/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}

/**
 * dma_port_alloc() - Finds the DMA control port of a switch
 * @sw: Switch from which to find the DMA port
 *
 * The function checks if the switch NHI port supports the DMA
 * configuration based mailbox capability and if it does, allocates and
 * initializes the DMA port structure. Returns %NULL if the capability
 * was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}
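
/*
 * Illustrative usage sketch, under the assumption that the caller holds a
 * valid struct tb_switch (the real caller is the switch initialization
 * path; the field name sw->dma_port here is hypothetical):
 *
 *	sw->dma_port = dma_port_alloc(sw);
 *	if (sw->dma_port) {
 *		// ... issue mailbox operations ...
 *		dma_port_free(sw->dma_port);
 *		sw->dma_port = NULL;
 *	}
 */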

static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}
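
/*
 * The handshake dma_port_request() implements, spelled out as a sketch:
 *
 *	1. Write the command word (with MAIL_IN_OP_REQUEST set) to MAIL_IN.
 *	2. Poll MAIL_IN until the firmware clears MAIL_IN_OP_REQUEST or the
 *	   timeout expires (dma_port_wait_for_completion()).
 *	3. Read MAIL_OUT and convert its status field via status_to_errno().
 */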

static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
				      const void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwords = size / 4;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (address >= DMA_PORT_CSS_ADDRESS) {
		dwaddress = DMA_PORT_CSS_ADDRESS;
		in |= MAIL_IN_CSS;
	} else {
		dwaddress = address / 4;
	}

	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;

	do {
		unsigned int offset;
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);

		ret = dma_port_flash_read_block(dma, address, dma->buf,
						ALIGN(nbytes, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		/*
		 * Copy out only the requested bytes; the block itself was
		 * read starting from the dword aligned address.
		 */
		nbytes -= offset;
		memcpy(buf, dma->buf + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
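
/*
 * Illustrative sketch (hypothetical caller): reading the first 64 bytes
 * of the active NVM image into a local buffer. Unaligned addresses and
 * sizes are handled internally via the bounce buffer.
 *
 *	u8 nvm_header[64];
 *	int ret;
 *
 *	ret = dma_port_flash_read(dma, 0, nvm_header, sizeof(nvm_header));
 *	if (ret)
 *		return ret;
 */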

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using the CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	if (address >= DMA_PORT_CSS_ADDRESS) {
		offset = 0;
		if (size > DMA_PORT_CSS_MAX_SIZE)
			return -E2BIG;
	} else {
		offset = address & 3;
		address = address & ~3;
	}

	do {
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4 - offset);
		int ret;

		/* Stage the chunk in the bounce buffer and write it out */
		memcpy(dma->buf + offset, buf, nbytes);

		ret = dma_port_flash_write_block(dma, address, dma->buf,
						 nbytes + offset);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
		offset = 0;
	} while (size > 0);

	return 0;
}
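
/*
 * Illustrative sketch (hypothetical caller): writing an image to the
 * non-active region, followed by its CSS header. Passing
 * DMA_PORT_CSS_ADDRESS routes the write to the magic CSS address instead
 * of a flash offset.
 *
 *	ret = dma_port_flash_write(dma, 0, image, image_size);
 *	if (!ret)
 *		ret = dma_port_flash_write(dma, DMA_PORT_CSS_ADDRESS,
 *					   css_header, css_size);
 */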

/**
 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area was valid, the switch starts the upgrade process where
 * the active and non-active areas get swapped in the end. The caller
 * should call dma_port_flash_update_auth_status() to get the status of
 * this command, because if the switch in question is the root switch the
 * Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers a power cycle of the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}
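
/*
 * Putting the pieces together, a firmware upgrade through this mailbox
 * would roughly follow the sketch below (hypothetical caller; the real
 * sequencing lives in the switch NVM upgrade code):
 *
 *	ret = dma_port_flash_write(dma, 0, image, image_size);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_port_flash_update_auth(dma);
 *	if (ret)
 *		return ret;
 *
 *	// Once the switch is reachable again, check how the update went;
 *	// a nonzero status reported here indicates a failed authentication.
 *	if (dma_port_flash_update_auth_status(dma, &status) > 0 && status)
 *		dma_port_power_cycle(dma);
 */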