PageRenderTime 32ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/memstick/core/ms_block.c

https://bitbucket.org/alfredchen/linux-gc
C | 2359 lines | 1737 code | 482 blank | 140 comment | 318 complexity | 978ab8bb5ac7091bdc6c9b23c9b7f4f4 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /*
  2. * ms_block.c - Sony MemoryStick (legacy) storage support
  3. * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * Minor portions of the driver were copied from mspro_block.c which is
  10. * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
  11. *
  12. */
  13. #define DRIVER_NAME "ms_block"
  14. #define pr_fmt(fmt) DRIVER_NAME ": " fmt
  15. #include <linux/module.h>
  16. #include <linux/blkdev.h>
  17. #include <linux/memstick.h>
  18. #include <linux/idr.h>
  19. #include <linux/hdreg.h>
  20. #include <linux/delay.h>
  21. #include <linux/slab.h>
  22. #include <linux/random.h>
  23. #include <linux/bitmap.h>
  24. #include <linux/scatterlist.h>
  25. #include <linux/jiffies.h>
  26. #include <linux/workqueue.h>
  27. #include <linux/mutex.h>
  28. #include "ms_block.h"
/* Module tunables:
 * debug               - debug verbosity level (0 = off)
 * cache_flush_timeout - delay in ms before the write cache is flushed
 * verify_writes       - if true, read back and compare every written block
 */
static int debug;
static int cache_flush_timeout = 1000;
static bool verify_writes;
/*
 * Copies section of 'sg_from' starting from offset 'offset' and with length
 * 'len' to another scatterlist of to_nents entries.
 * Returns the number of bytes actually mapped into 'sg_to' (0 if 'offset'
 * lies beyond the end of 'sg_from').
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

	/* Skip whole source entries until 'offset' falls inside one;
	   that entry becomes the first (possibly partial) output entry */
	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	/* Map full source entries while more than one entry's worth remains */
	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
			sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	/* Final partial entry for the tail of the requested range */
	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}
/*
 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
 * to linear buffer of length 'len' at address 'buffer'
 * Returns 0 if equal and -1 otherwise
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
	size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
		SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		/* Skip mapped chunks that lie entirely below 'offset' */
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	/* Scatterlist ran out before 'len' bytes were compared: not equal */
	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}
  107. /* Get zone at which block with logical address 'lba' lives
  108. * Flash is broken into zones.
  109. * Each zone consists of 512 eraseblocks, out of which in first
  110. * zone 494 are used and 496 are for all following zones.
  111. * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-988, etc...
  112. */
  113. static int msb_get_zone_from_lba(int lba)
  114. {
  115. if (lba < 494)
  116. return 0;
  117. return ((lba - 494) / 496) + 1;
  118. }
/* Get zone of physical block. Trivial: physical zones are uniform,
 * MS_BLOCKS_IN_ZONE eraseblocks each. */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}
  124. /* Debug test to validate free block counts */
  125. static int msb_validate_used_block_bitmap(struct msb_data *msb)
  126. {
  127. int total_free_blocks = 0;
  128. int i;
  129. if (!debug)
  130. return 0;
  131. for (i = 0; i < msb->zone_count; i++)
  132. total_free_blocks += msb->free_block_count[i];
  133. if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
  134. msb->block_count) == total_free_blocks)
  135. return 0;
  136. pr_err("BUG: free block counts don't match the bitmap");
  137. msb->read_only = true;
  138. return -EINVAL;
  139. }
  140. /* Mark physical block as used */
  141. static void msb_mark_block_used(struct msb_data *msb, int pba)
  142. {
  143. int zone = msb_get_zone_from_pba(pba);
  144. if (test_bit(pba, msb->used_blocks_bitmap)) {
  145. pr_err(
  146. "BUG: attempt to mark already used pba %d as used", pba);
  147. msb->read_only = true;
  148. return;
  149. }
  150. if (msb_validate_used_block_bitmap(msb))
  151. return;
  152. /* No races because all IO is single threaded */
  153. __set_bit(pba, msb->used_blocks_bitmap);
  154. msb->free_block_count[zone]--;
  155. }
  156. /* Mark physical block as free */
  157. static void msb_mark_block_unused(struct msb_data *msb, int pba)
  158. {
  159. int zone = msb_get_zone_from_pba(pba);
  160. if (!test_bit(pba, msb->used_blocks_bitmap)) {
  161. pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
  162. msb->read_only = true;
  163. return;
  164. }
  165. if (msb_validate_used_block_bitmap(msb))
  166. return;
  167. /* No races because all IO is single threaded */
  168. __clear_bit(pba, msb->used_blocks_bitmap);
  169. msb->free_block_count[zone]++;
  170. }
  171. /* Invalidate current register window */
  172. static void msb_invalidate_reg_window(struct msb_data *msb)
  173. {
  174. msb->reg_addr.w_offset = offsetof(struct ms_register, id);
  175. msb->reg_addr.w_length = sizeof(struct ms_id_register);
  176. msb->reg_addr.r_offset = offsetof(struct ms_register, id);
  177. msb->reg_addr.r_length = sizeof(struct ms_id_register);
  178. msb->addr_valid = false;
  179. }
/* Start a state machine: installs 'state_func' as the next-request handler,
 * kicks the host and blocks until the state machine calls
 * msb_exit_state_machine(). Returns the exit error it recorded. */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	/* state == -1 means "no state machine running" */
	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}
/* State machines call that to exit. Records 'error', parks the request
 * handler on h_msb_default_bad and wakes the waiter in
 * msb_run_state_machine(). Always returns -ENXIO so callers can
 * 'return msb_exit_state_machine(...)' to stop the request stream. */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}
/* read INT register.
 * Returns 0 when the INT value is already in mrq->data[0] (either captured
 * by an auto-INT capable host or faked as CMDNAK on poll timeout), or 1
 * when a MS_TPC_GET_INT request was queued and must be sent first.
 * 'timeout' is in ms; -1 selects the 500 ms default. */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	/* First poll of a sequence arms the deadline; once it expires we
	   report CMDNAK so the state machine gives up */
	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	/* Host captured INT together with the previous request: no TPC needed */
	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}
  232. /* Read a register */
  233. static int msb_read_regs(struct msb_data *msb, int offset, int len)
  234. {
  235. struct memstick_request *req = &msb->card->current_mrq;
  236. if (msb->reg_addr.r_offset != offset ||
  237. msb->reg_addr.r_length != len || !msb->addr_valid) {
  238. msb->reg_addr.r_offset = offset;
  239. msb->reg_addr.r_length = len;
  240. msb->addr_valid = true;
  241. memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
  242. &msb->reg_addr, sizeof(msb->reg_addr));
  243. return 0;
  244. }
  245. memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
  246. return 1;
  247. }
  248. /* Write a card register */
  249. static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
  250. {
  251. struct memstick_request *req = &msb->card->current_mrq;
  252. if (msb->reg_addr.w_offset != offset ||
  253. msb->reg_addr.w_length != len || !msb->addr_valid) {
  254. msb->reg_addr.w_offset = offset;
  255. msb->reg_addr.w_length = len;
  256. msb->addr_valid = true;
  257. memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
  258. &msb->reg_addr, sizeof(msb->reg_addr));
  259. return 0;
  260. }
  261. memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
  262. return 1;
  263. }
/* Handler for absence of IO - installed while no state machine is running;
 * any request reaching it is a bug, so refuse with -ENXIO. */
static int h_msb_default_bad(struct memstick_dev *card,
	struct memstick_request **mrq)
{
	return -ENXIO;
}
/*
 * This function is a handler for reads of one page from device.
 * Writes output to msb->current_sg, takes sector address from msb->regs.param
 * Can also be used to read extra data only. Set params accordingly.
 *
 * Exit codes: 0 ok, -EUCLEAN corrected ECC error (data still read),
 * -EBADMSG uncorrectable ECC error, -EIO command refused / unknown error.
 */
static int h_msb_read_page(struct memstick_dev *card,
	struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		   the reg window, and thus it returns request for that.
		   Then we stay in this state and retry */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If dont actually need to send the int read request (only in
		   serial mode), then just fall through */
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* Card refused the command */
		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		/* Command not finished yet - keep polling INT */
		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand source of the INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		/* fallthrough */

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		/* fallthrough */

	case MSB_RP_SEND_READ_DATA:
		/* Skip that state if we only read the oob */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		/* Map one page worth of the caller's sg for the transfer */
		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			dbg("read_page: correctable error");
			/* data was corrected by ECC - still usable */
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}
/*
 * Handler of writes of exactly one block.
 * Takes address from msb->regs.param.
 * Writes same extra data to blocks, also taken
 * from msb->regs.extra
 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
 * device refuses to take the command or something else
 */
static int h_msb_write_block(struct memstick_dev *card,
	struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: Jmicon handling of TPCs between 8 and
	 * sizeof(memstick_request.data) is broken due to hardware
	 * bug in PIO mode that is used for these TPCs
	 * Therefore split the write
	 */
	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;

	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* for last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		/* for non-last page we need BREQ before writing next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		/* fallthrough */

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		/* Must be able to map a full page; a short copy means the
		   caller's scatterlist ran out of data */
		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}
/*
 * This function is used to send simple IO requests to device that consist
 * of register write + command (for example BLOCK_ERASE, or overwrite-flag
 * updates). Command byte comes from msb->command_value; OOB registers are
 * written only when msb->command_need_oob is set.
 */
static int h_msb_send_command(struct memstick_dev *card,
	struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		/* fallthrough */

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* Wait for command-end before declaring success */
		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}
  534. /* Small handler for card reset */
  535. static int h_msb_reset(struct memstick_dev *card,
  536. struct memstick_request **out_mrq)
  537. {
  538. u8 command = MS_CMD_RESET;
  539. struct msb_data *msb = memstick_get_drvdata(card);
  540. struct memstick_request *mrq = *out_mrq = &card->current_mrq;
  541. if (mrq->error)
  542. return msb_exit_state_machine(msb, mrq->error);
  543. switch (msb->state) {
  544. case MSB_RS_SEND:
  545. memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
  546. mrq->need_card_int = 0;
  547. msb->state = MSB_RS_CONFIRM;
  548. return 0;
  549. case MSB_RS_CONFIRM:
  550. return msb_exit_state_machine(msb, 0);
  551. }
  552. BUG();
  553. }
/* This handler is used to do serial->parallel switch:
 * first flips MEMSTICK_SYS_PAM on the card, then switches the host
 * interface and confirms with a dummy GET_INT. On any error the PAM bit
 * is cleared again so the driver state matches the card. */
static int h_msb_parallel_switch(struct memstick_dev *card,
	struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set parallel interface on our side + send a dummy request
		   to see if card responds */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}
static int msb_switch_to_parallel(struct msb_data *msb);

/* Reset the card, to guard against hw errors being treated as bad blocks.
 * If 'full' is set, the host is power-cycled and returned to serial mode
 * first; then MS_CMD_RESET is sent and (if the card was in parallel mode
 * before) parallel mode is re-established. On failure the driver goes
 * read-only. */
static int msb_reset(struct msb_data *msb, bool full)
{
	/* remember interface mode so we can restore it after the reset */
	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}
  629. /* Attempts to switch interface to parallel mode */
  630. static int msb_switch_to_parallel(struct msb_data *msb)
  631. {
  632. int error;
  633. error = msb_run_state_machine(msb, h_msb_parallel_switch);
  634. if (error) {
  635. pr_err("Switch to parallel failed");
  636. msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
  637. msb_reset(msb, true);
  638. return -EFAULT;
  639. }
  640. msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
  641. return 0;
  642. }
  643. /* Changes overwrite flag on a page */
  644. static int msb_set_overwrite_flag(struct msb_data *msb,
  645. u16 pba, u8 page, u8 flag)
  646. {
  647. if (msb->read_only)
  648. return -EROFS;
  649. msb->regs.param.block_address = cpu_to_be16(pba);
  650. msb->regs.param.page_address = page;
  651. msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
  652. msb->regs.extra_data.overwrite_flag = flag;
  653. msb->command_value = MS_CMD_BLOCK_WRITE;
  654. msb->command_need_oob = true;
  655. dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
  656. flag, pba, page);
  657. return msb_run_state_machine(msb, h_msb_send_command);
  658. }
  659. static int msb_mark_bad(struct msb_data *msb, int pba)
  660. {
  661. pr_notice("marking pba %d as bad", pba);
  662. msb_reset(msb, true);
  663. return msb_set_overwrite_flag(
  664. msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
  665. }
  666. static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
  667. {
  668. dbg("marking page %d of pba %d as bad", page, pba);
  669. msb_reset(msb, true);
  670. return msb_set_overwrite_flag(msb,
  671. pba, page, ~MEMSTICK_OVERWRITE_PGST0);
  672. }
  673. /* Erases one physical block */
  674. static int msb_erase_block(struct msb_data *msb, u16 pba)
  675. {
  676. int error, try;
  677. if (msb->read_only)
  678. return -EROFS;
  679. dbg_verbose("erasing pba %d", pba);
  680. for (try = 1; try < 3; try++) {
  681. msb->regs.param.block_address = cpu_to_be16(pba);
  682. msb->regs.param.page_address = 0;
  683. msb->regs.param.cp = MEMSTICK_CP_BLOCK;
  684. msb->command_value = MS_CMD_BLOCK_ERASE;
  685. msb->command_need_oob = false;
  686. error = msb_run_state_machine(msb, h_msb_send_command);
  687. if (!error || msb_reset(msb, true))
  688. break;
  689. }
  690. if (error) {
  691. pr_err("erase failed, marking pba %d as bad", pba);
  692. msb_mark_bad(msb, pba);
  693. }
  694. dbg_verbose("erase success, marking pba %d as unused", pba);
  695. msb_mark_block_unused(msb, pba);
  696. __set_bit(pba, msb->erased_blocks_bitmap);
  697. return error;
  698. }
/* Reads one page from device into 'sg' at 'offset'; if 'extra' is non-NULL
 * the OOB (extra data) registers are copied there too.
 * MS_BLOCK_INVALID pba means an unmapped sector: the output is filled with
 * 0xFF instead of touching the card. Correctable ECC errors are logged and
 * treated as success; uncorrectable ones mark the page bad. */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg, int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			/* Skip mapped chunks entirely below 'offset' */
			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		/* 'offset' still nonzero: sg was too short to reach it */
		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);

		/* ECC corrected the data - log it but report success */
		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		/* Success, or reset failed (no point retrying) */
		if (!error || msb_reset(msb, true))
			break;
	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}
  767. /* Reads oob of page only */
  768. static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
  769. struct ms_extra_data_register *extra)
  770. {
  771. int error;
  772. BUG_ON(!extra);
  773. msb->regs.param.block_address = cpu_to_be16(pba);
  774. msb->regs.param.page_address = page;
  775. msb->regs.param.cp = MEMSTICK_CP_EXTRA;
  776. if (pba > msb->block_count) {
  777. pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
  778. return -EINVAL;
  779. }
  780. error = msb_run_state_machine(msb, h_msb_read_page);
  781. *extra = msb->regs.extra_data;
  782. if (error == -EUCLEAN) {
  783. pr_notice("correctable error on pba %d, page %d",
  784. pba, page);
  785. return 0;
  786. }
  787. return error;
  788. }
  789. /* Reads a block and compares it with data contained in scatterlist orig_sg */
  790. static int msb_verify_block(struct msb_data *msb, u16 pba,
  791. struct scatterlist *orig_sg, int offset)
  792. {
  793. struct scatterlist sg;
  794. int page = 0, error;
  795. sg_init_one(&sg, msb->block_buffer, msb->block_size);
  796. while (page < msb->pages_in_block) {
  797. error = msb_read_page(msb, pba, page,
  798. NULL, &sg, page * msb->page_size);
  799. if (error)
  800. return error;
  801. page++;
  802. }
  803. if (msb_sg_compare_to_buffer(orig_sg, offset,
  804. msb->block_buffer, msb->block_size))
  805. return -EIO;
  806. return 0;
  807. }
/* Writes exactly one block + oob for logical block 'lba' into physical
 * block 'pba', taking the data from 'sg' at 'offset'.
 * Retries once after erasing the target block; optionally verifies the
 * written data by reading it back (always for non-pre-erased blocks). */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err(
			"BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err(
		"BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	/* logical and physical blocks must live in the same zone */
	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* Sector we just wrote to is assumed erased since its pba
		   was erased. If it wasn't erased, write will succeed
		   and will just clear the bits that were set in the block
		   thus test that what we have written,
		   matches what we expect.
		   We do trust the blocks that we erased */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		/* one retry only; give up too if the card won't reset */
		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}
/* Finds a free block for write replacement: picks a random free block
 * inside 'zone' (for crude wear-leveling), marks it used and returns its
 * pba, or MS_BLOCK_INVALID (switching to read-only) when the zone has no
 * free blocks left. */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomally: %d",
		msb->free_block_count[zone], pos);

	/* Walk the used-blocks bitmap to the pos'th clear bit in the zone */
	pba = find_next_zero_bit(msb->used_blocks_bitmap,
			msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
			msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	/* Scan ran off the bitmap or out of the zone: accounting is broken */
	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: cant get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
/* Rewrites logical block 'lba' with the data in 'sg' (starting at
 * 'offset'), relocating it to a freshly allocated physical block.
 * On success the old pba is erased and the mapping table updated; after
 * 3 failed attempts the driver falls back to read-only mode.
 * Returns 0 or a negative error code. */
static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba  %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		/* Clear the stable bit on the old copy so a crash mid-update
		 * is detectable by the FTL scan */
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			/* Write failed on the media - retire this pba and
			 * retry with another free block */
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries,  switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}
/* Converts endiannes in the boot block for easy use
 * All multi-byte fields are stored big-endian on the media; this
 * rewrites them in place in CPU byte order so the rest of the driver
 * can read them directly. NOTE(review): the conversion is done in
 * place into the same fields, so it must run exactly once per page. */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}
/* Scans the first MS_BLOCK_MAX_BOOT_ADDR physical blocks for the (up
 * to two) boot blocks, records their locations and caches their
 * contents (endianness-fixed) in msb->boot_page.
 * Returns 0 on success, -EIO if no boot block is found. */
static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	/* Buffer for both boot pages; reused on re-init (e.g. resume) */
	if (!msb->boot_page) {
		page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	/* Temporary block count, so msb_read_page works during the scan */
	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn' contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}
/* Reads the factory bad-block table referenced by boot block
 * 'block_nr' (0 = primary, 1 = backup) and marks every listed pba as
 * used so the FTL never allocates it.
 * Returns 0 on success (or if the table is empty), negative error
 * code otherwise. */
static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	/* Table location is given relative to the end of the boot page */
	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	/* Round the read up to whole pages around the table */
	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err(
			"bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table */
	/* NOTE(review): 'page_offset' is a byte offset into 'buffer' yet is
	 * used here as a u16 index, and the upper bound ignores it. Looks
	 * suspicious, but matches the code as shipped - verify before
	 * changing. */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}
  1074. static int msb_ftl_initialize(struct msb_data *msb)
  1075. {
  1076. int i;
  1077. if (msb->ftl_initialized)
  1078. return 0;
  1079. msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
  1080. msb->logical_block_count = msb->zone_count * 496 - 2;
  1081. msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
  1082. msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
  1083. msb->lba_to_pba_table =
  1084. kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
  1085. if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
  1086. !msb->erased_blocks_bitmap) {
  1087. kfree(msb->used_blocks_bitmap);
  1088. kfree(msb->lba_to_pba_table);
  1089. kfree(msb->erased_blocks_bitmap);
  1090. return -ENOMEM;
  1091. }
  1092. for (i = 0; i < msb->zone_count; i++)
  1093. msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
  1094. memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
  1095. msb->logical_block_count * sizeof(u16));
  1096. dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
  1097. msb->zone_count, msb->logical_block_count);
  1098. msb->ftl_initialized = true;
  1099. return 0;
  1100. }
/* Scans every data block on the media, builds the lba -> pba mapping
 * and erases blocks that are unstable, misplaced or half-written
 * (resolving collisions where two pbas claim the same lba).
 * Returns 0 on success or a negative error code. */
static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	/* Remembered per-pba overwrite flags, needed later to resolve
	 * lba collisions in favor of the stable copy */
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice(
			"oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No lba recorded: the block is free for allocation */
		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has LBA not according to zoning*/
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		/* Two pbas claim the same lba: keep the one whose UDST bit
		 * says the update completed (stable), erase the other */
		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		/* Neither copy is stable - arbitrarily prefer the later pba */
		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}
/* Cache flush timer callback. Runs in timer (atomic) context, so it
 * only sets a flag and defers the actual flush to the IO workqueue. */
static void msb_cache_flush_timer(unsigned long data)
{
	struct msb_data *msb = (struct msb_data *)data;

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}
/* Drops the write cache without writing it back and disarms the
 * delayed-flush timer. No-op when nothing is cached. */
static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	/* Stop the timer first so it can't re-schedule a flush of the
	 * block we are about to forget */
	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}
/* Sets up the single-block write cache: the flush timer and the cache
 * buffer (reused if already allocated, e.g. on resume).
 * Returns 0 or -ENOMEM. */
static int msb_cache_init(struct msb_data *msb)
{
	setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
		(unsigned long)msb);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	/* Start with an empty cache */
	msb_cache_discard(msb);
	return 0;
}
/* Writes the dirty cache block back to the media: pages not present in
 * the cache are first read in from the old physical block, then the
 * whole block is rewritten via msb_update_block(), and pages that could
 * not be recovered are marked damaged in their overwrite flags.
 * Returns 0 on success or a negative error code. */
static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache , msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		/* Pages flagged abnormal stay invalid in the bitmap, so they
		 * get re-flagged as damaged after the rewrite below */
		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
							MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	/* The update relocated the block - re-read the new pba */
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}
  1285. static int msb_cache_write(struct msb_data *msb, int lba,
  1286. int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
  1287. {
  1288. int error;
  1289. struct scatterlist sg_tmp[10];
  1290. if (msb->read_only)
  1291. return -EROFS;
  1292. if (msb->cache_block_lba == MS_BLOCK_INVALID ||
  1293. lba != msb->cache_block_lba)
  1294. if (add_to_cache_only)
  1295. return 0;
  1296. /* If we need to write different block */
  1297. if (msb->cache_block_lba != MS_BLOCK_INVALID &&
  1298. lba != msb->cache_block_lba) {
  1299. dbg_verbose("first flush the cache");
  1300. error = msb_cache_flush(msb);
  1301. if (error)
  1302. return error;
  1303. }
  1304. if (msb->cache_block_lba == MS_BLOCK_INVALID) {
  1305. msb->cache_block_lba = lba;
  1306. mod_timer(&msb->cache_flush_timer,
  1307. jiffies + msecs_to_jiffies(cache_flush_timeout));
  1308. }
  1309. dbg_verbose("Write of LBA %d page %d to cache ", lba, page);
  1310. sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
  1311. msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
  1312. sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
  1313. msb->cache + page * msb->page_size, msb->page_size);
  1314. set_bit(page, &msb->valid_cache_bitmap);
  1315. return 0;
  1316. }
  1317. static int msb_cache_read(struct msb_data *msb, int lba,
  1318. int page, struct scatterlist *sg, int offset)
  1319. {
  1320. int pba = msb->lba_to_pba_table[lba];
  1321. struct scatterlist sg_tmp[10];
  1322. int error = 0;
  1323. if (lba == msb->cache_block_lba &&
  1324. test_bit(page, &msb->valid_cache_bitmap)) {
  1325. dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
  1326. lba, pba, page);
  1327. sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
  1328. msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
  1329. offset, msb->page_size);
  1330. sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
  1331. msb->cache + msb->page_size * page,
  1332. msb->page_size);
  1333. } else {
  1334. dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
  1335. lba, pba, page);
  1336. error = msb_read_page(msb, pba, page, NULL, sg, offset);
  1337. if (error)
  1338. return error;
  1339. msb_cache_write(msb, lba, page, true, sg, offset);
  1340. }
  1341. return error;
  1342. }
/* Emulated geometry table
 * This table content isn't that important,
 * One could put here different values, providing that they still
 * cover whole disk.
 * 64 MB entry is what windows reports for my 64M memstick */
static const struct chs_entry chs_table[] = {
/*        size sectors cylinders  heads */
	{ 4,    16,    247,     2  },
	{ 8,    16,    495,     2  },
	{ 16,   16,    495,     4  },
	{ 32,   16,    991,     4  },
	{ 64,   16,    991,     8  },
	{128,   16,    991,    16  },
	{ 0 }
};
/* Load information about the card
 * Resets the card, reads its boot blocks and derives the media
 * parameters (block/page sizes, counts, geometry, read-only state),
 * then initializes the cache and FTL and scans the media.
 * Returns 0 on success or a negative error code. */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	/* NOTE(review): this range check is equivalent to
	 * '== MEMSTICK_CLASS_ROM' - presumably written as a range for
	 * future ROM-like classes */
	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_ROM)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Due to a bug in Jmicron driver written by Alex Dubov,
	 its serial mode barely works,
	 so we switch to parallel mode right away */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save intersting attributes from boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if (msb->page_size > PAGE_SIZE) {
		/* this isn't supported by linux at all, anyway*/
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	/* NOTE(review): block_buffer is not freed by msb_data_clear() in
	 * this version - verify its lifetime on the error/remove paths */
	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	/* Pick the emulated CHS geometry matching the raw capacity */
	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;
}
  1442. static int msb_do_write_request(struct msb_data *msb, int lba,
  1443. int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
  1444. {
  1445. int error = 0;
  1446. off_t offset = 0;
  1447. *sucessfuly_written = 0;
  1448. while (offset < len) {
  1449. if (page == 0 && len - offset >= msb->block_size) {
  1450. if (msb->cache_block_lba == lba)
  1451. msb_cache_discard(msb);
  1452. dbg_verbose("Writing whole lba %d", lba);
  1453. error = msb_update_block(msb, lba, sg, offset);
  1454. if (error)
  1455. return error;
  1456. offset += msb->block_size;
  1457. *sucessfuly_written += msb->block_size;
  1458. lba++;
  1459. continue;
  1460. }
  1461. error = msb_cache_write(msb, lba, page, false, sg, offset);
  1462. if (error)
  1463. return error;
  1464. offset += msb->page_size;
  1465. *sucessfuly_written += msb->page_size;
  1466. page++;
  1467. if (page == msb->pages_in_block) {
  1468. page = 0;
  1469. lba++;
  1470. }
  1471. }
  1472. return 0;
  1473. }
  1474. static int msb_do_read_request(struct msb_data *msb, int lba,
  1475. int page, struct scatterlist *sg, int len, int *sucessfuly_read)
  1476. {
  1477. int error = 0;
  1478. int offset = 0;
  1479. *sucessfuly_read = 0;
  1480. while (offset < len) {
  1481. error = msb_cache_read(msb, lba, page, sg, offset);
  1482. if (error)
  1483. return error;
  1484. offset += msb->page_size;
  1485. *sucessfuly_read += msb->page_size;
  1486. page++;
  1487. if (page == msb->pages_in_block) {
  1488. page = 0;
  1489. lba++;
  1490. }
  1491. }
  1492. return 0;
  1493. }
/* Main IO work item: drains the block request queue, translating each
 * request into page-granular reads/writes through the cache layer, and
 * performs deferred cache flushes requested by the flush timer.
 * q_lock protects msb->req and the flush flag. */
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	unsigned long flags;
	struct scatterlist *sg = msb->prealloc_sg;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irqsave(&msb->q_lock, flags);

		/* Deferred flush from the cache timer takes priority */
		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irqrestore(&msb->q_lock, flags);
			msb_cache_flush(msb);
			continue;
		}

		if (!msb->req) {
			msb->req = blk_fetch_request(msb->queue);
			if (!msb->req) {
				dbg_verbose("IO: no more requests exiting");
				spin_unlock_irqrestore(&msb->q_lock, flags);
				return;
			}
		}

		spin_unlock_irqrestore(&msb->q_lock, flags);

		/* If card was removed meanwhile */
		if (!msb->req)
			return;

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, msb->req, sg);

		/* Convert the 512-byte sector position into a block number
		 * (lba) and a page index within that block */
		lba = blk_rq_pos(msb->req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(msb->req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(msb->req), &len);

		spin_lock_irqsave(&msb->q_lock, flags);

		/* First complete the bytes that succeeded, then (if the
		 * request is still alive) fail one page's worth for the
		 * error so the request eventually terminates */
		if (len)
			if (!__blk_end_request(msb->req, 0, len))
				msb->req = NULL;

		if (error && msb->req) {
			dbg_verbose("IO: ending one sector of the request with error");
			if (!__blk_end_request(msb->req, error, msb->page_size))
				msb->req = NULL;
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");

		spin_unlock_irqrestore(&msb->q_lock, flags);
	}
}
static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
  1550. static int msb_bd_open(struct block_device *bdev, fmode_t mode)
  1551. {
  1552. struct gendisk *disk = bdev->bd_disk;
  1553. struct msb_data *msb = disk->private_data;
  1554. dbg_verbose("block device open");
  1555. mutex_lock(&msb_disk_lock);
  1556. if (msb && msb->card)
  1557. msb->usage_count++;
  1558. mutex_unlock(&msb_disk_lock);
  1559. return 0;
  1560. }
  1561. static void msb_data_clear(struct msb_data *msb)
  1562. {
  1563. kfree(msb->boot_page);
  1564. kfree(msb->used_blocks_bitmap);
  1565. kfree(msb->lba_to_pba_table);
  1566. kfree(msb->cache);
  1567. msb->card = NULL;
  1568. }
/* Drops one usage reference; when the last reference goes away the
 * disk number is released and the msb structure freed. Shared by the
 * block-device release hook and msb_remove(). */
static int msb_disk_release(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	dbg_verbose("block device release");
	mutex_lock(&msb_disk_lock);

	if (msb) {
		if (msb->usage_count)
			msb->usage_count--;

		if (!msb->usage_count) {
			disk->private_data = NULL;
			idr_remove(&msb_disk_idr, msb->disk_id);
			put_disk(disk);
			kfree(msb);
		}
	}
	mutex_unlock(&msb_disk_lock);
	return 0;
}
/* Block-device release hook: just drops one disk reference */
static void msb_bd_release(struct gendisk *disk, fmode_t mode)
{
	msb_disk_release(disk);
}
/* Reports the emulated CHS geometry (chosen from chs_table in
 * msb_init_card) for the HDIO_GETGEO ioctl. */
static int msb_bd_getgeo(struct block_device *bdev,
				struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;
	*geo = msb->geometry;
	return 0;
}
/* Request-queue callback (invoked by the block layer with q_lock held).
 * Fails everything immediately if the card is gone, otherwise kicks
 * the IO work item unless a request is already in flight. */
static void msb_submit_req(struct request_queue *q)
{
	struct memstick_dev *card = q->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = NULL;

	dbg_verbose("Submit request");

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, -ENODEV);
		return;
	}

	/* The worker is already processing a request; it will fetch the
	 * next one itself */
	if (msb->req)
		return;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);
}
  1616. static int msb_check_card(struct memstick_dev *card)
  1617. {
  1618. struct msb_data *msb = memstick_get_drvdata(card);
  1619. return (msb->card_dead == 0);
  1620. }
/* Quiesces all IO: stops the request queue, disarms the flush timer,
 * drains the IO workqueue and requeues any half-finished request so it
 * can be replayed after msb_start(). */
static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	spin_lock_irqsave(&msb->q_lock, flags);
	blk_stop_queue(msb->queue);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	/* Put back the request the worker had in flight */
	if (msb->req) {
		spin_lock_irqsave(&msb->q_lock, flags);
		blk_requeue_request(msb->queue, msb->req);
		msb->req = NULL;
		spin_unlock_irqrestore(&msb->q_lock, flags);
	}
}
/* Restarts IO after msb_stop() (resume path, or initial start from
 * msb_init_disk). No-op unless the queue was actually stopped and the
 * card is still present. */
static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick cache flush anyway, its harmless */
	msb->need_flush_cache = true;
	/* NOTE(review): these two flags are written outside q_lock -
	 * presumably safe given the stop/start ordering, but verify */
	msb->io_queue_stopped = false;

	spin_lock_irqsave(&msb->q_lock, flags);
	blk_start_queue(msb->queue);
	spin_unlock_irqrestore(&msb->q_lock, flags);

	queue_work(msb->io_queue, &msb->io_work);
}
/* Block-device operations for the emulated disk */
static const struct block_device_operations msb_bdops = {
	.open    = msb_bd_open,
	.release = msb_bd_release,
	.getgeo  = msb_bd_getgeo,
	.owner   = THIS_MODULE
};
  1665. /* Registers the block device */
  1666. static int msb_init_disk(struct memstick_dev *card)
  1667. {
  1668. struct msb_data *msb = memstick_get_drvdata(card);
  1669. struct memstick_host *host = card->host;
  1670. int rc;
  1671. u64 limit = BLK_BOUNCE_HIGH;
  1672. unsigned long capacity;
  1673. if (host->dev.dma_mask && *(host->dev.dma_mask))
  1674. limit = *(host->dev.dma_mask);
  1675. mutex_lock(&msb_disk_lock);
  1676. msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
  1677. mutex_unlock(&msb_disk_lock);
  1678. if (msb->disk_id < 0)
  1679. return msb->disk_id;
  1680. msb->disk = alloc_disk(0);
  1681. if (!msb->disk) {
  1682. rc = -ENOMEM;
  1683. goto out_release_id;
  1684. }
  1685. msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
  1686. if (!msb->queue) {
  1687. rc = -ENOMEM;
  1688. goto out_put_disk;
  1689. }
  1690. msb->queue->queuedata = card;
  1691. blk_queue_bounce_limit(msb->queue, limit);
  1692. blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
  1693. blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
  1694. blk_queue_max_segment_size(msb->queue,
  1695. MS_BLOCK_MAX_PAGES * msb->page_size);
  1696. blk_queue_logical_block_size(msb->queue, msb->page_size);
  1697. sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
  1698. msb->disk->fops = &msb_bdops;
  1699. msb->disk->private_data = msb;
  1700. msb->disk->queue = msb->queue;
  1701. msb->disk->flags |= GENHD_FL_EXT_DEVT;
  1702. capacity = msb->pages_in_block * msb->logical_block_count;
  1703. capacity *= (msb->page_size / 512);
  1704. set_capacity(msb->disk, capacity);
  1705. dbg("Set total disk size to %lu sectors", capacity);
  1706. msb->usage_count = 1;
  1707. msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
  1708. INIT_WORK(&msb->io_work, msb_io_work);
  1709. sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
  1710. if (msb->read_only)
  1711. set_disk_ro(msb->disk, 1);
  1712. msb_start(card);
  1713. device_add_disk(&card->dev, msb->disk);
  1714. dbg("Disk added");
  1715. return 0;
  1716. out_put_disk:
  1717. put_disk(msb->disk);
  1718. out_release_id:
  1719. mutex_lock(&msb_disk_lock);
  1720. idr_remove(&msb_disk_idr, msb->disk_id);
  1721. mutex_unlock(&msb_disk_lock);
  1722. return rc;
  1723. }
/* Driver probe: allocates the per-card state, reads/initializes the
 * media and registers the block device. On any failure everything is
 * torn down and the error returned. */
static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		/* Hook the memstick core callbacks only once fully set up */
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}
/*
 * Driver remove entry point: tear down the block device and free the
 * per-card state.  The ordering below is deliberate and must not change.
 */
static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	/* Quiesce the I/O workqueue first, unless msb_stop() already ran. */
	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	/*
	 * With card_dead set, the request function will fail all pending and
	 * future requests; restarting the queue lets it drain them now.
	 */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	blk_start_queue(msb->queue);
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_cleanup_queue(msb->queue);
	msb->queue = NULL;

	/*
	 * Clear the card data under msb_disk_lock so it cannot race with
	 * msb_disk_release() freeing the same state on last disk put.
	 */
	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	/* Drop our reference; frees msb once all disk users are gone. */
	msb_disk_release(msb->disk);
	memstick_set_drvdata(card, NULL);
}
  1772. #ifdef CONFIG_PM
/*
 * PM suspend hook: stop the request queue and the I/O workqueue so no
 * card access is in flight while the host powers down.  'state' is unused.
 */
static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}
  1778. static int msb_resume(struct memstick_dev *card)
  1779. {
  1780. struct msb_data *msb = memstick_get_drvdata(card);
  1781. struct msb_data *new_msb = NULL;
  1782. bool card_dead = true;
  1783. #ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
  1784. msb->card_dead = true;
  1785. return 0;
  1786. #endif
  1787. mutex_lock(&card->host->lock);
  1788. new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
  1789. if (!new_msb)
  1790. goto out;
  1791. new_msb->card = card;
  1792. memstick_set_drvdata(card, new_msb);
  1793. spin_lock_init(&new_msb->q_lock);
  1794. sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
  1795. if (msb_init_card(card))
  1796. goto out;
  1797. if (msb->block_size != new_msb->block_size)
  1798. goto out;
  1799. if (memcmp(msb->boot_page, new_msb->boot_page,
  1800. sizeof(struct ms_boot_page)))
  1801. goto out;
  1802. if (msb->logical_block_count != new_msb->logical_block_count ||
  1803. memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
  1804. msb->logical_block_count))
  1805. goto out;
  1806. if (msb->block_count != new_msb->block_count ||
  1807. memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
  1808. msb->block_count / 8))
  1809. goto out;
  1810. card_dead = false;
  1811. out:
  1812. if (card_dead)
  1813. dbg("Card was removed/replaced during suspend");
  1814. msb->card_dead = card_dead;
  1815. memstick_set_drvdata(card, msb);
  1816. if (new_msb) {
  1817. msb_data_clear(new_msb);
  1818. kfree(new_msb);
  1819. }
  1820. msb_start(card);
  1821. mutex_unlock(&card->host->lock);
  1822. return 0;
  1823. }
  1824. #else
  1825. #define msb_suspend NULL
  1826. #define msb_resume NULL
  1827. #endif /* CONFIG_PM */
/*
 * Devices handled by this driver: all legacy MemoryStick storage classes
 * (flash, ROM, read-only, write-protected) plus Duo storage media.
 */
static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
/* Memstick bus driver binding: probe/remove and PM callbacks. */
static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};
  1853. static int __init msb_init(void)
  1854. {
  1855. int rc = memstick_register_driver(&msb_driver);
  1856. if (rc)
  1857. pr_err("failed to register memstick driver (error %d)\n", rc);
  1858. return rc;
  1859. }
/*
 * Module exit: unregister the driver first (triggers msb_remove() for any
 * bound cards), then destroy the now-empty disk-id IDR.
 */
static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}
module_init(msb_init);
module_exit(msb_exit);

/* Read-only at runtime; set at module load time. */
module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (1000 default)");
/* Writable by root at runtime to adjust verbosity. */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");