
/drivers/staging/spectra/ffsport.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2

/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/async.h>

/**** Helper functions used for Div, Remainder operation on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_Calc_Used_Bits
 * Inputs:       Power-of-2 number
 * Outputs:      log2 of the input (the index of its highest set bit),
 *               or 0 if the argument is 0
 * Description:  Calculate the bit position of a given power-of-2 number
 *               by successive halving. The input can be up to 32 bits.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	int tot_bits = 0;

	if (n >= 1 << 16) {
		n >>= 16;
		tot_bits += 16;
	}

	if (n >= 1 << 8) {
		n >>= 8;
		tot_bits += 8;
	}

	if (n >= 1 << 4) {
		n >>= 4;
		tot_bits += 4;
	}

	if (n >= 1 << 2) {
		n >>= 2;
		tot_bits += 2;
	}

	if (n >= 1 << 1)
		tot_bits += 1;

	return (n == 0) ? 0 : tot_bits;
}
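
/* Worked example: GLOB_Calc_Used_Bits(8) returns 3 (8 == 1 << 3), so
 * GLOB_u64_Div(addr, 8) below reduces to addr >> 3, i.e. addr / 8. For a
 * non-power-of-2 input the result is floor(log2(n)). */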

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Div
 * Inputs:       Dividend (u64)
 *               Divisor, a power-of-2 number
 * Outputs:      Quotient of the division
 * Description:  Divide the address by the divisor using a bit shift
 *               (essentially without explicitly using "/").
 *               The divisor is a power of 2 and the dividend is a u64.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Remainder
 * Inputs:       Dividend (u64)
 *               Divisor type (1 - page address, 2 - block address)
 * Outputs:      Remainder of the division
 * Description:  Calculate the remainder of a u64 number divided by a
 *               power-of-2 divisor using shift and multiply operations
 *               (essentially without explicitly using "/").
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = (addr >> DeviceInfo.nBitsInPageDataSize);
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
		result = result * DeviceInfo.wBlockDataSize;
	}

	result = addr - result;

	return result;
}
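
/* Worked example (assuming a 2048-byte page, i.e. DeviceInfo with
 * nBitsInPageDataSize == 11 and wPageDataSize == 2048):
 * GLOB_u64_Remainder(5000, 1) computes 5000 - (5000 >> 11) * 2048 =
 * 5000 - 4096 = 904, which equals 5000 % 2048. */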

#define NUM_DEVICES		1
#define PARTITIONS		8

#define GLOB_SBD_NAME		"nd"
#define GLOB_SBD_IRQ_NUM	(29)

#define GLOB_SBD_IOCTL_GC			(0x7701)
#define GLOB_SBD_IOCTL_WL			(0x7702)
#define GLOB_SBD_IOCTL_FORMAT			(0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH		(0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE		(0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE		(0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE	(0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO		(0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA		(0x7709)
#define GLOB_SBD_IOCTL_READ_DATA		(0x770A)
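
/* A minimal user-space sketch of driving these ioctls (illustration only,
 * not part of the driver). It assumes the first disk shows up as /dev/nda,
 * per GLOB_SBD_NAME "nd" and the disk_name format used in
 * SBD_setup_device(), and that the caller shares the driver's definition
 * of struct spectra_indentfy_dev_tag from ffsport.h:
 *
 *	int fd = open("/dev/nda", O_RDWR);
 *	struct spectra_indentfy_dev_tag info;
 *
 *	if (fd >= 0 && ioctl(fd, GLOB_SBD_IOCTL_GET_NAND_INFO, &info) == 0)
 *		printf("page data size: %d\n", (int)info.PageDataSize);
 *	ioctl(fd, GLOB_SBD_IOCTL_FLUSH_CACHE, 0);
 *	close(fd);
 */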

static int reserved_mb;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 0)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");

struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;
	u16 users;
	spinlock_t qlock;
	void __iomem *ioaddr;	/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;
};

static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;

static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}

#if CMD_DMA
	if (glob_ftl_execute_cmds())
		return -EIO;
	else
		return 0;
#endif
	return 0;
}

struct ioctl_rw_page_info {
	u8 *data;
	unsigned int page;
};

static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}

static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result = PASS;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, (void __user *)info.data,
			   IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}

/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}

/* Return how many blocks should be reserved for the OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserve at least 1 block for the block table */

	return res_blks;
}
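
/* Worked example: with 2048-byte pages and 64 pages per block (a 128 KiB
 * erase block), loading the module with reserved_mb=25 reserves
 * (25 * 1024 * 1024) / 131072 = 200 blocks for the OS image. */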

/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_flags & REQ_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
		       "capacity! sector %d, current_nr_sectors %d, "
		       "while capacity is %d\n",
		       (int)blk_rq_pos(req),
		       blk_rq_cur_sectors(req),
		       (int)get_capacity(tr->gd));
		return -EIO;
	}

	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
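
	/* Example: with 2048-byte pages, ratio == 4 sectors per page. If
	 * the offset-adjusted start falls on 512-byte sector 6, then
	 * rsect == 6 % 4 == 2, so the first (partial) page supplies at
	 * most tsect == 2 sectors; the page-aligned middle of the request
	 * is then moved one full page at a time, and any sub-page tail is
	 * staged through tr->tmp_buf. */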

	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	case WRITE:
		/* Write the first NAND page (read-modify-write for the
		 * unaligned head) */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND pages (read-modify-write for the
		 * sub-page tail) */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
		}
#if CMD_DMA
		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;
#endif
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %d\n", rq_data_dir(req));
		return -EIO;
	}
}

/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
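
/* Note: the FTL page calls above may sleep, so the actual I/O cannot run
 * in the request function below, which is entered with the queue spinlock
 * held. The request function therefore only wakes the worker thread;
 * spectra_trans_thread() drains the queue in process context. */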

/* Request function that "handles clustering". */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;
	wake_up_process(pdev->thread);
}

static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}

static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	return 0;
}

static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}

int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		   unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}
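
/* All ioctl callers are serialized by a driver-local mutex; this is the
 * usual replacement for the old big-kernel-lock protected .ioctl path. */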

static DEFINE_MUTEX(ffsport_mutex);

int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&ffsport_mutex);
	ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
	mutex_unlock(&ffsport_mutex);

	return ret;
}

static const struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.ioctl = GLOB_SBD_unlocked_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};

static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;
	int ret = -ENOMEM;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		       "for OS image, %d blocks for bad block replacement.\n",
		       get_res_blk_num_os(),
		       get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);
	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		return -ENOMEM;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized. Aborting\n");
		goto out_free_buf;
	}
	dev->queue->queuedata = dev;

	/* The Linux block layer doesn't support >4KB hardware sectors,
	 * so report a 512 byte hardware sector size to the kernel */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_flush(dev->queue, REQ_FLUSH);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		ret = PTR_ERR(dev->thread);
		dev->thread = NULL;
		goto out_cleanup_queue;
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting\n");
		goto out_stop_thread;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;

out_stop_thread:
	kthread_stop(dev->thread);
	dev->thread = NULL;
out_cleanup_queue:
	blk_cleanup_queue(dev->queue);
	dev->queue = NULL;
out_free_buf:
	kfree(dev->tmp_buf);
	dev->tmp_buf = NULL;
	return ret;
}
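
/* Worked example (hypothetical geometry, reserved_mb left at 0): with
 * 2048-byte pages, 64 pages per block and 4096 data blocks,
 * get_res_blk_num_bad_blk() reserves 409 blocks and get_res_blk_num_os()
 * is clamped to 1, so dev->size = 2048 * 64 * 3686 = 483131392 bytes and
 * set_capacity() is called with 943616 sectors (~460 MiB). */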

/*
static ssize_t show_nand_block_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);

static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
			"failed to create sysfs entry nand_page_size.\n");
}
*/

static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
{
	int i;

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		return;
	} else {
		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
			       "Num blocks=%d, pagesperblock=%d, "
			       "pagedatasize=%d, ECCBytesPerSector=%d\n",
			       (int)IdentifyDeviceData.NumBlocks,
			       (int)IdentifyDeviceData.PagesPerBlock,
			       (int)IdentifyDeviceData.PageDataSize,
			       (int)IdentifyDeviceData.wECCBytesPerSector);
	}

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra",
		       GLOB_SBD_majornum);
		goto out_ftl_flash_register;
	}

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i))
			goto out_blk_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return;

out_blk_register:
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
	printk(KERN_ERR "Spectra: Module load failed.\n");
}

int register_spectra_ftl(void)
{
	async_schedule(register_spectra_ftl_async, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_spectra_ftl);

static int GLOB_SBD_init(void)
{
	/* Set debug output level (0~3) here. 3 is most verbose */
	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];

		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}

		/* Stop the transfer thread before tearing down its queue */
		if (dev->thread)
			kthread_stop(dev->thread);

		if (dev->queue)
			blk_cleanup_queue(dev->queue);

		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);