PageRenderTime 28ms CodeModel.GetById 17ms RepoModel.GetById 0ms app.codeStats 0ms

/sc7731_kernel/block/partition-generic.c

https://gitlab.com/envieidoc/sprd_project
C | 582 lines | 446 code | 88 blank | 48 comment | 56 complexity | 6fd4ccb3581d0fd49408db37afb013d3 MD5 | raw file
  1. /*
  2. * Code extracted from drivers/block/genhd.c
  3. * Copyright (C) 1991-1998 Linus Torvalds
  4. * Re-organised Feb 1998 Russell King
  5. *
  6. * We now have independent partition support from the
  7. * block drivers, which allows all the partition code to
  8. * be grouped in one location, and it to be mostly self
  9. * contained.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/module.h>
  13. #include <linux/fs.h>
  14. #include <linux/slab.h>
  15. #include <linux/kmod.h>
  16. #include <linux/ctype.h>
  17. #include <linux/genhd.h>
  18. #include <linux/blktrace_api.h>
  19. #include "partitions/check.h"
  20. #ifdef CONFIG_BLK_DEV_MD
  21. extern void md_autodetect_dev(dev_t dev);
  22. #endif
  23. /*
  24. * disk_name() is used by partition check code and the genhd driver.
  25. * It formats the devicename of the indicated disk into
  26. * the supplied buffer (of size at least 32), and returns
  27. * a pointer to that same buffer (for convenience).
  28. */
  29. char *disk_name(struct gendisk *hd, int partno, char *buf)
  30. {
  31. if (!partno)
  32. snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
  33. else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
  34. snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
  35. else
  36. snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
  37. return buf;
  38. }
  39. const char *bdevname(struct block_device *bdev, char *buf)
  40. {
  41. return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf);
  42. }
  43. EXPORT_SYMBOL(bdevname);
  44. /*
  45. * There's very little reason to use this, you should really
  46. * have a struct block_device just about everywhere and use
  47. * bdevname() instead.
  48. */
  49. const char *__bdevname(dev_t dev, char *buffer)
  50. {
  51. scnprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)",
  52. MAJOR(dev), MINOR(dev));
  53. return buffer;
  54. }
  55. EXPORT_SYMBOL(__bdevname);
  56. static ssize_t part_partition_show(struct device *dev,
  57. struct device_attribute *attr, char *buf)
  58. {
  59. struct hd_struct *p = dev_to_part(dev);
  60. return sprintf(buf, "%d\n", p->partno);
  61. }
  62. static ssize_t part_start_show(struct device *dev,
  63. struct device_attribute *attr, char *buf)
  64. {
  65. struct hd_struct *p = dev_to_part(dev);
  66. return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
  67. }
  68. ssize_t part_size_show(struct device *dev,
  69. struct device_attribute *attr, char *buf)
  70. {
  71. struct hd_struct *p = dev_to_part(dev);
  72. return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p));
  73. }
  74. static ssize_t part_ro_show(struct device *dev,
  75. struct device_attribute *attr, char *buf)
  76. {
  77. struct hd_struct *p = dev_to_part(dev);
  78. return sprintf(buf, "%d\n", p->policy ? 1 : 0);
  79. }
  80. static ssize_t part_alignment_offset_show(struct device *dev,
  81. struct device_attribute *attr, char *buf)
  82. {
  83. struct hd_struct *p = dev_to_part(dev);
  84. return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
  85. }
  86. static ssize_t part_discard_alignment_show(struct device *dev,
  87. struct device_attribute *attr, char *buf)
  88. {
  89. struct hd_struct *p = dev_to_part(dev);
  90. return sprintf(buf, "%u\n", p->discard_alignment);
  91. }
/*
 * sysfs "stat": per-partition I/O statistics in the same 11-field
 * layout as /proc/diskstats — read ios/merges/sectors/ticks, write
 * ios/merges/sectors/ticks, in-flight count, io_ticks, time_in_queue
 * (tick fields converted from jiffies to milliseconds).  Do not change
 * the format string: userspace parses it positionally.
 */
ssize_t part_stat_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	int cpu;

	/* fold any not-yet-accounted in-flight time into the counters */
	cpu = part_stat_lock();
	part_round_stats(cpu, p);
	part_stat_unlock();
	return sprintf(buf,
		"%8lu %8lu %8llu %8u "
		"%8lu %8lu %8llu %8u "
		"%8u %8u %8u"
		"\n",
		part_stat_read(p, ios[READ]),
		part_stat_read(p, merges[READ]),
		(unsigned long long)part_stat_read(p, sectors[READ]),
		jiffies_to_msecs(part_stat_read(p, ticks[READ])),
		part_stat_read(p, ios[WRITE]),
		part_stat_read(p, merges[WRITE]),
		(unsigned long long)part_stat_read(p, sectors[WRITE]),
		jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
		part_in_flight(p),
		jiffies_to_msecs(part_stat_read(p, io_ticks)),
		jiffies_to_msecs(part_stat_read(p, time_in_queue)));
}
  117. ssize_t part_inflight_show(struct device *dev,
  118. struct device_attribute *attr, char *buf)
  119. {
  120. struct hd_struct *p = dev_to_part(dev);
  121. return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
  122. atomic_read(&p->in_flight[1]));
  123. }
  124. #ifdef CONFIG_FAIL_MAKE_REQUEST
  125. ssize_t part_fail_show(struct device *dev,
  126. struct device_attribute *attr, char *buf)
  127. {
  128. struct hd_struct *p = dev_to_part(dev);
  129. return sprintf(buf, "%d\n", p->make_it_fail);
  130. }
  131. ssize_t part_fail_store(struct device *dev,
  132. struct device_attribute *attr,
  133. const char *buf, size_t count)
  134. {
  135. struct hd_struct *p = dev_to_part(dev);
  136. int i;
  137. if (count > 0 && sscanf(buf, "%d", &i) > 0)
  138. p->make_it_fail = (i == 0) ? 0 : 1;
  139. return count;
  140. }
  141. #endif
/* Read-only sysfs attributes created for every partition device. */
static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show,
		   NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
/*
 * "make-it-fail" contains '-', which DEVICE_ATTR()'s token pasting
 * cannot produce, so spell out __ATTR() by hand (writable by root).
 */
static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
#endif
/* Default attribute set attached via part_type to each partition. */
static struct attribute *part_attrs[] = {
	&dev_attr_partition.attr,
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_ro.attr,
	&dev_attr_alignment_offset.attr,
	&dev_attr_discard_alignment.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
	NULL
};

static struct attribute_group part_attr_group = {
	.attrs = part_attrs,
};

/* NULL-terminated group list; blktrace adds its group when configured. */
static const struct attribute_group *part_attr_groups[] = {
	&part_attr_group,
#ifdef CONFIG_BLK_DEV_IO_TRACE
	&blk_trace_attr_group,
#endif
	NULL
};
  179. static void part_release(struct device *dev)
  180. {
  181. struct hd_struct *p = dev_to_part(dev);
  182. free_part_stats(p);
  183. free_part_info(p);
  184. kfree(p);
  185. }
  186. static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
  187. {
  188. struct hd_struct *part = dev_to_part(dev);
  189. add_uevent_var(env, "PARTN=%u", part->partno);
  190. if (part->info && part->info->volname[0])
  191. add_uevent_var(env, "PARTNAME=%s", part->info->volname);
  192. return 0;
  193. }
/*
 * Device type shared by all partition devices: wires the sysfs
 * attribute groups, uevent environment and release path into the
 * driver core.
 */
struct device_type part_type = {
	.name = "partition",
	.groups = part_attr_groups,
	.release = part_release,
	.uevent = part_uevent,
};
/*
 * RCU callback: runs after a grace period, once no RCU reader can
 * still reach the partition through the partition table.  Clears the
 * geometry and statistics, then drops the device reference — the final
 * put frees the hd_struct via part_release().
 */
static void delete_partition_rcu_cb(struct rcu_head *head)
{
	struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);

	part->start_sect = 0;
	part->nr_sects = 0;
	part_stat_set_all(part, 0);
	put_device(part_to_dev(part));
}
/*
 * Queue RCU-deferred teardown of @part (see delete_partition_rcu_cb).
 * NOTE(review): presumably invoked from hd_struct_put() when the last
 * reference goes away — the caller is defined elsewhere; confirm.
 */
void __delete_partition(struct hd_struct *part)
{
	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
}
/*
 * delete_partition - remove partition @partno from @disk
 *
 * Unpublishes the partition from the table and the last-lookup cache
 * first, so new lookups cannot find it, then tears down the sysfs
 * holders dir and the partition device, releases the devt and drops
 * the table's reference.  The hd_struct itself is freed later, RCU
 * deferred, once in-flight readers are done.
 */
void delete_partition(struct gendisk *disk, int partno)
{
	struct disk_part_tbl *ptbl = disk->part_tbl;
	struct hd_struct *part;

	/* silently ignore slots that don't exist or are already empty */
	if (partno >= ptbl->len)
		return;

	part = ptbl->part[partno];
	if (!part)
		return;

	rcu_assign_pointer(ptbl->part[partno], NULL);
	rcu_assign_pointer(ptbl->last_lookup, NULL);
	kobject_put(part->holder_dir);
	device_del(part_to_dev(part));

	blk_free_devt(part_devt(part));
	hd_struct_put(part);
}
/*
 * sysfs "whole_disk": its mere existence marks a partition created
 * with ADDPART_FLAG_WHOLEDISK (one spanning the whole device);
 * reading it yields nothing.
 */
static ssize_t whole_disk_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return 0;
}
static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
		   whole_disk_show, NULL);
  235. struct hd_struct *add_partition(struct gendisk *disk, int partno,
  236. sector_t start, sector_t len, int flags,
  237. struct partition_meta_info *info)
  238. {
  239. struct hd_struct *p;
  240. dev_t devt = MKDEV(0, 0);
  241. struct device *ddev = disk_to_dev(disk);
  242. struct device *pdev;
  243. struct disk_part_tbl *ptbl;
  244. const char *dname;
  245. int err;
  246. err = disk_expand_part_tbl(disk, partno);
  247. if (err)
  248. return ERR_PTR(err);
  249. ptbl = disk->part_tbl;
  250. if (ptbl->part[partno])
  251. return ERR_PTR(-EBUSY);
  252. p = kzalloc(sizeof(*p), GFP_KERNEL);
  253. if (!p)
  254. return ERR_PTR(-EBUSY);
  255. if (!init_part_stats(p)) {
  256. err = -ENOMEM;
  257. goto out_free;
  258. }
  259. seqcount_init(&p->nr_sects_seq);
  260. pdev = part_to_dev(p);
  261. p->start_sect = start;
  262. p->alignment_offset =
  263. queue_limit_alignment_offset(&disk->queue->limits, start);
  264. p->discard_alignment =
  265. queue_limit_discard_alignment(&disk->queue->limits, start);
  266. p->nr_sects = len;
  267. p->partno = partno;
  268. p->policy = get_disk_ro(disk);
  269. if (info) {
  270. struct partition_meta_info *pinfo = alloc_part_info(disk);
  271. if (!pinfo)
  272. goto out_free_stats;
  273. memcpy(pinfo, info, sizeof(*info));
  274. p->info = pinfo;
  275. }
  276. dname = dev_name(ddev);
  277. if (isdigit(dname[strlen(dname) - 1]))
  278. dev_set_name(pdev, "%sp%d", dname, partno);
  279. else
  280. dev_set_name(pdev, "%s%d", dname, partno);
  281. device_initialize(pdev);
  282. pdev->class = &block_class;
  283. pdev->type = &part_type;
  284. pdev->parent = ddev;
  285. err = blk_alloc_devt(p, &devt);
  286. if (err)
  287. goto out_free_info;
  288. pdev->devt = devt;
  289. /* delay uevent until 'holders' subdir is created */
  290. dev_set_uevent_suppress(pdev, 1);
  291. err = device_add(pdev);
  292. if (err)
  293. goto out_put;
  294. err = -ENOMEM;
  295. p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
  296. if (!p->holder_dir)
  297. goto out_del;
  298. dev_set_uevent_suppress(pdev, 0);
  299. if (flags & ADDPART_FLAG_WHOLEDISK) {
  300. err = device_create_file(pdev, &dev_attr_whole_disk);
  301. if (err)
  302. goto out_del;
  303. }
  304. /* everything is up and running, commence */
  305. rcu_assign_pointer(ptbl->part[partno], p);
  306. /* suppress uevent if the disk suppresses it */
  307. if (!dev_get_uevent_suppress(ddev))
  308. kobject_uevent(&pdev->kobj, KOBJ_ADD);
  309. hd_ref_init(p);
  310. return p;
  311. out_free_info:
  312. free_part_info(p);
  313. out_free_stats:
  314. free_part_stats(p);
  315. out_free:
  316. kfree(p);
  317. return ERR_PTR(err);
  318. out_del:
  319. kobject_put(p->holder_dir);
  320. device_del(pdev);
  321. out_put:
  322. put_device(pdev);
  323. blk_free_devt(devt);
  324. return ERR_PTR(err);
  325. }
  326. static bool disk_unlock_native_capacity(struct gendisk *disk)
  327. {
  328. const struct block_device_operations *bdops = disk->fops;
  329. if (bdops->unlock_native_capacity &&
  330. !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {
  331. printk(KERN_CONT "enabling native capacity\n");
  332. bdops->unlock_native_capacity(disk);
  333. disk->flags |= GENHD_FL_NATIVE_CAPACITY;
  334. return true;
  335. } else {
  336. printk(KERN_CONT "truncated\n");
  337. return false;
  338. }
  339. }
/*
 * drop_partitions - delete every partition of @disk
 *
 * Refuses with -EBUSY while any partition device is open
 * (bd_part_count), and propagates failures from invalidating the
 * whole-disk device.  Iterates with DISK_PITER_INCL_EMPTY so
 * zero-length slots are cleared as well.
 */
static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
{
	struct disk_part_iter piter;
	struct hd_struct *part;
	int res;

	if (bdev->bd_part_count)
		return -EBUSY;
	res = invalidate_partition(disk, 0);
	if (res)
		return res;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
	while ((part = disk_part_iter_next(&piter)))
		delete_partition(disk, part->partno);
	disk_part_iter_exit(&piter);

	return 0;
}
/*
 * rescan_partitions - re-read @disk's partition table and recreate
 * the partition devices.
 *
 * Drops all existing partitions, revalidates the disk, parses the
 * table and adds each valid entry.  Whenever the parser or an entry
 * runs past end-of-disk and the native capacity can be unlocked, the
 * whole scan restarts from the "rescan" label.  Returns 0 on success
 * or when there is nothing to scan, a negative errno otherwise.
 */
int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
{
	struct parsed_partitions *state = NULL;
	struct hd_struct *part;
	int p, highest, res;
rescan:
	/* free a previous parse when looping back after a capacity unlock */
	if (state && !IS_ERR(state)) {
		free_partitions(state);
		state = NULL;
	}

	res = drop_partitions(disk, bdev);
	if (res)
		return res;

	if (disk->fops->revalidate_disk)
		disk->fops->revalidate_disk(disk);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;

	/* empty media, or no recognizable partition table: nothing to add */
	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
		return 0;
	if (IS_ERR(state)) {
		/*
		 * I/O error reading the partition table. If any
		 * partition code tried to read beyond EOD, retry
		 * after unlocking native capacity.
		 */
		if (PTR_ERR(state) == -ENOSPC) {
			printk(KERN_WARNING "%s: partition table beyond EOD, ",
			       disk->disk_name);
			if (disk_unlock_native_capacity(disk))
				goto rescan;
		}
		return -EIO;
	}
	/*
	 * If any partition code tried to read beyond EOD, try
	 * unlocking native capacity even if partition table is
	 * successfully read as we could be missing some partitions.
	 */
	if (state->access_beyond_eod) {
		printk(KERN_WARNING
		       "%s: partition table partially beyond EOD, ",
		       disk->disk_name);
		if (disk_unlock_native_capacity(disk))
			goto rescan;
	}

	/* tell userspace that the media / partition table may have changed */
	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);

	/* Detect the highest partition number and preallocate
	 * disk->part_tbl. This is an optimization and not strictly
	 * necessary.
	 */
	for (p = 1, highest = 0; p < state->limit; p++)
		if (state->parts[p].size)
			highest = p;
	disk_expand_part_tbl(disk, highest);

	/* add partitions */
	for (p = 1; p < state->limit; p++) {
		sector_t size, from;
		struct partition_meta_info *info = NULL;

		size = state->parts[p].size;
		if (!size)
			continue;

		from = state->parts[p].from;
		if (from >= get_capacity(disk)) {
			/* wholly beyond EOD: skip (or grow the disk and retry) */
			printk(KERN_WARNING
			       "%s: p%d start %llu is beyond EOD, ",
			       disk->disk_name, p, (unsigned long long) from);
			if (disk_unlock_native_capacity(disk))
				goto rescan;
			continue;
		}

		if (from + size > get_capacity(disk)) {
			printk(KERN_WARNING
			       "%s: p%d size %llu extends beyond EOD, ",
			       disk->disk_name, p, (unsigned long long) size);

			if (disk_unlock_native_capacity(disk)) {
				/* free state and restart */
				goto rescan;
			} else {
				/*
				 * we can not ignore partitions of broken tables
				 * created by for example camera firmware, but
				 * we limit them to the end of the disk to avoid
				 * creating invalid block devices
				 */
				size = get_capacity(disk) - from;
			}
		}

		if (state->parts[p].has_info)
			info = &state->parts[p].info;
		/*
		 * NOTE(review): 'info' is computed above but the call below
		 * passes &state->parts[p].info unconditionally, leaving
		 * 'info' unused.  Mainline passes 'info' (NULL when there is
		 * no metadata); this looks like a deliberate vendor change so
		 * every partition gets PARTNAME/uuid data — confirm intent.
		 */
		part = add_partition(disk, p, from, size,
				     state->parts[p].flags,
				     &state->parts[p].info);
		if (IS_ERR(part)) {
			printk(KERN_ERR " %s: p%d could not be added: %ld\n",
			       disk->disk_name, p, -PTR_ERR(part));
			continue;
		}
#ifdef CONFIG_BLK_DEV_MD
		/* hand RAID-autodetect partitions over to the md driver */
		if (state->parts[p].flags & ADDPART_FLAG_RAID)
			md_autodetect_dev(part_to_dev(part)->devt);
#endif
	}
	free_partitions(state);
	return 0;
}
/*
 * invalidate_partitions - drop all partitions and zero @disk's
 * capacity, used when the media is gone.  No-op unless the block
 * device was marked invalidated.  Returns 0 or the error from
 * drop_partitions() (e.g. -EBUSY while a partition is open).
 */
int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
{
	int res;

	if (!bdev->bd_invalidated)
		return 0;

	res = drop_partitions(disk, bdev);
	if (res)
		return res;

	set_capacity(disk, 0);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;
	/* tell userspace that the media / partition table may have changed */
	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);

	return 0;
}
  477. unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
  478. {
  479. struct address_space *mapping = bdev->bd_inode->i_mapping;
  480. struct page *page;
  481. page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
  482. NULL);
  483. if (!IS_ERR(page)) {
  484. if (PageError(page))
  485. goto fail;
  486. p->v = page;
  487. return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
  488. fail:
  489. page_cache_release(page);
  490. }
  491. p->v = NULL;
  492. return NULL;
  493. }
  494. EXPORT_SYMBOL(read_dev_sector);