/filesystems/unixfs/minixfs/minixfs.c

http://macfuse.googlecode.com/ · C · 615 lines · 502 code · 95 blank · 18 comment · 103 complexity · 3c0e3a0ea40ea96218c97f9e5044bc0d MD5 · raw file

  1. /*
  2. * Minix File System Family for MacFUSE
  3. * Amit Singh
  4. * http://osxbook.com
  5. *
  6. * Most of the code in this file comes from the Linux kernel implementation
  7. * of the minix file systems. See fs/minix/ in the Linux kernel source tree.
  8. *
  9. * The code is Copyright (c) its various authors. It is covered by the
  10. * GNU GENERAL PUBLIC LICENSE Version 2.
  11. */
  12. #include "minixfs.h"
  13. #include <errno.h>
  14. #include <fcntl.h>
  15. #include <stdio.h>
  16. #include <stdlib.h>
  17. #include <string.h>
  18. #include <unistd.h>
  19. #include <sys/ioctl.h>
  20. #include <sys/stat.h>
/* Forward declarations for the static helpers defined in this file. */
static unsigned long count_free(struct buffer_head*[], unsigned, __u32);
static unsigned long minix_count_free_blocks(struct minix_sb_info*);
static struct minix_inode* minix_V1_raw_inode(struct super_block*, ino_t,
                                              struct buffer_head**);
static struct minix2_inode* minix_V2_raw_inode(struct super_block*, ino_t,
                                               struct buffer_head**);
static unsigned long minix_count_free_inodes(struct minix_sb_info*);
static int minix_iget_v1(struct super_block*, struct inode*);
static int minix_iget_v2(struct super_block*, struct inode*);
static unsigned minix_last_byte(struct inode*, unsigned long);
static int minix_get_dirpage(struct inode*, sector_t, char*);
static ino_t minix_find_entry(struct inode*, const char*);

/* Zone-pointer block mapping, implemented elsewhere (V1 and V2 layouts). */
extern int minix_get_block_v1(struct inode*, sector_t, off_t*);
extern int minix_get_block_v2(struct inode*, sector_t, off_t*);

/* nibblemap[x] = number of zero (free) bits in the 4-bit value x
 * (e.g. nibblemap[0x0] = 4, nibblemap[0xF] = 0). */
static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
  36. static unsigned long
  37. count_free(struct buffer_head* map[], unsigned numblocks, __u32 numbits)
  38. {
  39. unsigned i, j, sum = 0;
  40. struct buffer_head* bh;
  41. for (i = 0; i < numblocks - 1; i++) {
  42. if (!(bh = map[i]))
  43. return 0;
  44. for (j = 0; j < bh->b_size; j++)
  45. sum += nibblemap[bh->b_data[j] & 0xf]
  46. + nibblemap[(bh->b_data[j] >> 4) & 0xf];
  47. }
  48. if (numblocks == 0 || !(bh = map[numblocks - 1]))
  49. return 0;
  50. i = ((numbits - (numblocks - 1) * bh->b_size * 8) / 16) * 2;
  51. for (j = 0; j < i; j++) {
  52. sum += nibblemap[bh->b_data[j] & 0xf]
  53. + nibblemap[(bh->b_data[j] >> 4) & 0xf];
  54. }
  55. i = numbits % 16;
  56. if (i != 0) {
  57. i = *(__u16*)(&bh->b_data[j]) | ~((1 << i) - 1);
  58. sum += nibblemap[i & 0xf] + nibblemap[(i >> 4) & 0xf];
  59. sum += nibblemap[(i >> 8) & 0xf] + nibblemap[(i >> 12) & 0xf];
  60. }
  61. return(sum);
  62. }
  63. static unsigned long
  64. minix_count_free_blocks(struct minix_sb_info* sbi)
  65. {
  66. return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
  67. sbi->s_nzones - sbi->s_firstdatazone + 1) << sbi->s_log_zone_size);
  68. }
  69. struct minix_inode*
  70. minix_V1_raw_inode(struct super_block* sb, ino_t ino, struct buffer_head** bh)
  71. {
  72. int block;
  73. struct minix_sb_info* sbi = minix_sb(sb);
  74. struct minix_inode* p;
  75. if (!ino || ino > sbi->s_ninodes) {
  76. printk("Bad inode number on dev %s: %ld is out of range\n",
  77. sb->s_id, (long)ino);
  78. return NULL;
  79. }
  80. ino--;
  81. block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
  82. ino / MINIX_INODES_PER_BLOCK;
  83. int ret = sb_bread_intobh(sb, block, *bh);
  84. if (ret != 0) {
  85. printk("Unable to read inode block\n");
  86. return NULL;
  87. }
  88. p = (void *)(*bh)->b_data;
  89. return p + ino % MINIX_INODES_PER_BLOCK;
  90. }
  91. struct minix2_inode*
  92. minix_V2_raw_inode(struct super_block* sb, ino_t ino, struct buffer_head** bh)
  93. {
  94. int block;
  95. struct minix_sb_info* sbi = minix_sb(sb);
  96. struct minix2_inode* p;
  97. int minix2_inodes_per_block = sb->s_blocksize / sizeof(struct minix2_inode);
  98. if (!ino || ino > sbi->s_ninodes) {
  99. printk("Bad inode number on dev %s: %ld is out of range\n",
  100. sb->s_id, (long)ino);
  101. return NULL;
  102. }
  103. ino--;
  104. block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
  105. ino / minix2_inodes_per_block;
  106. int ret = sb_bread_intobh(sb, block, *bh);
  107. if (ret != 0) {
  108. printk("Unable to read inode block\n");
  109. return NULL;
  110. }
  111. p = (void *)(*bh)->b_data;
  112. return p + ino % minix2_inodes_per_block;
  113. }
  114. static unsigned long
  115. minix_count_free_inodes(struct minix_sb_info* sbi)
  116. {
  117. return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
  118. }
  119. static int
  120. minix_iget_v1(struct super_block* sb, struct inode* inode)
  121. {
  122. struct buffer_head _bh;
  123. struct buffer_head* bh = &_bh;;
  124. bh->b_flags.dynamic = 0;
  125. struct minix_inode* raw_inode;
  126. struct minix_inode_info* minix_inode = minix_i(inode);
  127. int i;
  128. raw_inode = minix_V1_raw_inode(inode->I_sb, inode->I_ino, &bh);
  129. if (!raw_inode)
  130. return -1;
  131. inode->I_mode = raw_inode->di_mode;
  132. inode->I_uid = (uid_t)raw_inode->di_uid;
  133. inode->I_gid = (gid_t)raw_inode->di_gid;
  134. inode->I_nlink = raw_inode->di_nlinks;
  135. inode->I_size = raw_inode->di_size;
  136. inode->I_mtime.tv_sec = inode->I_atime.tv_sec = inode->I_ctime.tv_sec =
  137. raw_inode->di_time;
  138. inode->I_mtime.tv_nsec = 0;
  139. inode->I_atime.tv_nsec = 0;
  140. inode->I_ctime.tv_nsec = 0;
  141. inode->I_blocks = 0;
  142. for (i = 0; i < 9; i++)
  143. minix_inode->u.i1_data[i] = raw_inode->di_zone[i];
  144. if (S_ISCHR(inode->I_mode) || S_ISBLK(inode->I_mode))
  145. inode->I_rdev = old_decode_dev(raw_inode->di_zone[0]);
  146. minix_inode->i_dir_start_lookup = 0;
  147. brelse(bh);
  148. return 0;
  149. }
  150. static int
  151. minix_iget_v2(struct super_block* sb, struct inode* inode)
  152. {
  153. struct buffer_head _bh;
  154. struct buffer_head* bh = &_bh;;
  155. bh->b_flags.dynamic = 0;
  156. struct minix2_inode* raw_inode;
  157. struct minix_inode_info* minix_inode = minix_i(inode);
  158. int i;
  159. raw_inode = minix_V2_raw_inode(inode->I_sb, inode->I_ino, &bh);
  160. if (!raw_inode)
  161. return -1;
  162. inode->I_mode = raw_inode->di_mode;
  163. inode->I_uid = (uid_t)raw_inode->di_uid;
  164. inode->I_gid = (gid_t)raw_inode->di_gid;
  165. inode->I_nlink = raw_inode->di_nlinks;
  166. inode->I_size = raw_inode->di_size;
  167. inode->I_mtime.tv_sec = raw_inode->di_mtime;
  168. inode->I_atime.tv_sec = raw_inode->di_atime;
  169. inode->I_ctime.tv_sec = raw_inode->di_ctime;
  170. inode->I_mtime.tv_nsec = 0;
  171. inode->I_atime.tv_nsec = 0;
  172. inode->I_ctime.tv_nsec = 0;
  173. inode->I_blocks = 0;
  174. for (i = 0; i < 10; i++)
  175. minix_inode->u.i2_data[i] = raw_inode->di_zone[i];
  176. if (S_ISCHR(inode->I_mode) || S_ISBLK(inode->I_mode))
  177. inode->I_rdev = old_decode_dev(raw_inode->di_zone[0]);
  178. minix_inode->i_dir_start_lookup = 0;
  179. brelse(bh);
  180. return 0;
  181. }
  182. static unsigned
  183. minix_last_byte(struct inode* inode, unsigned long page_nr)
  184. {
  185. unsigned last_byte = PAGE_CACHE_SIZE;
  186. if (page_nr == (inode->I_size >> PAGE_CACHE_SHIFT))
  187. last_byte = inode->I_size & (PAGE_CACHE_SIZE - 1);
  188. return last_byte;
  189. }
  190. static int
  191. minix_get_dirpage(struct inode* dir, sector_t index, char* pagebuf)
  192. {
  193. return minixfs_get_page(dir, index, pagebuf);
  194. }
  195. static inline void* minix_next_entry(void* de, struct minix_sb_info* sbi)
  196. {
  197. return (void*)((char*)de + sbi->s_dirsize);
  198. }
  199. static ino_t
  200. minix_find_entry(struct inode* dir, const char* name)
  201. {
  202. struct super_block* sb = dir->I_sb;
  203. struct minix_sb_info* sbi = minix_sb(sb);
  204. int namelen = strlen(name);
  205. unsigned long n;
  206. unsigned long npages = minix_dir_pages(dir);
  207. char page[PAGE_SIZE];
  208. char* p;
  209. ino_t test = 0, result = 0;
  210. char* namx;
  211. for (n = 0; n < npages; n++) {
  212. char* kaddr;
  213. char* limit;
  214. if (minix_get_dirpage(dir, n, page) == 0) {
  215. kaddr = page;
  216. limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
  217. for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
  218. if (sbi->s_version == MINIX_V3) {
  219. minix3_dirent* de3 = (minix3_dirent*)p;
  220. namx = de3->name;
  221. test = de3->inode;
  222. } else {
  223. minix_dirent* de = (minix_dirent*)p;
  224. namx = de->name;
  225. test = de->inode;
  226. }
  227. if (!test)
  228. continue;
  229. if (minix_namecompare(namelen, sbi->s_namelen, name, namx)) {
  230. result = test;
  231. goto found;
  232. }
  233. }
  234. }
  235. }
  236. found:
  237. return result;
  238. }
/*
 * Read and validate a MINIX superblock from the device open on `fd`,
 * build the in-core super_block/minix_sb_info pair, and load the inode
 * and zone bitmaps. Recognizes V1 (two magics, 14/30-char names),
 * V2 (two magics) and V3 layouts. Returns the new super_block, or NULL
 * on failure; all allocations made here are released on error paths.
 * `data` is unused; `silent` suppresses some diagnostics.
 */
struct super_block*
minixfs_fill_super(int fd, void* data, int silent)
{
    struct super_block* sb = NULL;
    struct minix_sb_info *sbi = NULL;
    struct minix_super_block* ms = NULL;
    struct minix3_super_block* m3s = NULL;
    unsigned long i, block;
    int ret = -EINVAL;
    struct buffer_head** map;
    struct buffer_head _bh;       /* stack buffer for the superblock read */
    struct buffer_head* bh = &_bh;
    bh->b_flags.dynamic = 0;
    sb = calloc(1, sizeof(struct super_block));
    if (!sb)
        return NULL;
    sb->s_bdev = fd;
    sb->s_flags |= MS_RDONLY;     /* this implementation is read-only */
    sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
    if (!sbi)
        goto failed_nomem;
    sb->s_fs_info = sbi;
    /* On-disk layouts are fixed: V1 inodes are 32 bytes, V2 are 64. */
    BUILD_BUG_ON(32 != sizeof (struct minix_inode));
    BUILD_BUG_ON(64 != sizeof(struct minix2_inode));
    sb->s_blocksize = BLOCK_SIZE;
    sb->s_blocksize_bits = BLOCK_SIZE_BITS;
    /* The superblock lives in block 1 (block 0 is the boot block). */
    ret = sb_bread_intobh(sb, 1, bh);
    if (ret != 0)
        goto out_bad_sb;
    ms = (struct minix_super_block*)bh->b_data;
    /* NOTE(review): ms and s_sbh point into stack-local _bh and are kept
     * past this function's return — confirm b_data remains valid. */
    sbi->s_ms = ms;
    sbi->s_sbh = bh;
    sbi->s_mount_state = ms->s_state;
    sbi->s_ninodes = ms->s_ninodes;
    sbi->s_nzones = ms->s_nzones;
    sbi->s_imap_blocks = ms->s_imap_blocks;
    sbi->s_zmap_blocks = ms->s_zmap_blocks;
    sbi->s_firstdatazone = ms->s_firstdatazone;
    sbi->s_log_zone_size = ms->s_log_zone_size;
    sbi->s_max_size = ms->s_max_size;
    sb->s_magic = ms->s_magic;
    if (sb->s_magic == MINIX_SUPER_MAGIC) {
        /* V1, 14-character names, 16-byte records */
        sbi->s_version = MINIX_V1;
        sbi->s_dirsize = 16;
        sbi->s_namelen = 14;
        sbi->s_link_max = MINIX_LINK_MAX;
    } else if (sb->s_magic == MINIX_SUPER_MAGIC2) {
        /* V1, 30-character names, 32-byte records */
        sbi->s_version = MINIX_V1;
        sbi->s_dirsize = 32;
        sbi->s_namelen = 30;
        sbi->s_link_max = MINIX_LINK_MAX;
    } else if (sb->s_magic == MINIX2_SUPER_MAGIC) {
        /* V2, 14-character names; zone count moves to s_zones */
        sbi->s_version = MINIX_V2;
        sbi->s_nzones = ms->s_zones;
        sbi->s_dirsize = 16;
        sbi->s_namelen = 14;
        sbi->s_link_max = MINIX2_LINK_MAX;
    } else if (sb->s_magic == MINIX2_SUPER_MAGIC2) {
        /* V2, 30-character names */
        sbi->s_version = MINIX_V2;
        sbi->s_nzones = ms->s_zones;
        sbi->s_dirsize = 32;
        sbi->s_namelen = 30;
        sbi->s_link_max = MINIX2_LINK_MAX;
    } else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
        /* V3: different superblock layout; its magic sits at offset 24 */
        m3s = (struct minix3_super_block *) bh->b_data;
        sb->s_magic = m3s->s_magic;
        sbi->s_imap_blocks = m3s->s_imap_blocks;
        sbi->s_zmap_blocks = m3s->s_zmap_blocks;
        sbi->s_firstdatazone = m3s->s_firstdatazone;
        sbi->s_log_zone_size = m3s->s_log_zone_size;
        sbi->s_max_size = m3s->s_max_size;
        sbi->s_ninodes = m3s->s_ninodes;
        sbi->s_nzones = m3s->s_zones;
        sbi->s_dirsize = 64;
        sbi->s_namelen = 60;
        sbi->s_version = MINIX_V3;
        sbi->s_link_max = MINIX2_LINK_MAX;
        sbi->s_mount_state = MINIX_VALID_FS;
        sb->s_blocksize = m3s->s_blocksize;
        /* NOTE(review): stores the byte size, not its log2 — looks wrong
         * if s_blocksize_bits is a shift count (cf. BLOCK_SIZE_BITS
         * above); verify against consumers of s_blocksize_bits. */
        sb->s_blocksize_bits = m3s->s_blocksize;
    } else
        goto out_no_fs;
    /*
     * Allocate the buffer map to keep the superblock small.
     */
    if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
        goto out_illegal_sb;
    i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
    map = kzalloc(i, GFP_KERNEL);
    if (!map)
        goto out_no_map;
    /* One contiguous array: inode-map pointers first, then zone map. */
    sbi->s_imap = &map[0];
    sbi->s_zmap = &map[sbi->s_imap_blocks];
    /* Bitmaps start at block 2: inode map first, then the zone map. */
    block=2;
    for (i=0 ; i < sbi->s_imap_blocks ; i++) {
        if (!(sbi->s_imap[i] = sb_bread(sb, block)))
            goto out_no_bitmap;
        block++;
    }
    for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
        if (!(sbi->s_zmap[i] = sb_bread(sb, block)))
            goto out_no_bitmap;
        block++;
    }
    /* Slot 0 of each bitmap is reserved; force it to read as in-use. */
    minix_set_bit(0, sbi->s_imap[0]->b_data);
    minix_set_bit(0, sbi->s_zmap[0]->b_data);
    /* read the root inode */
    if (!(sbi->s_mount_state & MINIX_VALID_FS))
        printk("MINIX-fs: mounting unchecked file system, "
               "running fsck is recommended\n");
    else if (sbi->s_mount_state & MINIX_ERROR_FS)
        printk("MINIX-fs: mounting file system with errors, "
               "running fsck is recommended\n");
    return sb;
out_no_bitmap:
    printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
    /* out_freemap: */
    /* brelse(NULL) entries are tolerated for the blocks never read. */
    for (i = 0; i < sbi->s_imap_blocks; i++)
        brelse(sbi->s_imap[i]);
    for (i = 0; i < sbi->s_zmap_blocks; i++)
        brelse(sbi->s_zmap[i]);
    kfree(sbi->s_imap);   /* frees the whole map array (imap is &map[0]) */
    goto out_release;
out_no_map:
    ret = -ENOMEM;
    if (!silent)
        printk("MINIX-fs: can't allocate map\n");
    goto out_release;
out_illegal_sb:
    if (!silent)
        printk("MINIX-fs: bad superblock\n");
    goto out_release;
out_no_fs:
    if (!silent)
        printk("VFS: Can't find a Minix filesystem V1 | V2 | V3\n");
out_release:
    brelse(bh);
    goto out;
    /*out_bad_hblock:*/
    /* NOTE(review): the two lines below are unreachable — their label
     * (out_bad_hblock) is commented out and nothing falls through. */
    printk("MINIX-fs: blocksize too small for device\n");
    goto out;
out_bad_sb:
    printk("MINIX-fs: unable to read superblock\n");
out:
failed_nomem:
    /* Common failure exit: release whatever was allocated. */
    if (sb)
        free(sb);
    if (sbi)
        free(sbi);
    return NULL;
}
  390. int
  391. minixfs_statvfs(struct super_block* sb, struct statvfs* buf)
  392. {
  393. struct minix_sb_info* sbi = minix_sb(sb);
  394. buf->f_bsize = sb->s_blocksize;
  395. buf->f_frsize = sb->s_blocksize;
  396. buf->f_blocks =
  397. (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
  398. buf->f_bfree = minix_count_free_blocks(sbi);
  399. buf->f_bavail = buf->f_bfree;
  400. buf->f_files = sbi->s_ninodes;
  401. buf->f_ffree = minix_count_free_inodes(sbi);
  402. buf->f_namemax = sbi->s_namelen;
  403. return 0;
  404. }
  405. int
  406. minixfs_iget(struct super_block* sb, struct inode* inode)
  407. {
  408. if (INODE_VERSION(inode) == MINIX_V1)
  409. return minix_iget_v1(sb, inode);
  410. else
  411. return minix_iget_v2(sb, inode);
  412. }
  413. ino_t
  414. minix_inode_by_name(struct inode* dir, const char* name)
  415. {
  416. return minix_find_entry(dir, name);
  417. }
/*
 * Sequential directory iterator: deliver the record at *offset in `dir`
 * into `dent` and advance *offset by one record. Returns 0 on success
 * (dent->ino is 0 for a deleted/empty slot, in which case dent->name is
 * left untouched), -1 at end of directory, or the error code from
 * minixfs_get_page. The page cached in `dirbuf` persists across calls
 * and is (re)filled at each page boundary.
 */
int
minixfs_next_direntry(struct inode* dir, struct unixfs_dirbuf* dirbuf,
off_t* offset, struct unixfs_direntry* dent)
{
    struct super_block* sb = dir->I_sb;
    struct minix_sb_info* sbi = minix_sb(sb);
    unsigned long start, n;
    unsigned long npages = minix_dir_pages(dir);
    char *dirpagebuf = dirbuf->data;
    unsigned chunk_size = sbi->s_dirsize;
    /* Round up to the next record boundary (s_dirsize is a power of two
     * for every supported variant: 16, 32 or 64 bytes). */
    *offset = (*offset + chunk_size - 1) & ~(chunk_size - 1);
    if (*offset >= dir->I_size)
        return -1;
    if (npages == 0)
        return -1;
    start = *offset >> PAGE_CACHE_SHIFT; /* which page from offset */
    if (start >= npages)
        return -1;
    n = start;
    /* Refill the cached page on first use or when *offset lands on a
     * page boundary.
     * NOTE(review): a caller seeking mid-page into a different page
     * would read stale data — this assumes sequential iteration. */
    if (!dirbuf->flags.initialized || (*offset & ((PAGE_SIZE - 1))) == 0) {
        int ret = minixfs_get_page(dir, n, dirpagebuf);
        if (ret)
            return ret;
        dirbuf->flags.initialized = 1;
    }
    char* name = NULL;
    /* V3 records differ in layout; both expose `inode` and `name`. */
    if (sbi->s_version == MINIX_V3) {
        minix3_dirent* de3 =
            (minix3_dirent*)((char*)dirpagebuf + (*offset & (PAGE_SIZE - 1)));
        dent->ino = de3->inode;
        name = de3->name;
    } else {
        minix_dirent* de =
            (minix_dirent*)((char*)dirpagebuf + (*offset & (PAGE_SIZE - 1)));
        dent->ino = de->inode;
        name = de->name;
    }
    if (dent->ino) {
        /* NOTE(review): on-disk names need not be NUL-terminated at
         * s_namelen, so strlen may scan past the record; the min() caps
         * the copy but not the scan — confirm the page buffer always
         * contains a terminator. */
        size_t nl = min(strlen(name), sbi->s_namelen);
        memcpy(dent->name, name, nl);
        dent->name[nl] = '\0';
    }
    *offset += chunk_size;
    return 0;
}
  463. int
  464. minixfs_get_block(struct inode* inode, sector_t iblock, off_t* result)
  465. {
  466. if (INODE_VERSION(inode) == MINIX_V1)
  467. return minix_get_block_v1(inode, iblock, result);
  468. else
  469. return minix_get_block_v2(inode, iblock, result);
  470. }
  471. int
  472. minixfs_get_page(struct inode* inode, sector_t index, char* pagebuf)
  473. {
  474. sector_t iblock, lblock;
  475. unsigned int blocksize;
  476. int nr, i;
  477. blocksize = 1 << inode->I_blkbits;
  478. iblock = index << (PAGE_CACHE_SHIFT - inode->I_blkbits);
  479. lblock = (inode->I_size + blocksize - 1) >> inode->I_blkbits;
  480. nr = 0;
  481. i = 0;
  482. int bytes = 0, err = 0;
  483. struct super_block* sb = inode->I_sb;
  484. struct buffer_head _bh;
  485. char* p = pagebuf;
  486. do {
  487. off_t phys64 = 0;
  488. int ret = minixfs_get_block(inode, iblock, &phys64);
  489. if (phys64) {
  490. struct buffer_head* bh = &_bh;
  491. bh->b_flags.dynamic = 0;
  492. if (sb_bread_intobh(sb, phys64, bh) == 0) {
  493. memcpy(p, bh->b_data, blocksize);
  494. p += blocksize;
  495. bytes += blocksize;
  496. brelse(bh);
  497. } else {
  498. err = EIO;
  499. fprintf(stderr, "*** fatal error: I/O error reading page\n");
  500. abort();
  501. exit(10);
  502. }
  503. } else if (ret == 0) { /* zero fill */
  504. memset(p, 0, blocksize);
  505. p += blocksize;
  506. bytes += blocksize;
  507. } else {
  508. err = EIO;
  509. fprintf(stderr, "*** fatal error: block mapping failed\n");
  510. abort();
  511. }
  512. iblock++;
  513. if ((bytes >= PAGE_SIZE) || (iblock >= lblock))
  514. break;
  515. } while (1);
  516. if (err)
  517. return -1;
  518. /* check page? */
  519. return 0;
  520. }