
/fs/affs/file.c

https://github.com/mstsirkin/linux
/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif
static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);

const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= generic_file_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
	.truncate	= affs_truncate,
	.setattr	= affs_notify_change,
};
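
/*
 * Both on-disk variants (OFS and FFS) share these file_operations; the
 * format split happens one layer down, at the address_space_operations
 * level (affs_aops vs affs_aops_ofs below). The .read/.write entries
 * are the 3.x-era synchronous wrappers over the generic AIO paths.
 */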

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("AFFS: open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("AFFS: release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		mutex_lock(&inode->i_mutex);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}
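
/*
 * i_opencnt counts concurrent opens. On the last release, any pending
 * size change (i_size differing from mmu_private, the size actually
 * backed by allocated blocks) is resolved by affs_truncate(), and
 * preallocated blocks are handed back to the bitmap so they are not
 * leaked while the file sits closed.
 */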

static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
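
/*
 * The extent cache is a single zeroed page split in half: a "linear"
 * array i_lc[] holding the key of every (1 << i_lc_shift)-th extent
 * block, and an associative array i_ac[] of recently used (ext, key)
 * pairs. When the file outgrows the linear cache, i_lc_shift grows
 * and the entries are re-spaced. With illustrative numbers only: at
 * i_lc_shift == 2, i_lc[3] caches the key of extent block 12, so a
 * lookup of extent 14 walks at most two links of the on-disk
 * extension chain instead of fourteen.
 */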

static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
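
/*
 * AFFS block checksums are arranged so that the 32-bit sum of all
 * words in a block comes out to zero, which makes single-word updates
 * cheap to patch incrementally: replacing a word that held `tmp` with
 * `blocknr` changes the sum by (blocknr - tmp), and that delta is
 * exactly what affs_adjust_checksum() compensates for above, avoiding
 * a full affs_fix_checksum() pass over the block.
 */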

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}

static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		if (ext > AFFS_I(inode)->i_extcnt)
			BUG();

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
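
/*
 * Lookup order in the slow path: (1) sequential access continues from
 * the currently cached extent block, (2) ext == 0 restarts at the
 * file header, (3) an extent one past the end is freshly allocated,
 * and only then (4) the linear and associative caches are consulted
 * before walking the on-disk extension chain. The "unlock/lock cache"
 * comments appear to be leftovers from an older locking scheme;
 * callers are expected to serialize extent access (affs_get_block()
 * below takes affs_lock_ext()).
 */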

static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*ext_bh;
	u32			 ext;

	pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	/* cast is safe: the BUG_ON above caps block at 0x7fffffff */
	affs_error(inode->i_sb, "get_block", "strange block request %d", (int)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
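
/*
 * Each extent block indexes s_hashsize data blocks, so a logical
 * block number splits into (extent index, slot). As an illustration,
 * on a 512-byte-block volume s_hashsize is 72, so logical block 100
 * maps to ext = 100 / 72 = 1 and slot = 100 - 72 = 28 within that
 * extent block.
 */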

static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				affs_get_block,
				&AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.readpage = affs_readpage,
	.writepage = affs_writepage,
	.write_begin = affs_write_begin,
	.write_end = generic_write_end,
	.bmap = _affs_bmap
};
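
/*
 * These address_space operations serve FFS-style volumes, where data
 * blocks hold raw file data and the generic buffer-head paths apply
 * directly. OFS volumes store a 24-byte header in every data block,
 * so they need the custom _ofs variants further down instead.
 */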

static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
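
/*
 * The three helpers above use a throwaway on-stack buffer_head purely
 * as an output parameter for affs_get_block(): it receives the
 * physical block number and the state bits (notably BH_New), which
 * are then copied onto the real buffer_head obtained from the buffer
 * cache.
 */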

static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
	BUG_ON(from > to || to > PAGE_CACHE_SIZE);
	kmap(page);
	data = page_address(page);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (from < to) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			/* don't leak the kmap taken above */
			kunmap(page);
			return PTR_ERR(bh);
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(from + tmp > to || tmp > bsize);
		memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		from += tmp;
		boff = 0;
	}
	flush_dcache_page(page);
	kunmap(page);
	return 0;
}
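
/*
 * On OFS, file data cannot be copied page-at-a-time because every
 * data block carries its own header; AFFS_DATA() skips that header
 * and the loop reassembles the page block by block. With 512-byte
 * blocks the usable payload is s_data_blksize == 488 bytes, so one
 * 4096-byte page spans parts of nine on-disk data blocks.
 */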

static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;

	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
			if (tmp)
				affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
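
/*
 * Extending an OFS file has to materialize real, zero-filled data
 * blocks: because each block is linked to the next through its header,
 * the format cannot represent holes, so growth zero-pads the last
 * partial block and then appends fully initialized blocks to the
 * chain.
 */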

static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
	to = PAGE_CACHE_SIZE;
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_CACHE_MASK;
		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
	}

	err = affs_do_readpage_ofs(file, page, 0, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}

static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end
		 * time.
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_CACHE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
	if (err) {
		unlock_page(page);
		page_cache_release(page);
	}
	return err;
}

static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = pos + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */
	pr_debug("AFFS: write_end(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
				if (tmp)
					affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	unlock_page(page);
	page_cache_release(page);

	return written;

out:
	/* capture the error before bh is replaced by the last valid buffer */
	if (!written)
		written = PTR_ERR(bh);
	bh = prev_bh;
	goto done;
}
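
/*
 * The copy above proceeds in three phases: a partial head fragment
 * completing an existing data block, a run of whole blocks, and a
 * partial tail fragment. Freshly allocated blocks (BH_New) get their
 * OFS headers initialized and are linked to their predecessor through
 * the `next` chain, mirroring affs_extent_file_ofs().
 */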

const struct address_space_operations affs_aops_ofs = {
	.readpage = affs_readpage_ofs,
	//.writepage = affs_writepage_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs
};
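
/*
 * Note the commented-out .writepage: OFS inodes have no writeback
 * path here, so data reaches disk through the buffer updates in
 * affs_write_end_ofs() above. This also suggests that dirty shared
 * mappings of OFS files would never be written back.
 */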

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}

/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
		 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		u32 size = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
			     ext, (int)PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (AFFS_SB(sb)->s_flags & SF_OFS) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
					     last_blk, (int)PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
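
/*
 * affs_truncate() handles both directions: growing is delegated to a
 * zero-length write_begin/write_end pair at the new EOF, so the aops
 * zero-extend the file, while shrinking frees the tail of the last
 * kept extent block in place and then walks the remaining extension
 * chain, freeing each extent block together with its data blocks.
 */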

int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	mutex_unlock(&inode->i_mutex);
	return ret;
}