File: /usr.sbin/makefs/ffs/ffs_balloc.c
Source: https://github.com/blacklion/GEOM-Events
C | 578 lines | 375 code | 64 blank | 139 comment
  1. /* $NetBSD: ffs_balloc.c,v 1.13 2004/06/20 22:20:18 jmc Exp $ */
  2. /* From NetBSD: ffs_balloc.c,v 1.25 2001/08/08 08:36:36 lukem Exp */
  3. /*
  4. * Copyright (c) 1982, 1986, 1989, 1993
  5. * The Regents of the University of California. All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the name of the University nor the names of its contributors
  16. * may be used to endorse or promote products derived from this software
  17. * without specific prior written permission.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  20. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  21. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  22. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  23. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  24. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  25. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  26. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  27. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  28. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  29. * SUCH DAMAGE.
  30. *
  31. * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95
  32. */
  33. #include <sys/cdefs.h>
  34. __FBSDID("$FreeBSD$");
  35. #include <sys/param.h>
  36. #include <sys/time.h>
  37. #include <assert.h>
  38. #include <errno.h>
  39. #include <stdio.h>
  40. #include <stdlib.h>
  41. #include <string.h>
  42. #include "makefs.h"
  43. #include <ufs/ufs/dinode.h>
  44. #include <ufs/ffs/fs.h>
  45. #include "ffs/ufs_bswap.h"
  46. #include "ffs/buf.h"
  47. #include "ffs/ufs_inode.h"
  48. #include "ffs/ffs_extern.h"
  49. static int ffs_balloc_ufs1(struct inode *, off_t, int, struct buf **);
  50. static int ffs_balloc_ufs2(struct inode *, off_t, int, struct buf **);
  51. /*
  52. * Balloc defines the structure of file system storage
  53. * by allocating the physical blocks on a device given
  54. * the inode and the logical block number in a file.
  55. *
  56. * Assume: flags == B_SYNC | B_CLRBUF
  57. */
  58. int
  59. ffs_balloc(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
  60. {
  61. if (ip->i_fs->fs_magic == FS_UFS2_MAGIC)
  62. return ffs_balloc_ufs2(ip, offset, bufsize, bpp);
  63. else
  64. return ffs_balloc_ufs1(ip, offset, bufsize, bpp);
  65. }
/*
 * UFS1 variant of ffs_balloc(): allocate the physical block backing the
 * logical block that contains file offset `offset' in inode `ip', creating
 * any intermediate indirect blocks as needed.  If `bpp' is non-NULL, a
 * buffer for the data block is returned through it (caller releases it).
 *
 * Returns 0 on success or an errno value (EFBIG for a negative logical
 * block, or whatever ffs_alloc()/bread()/bwrite() report).  Cases that
 * would require ffs_realloccg() (growing an existing fragment) are not
 * supported and abort().
 */
static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	int32_t nb;
	struct buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[NIADDR + 2];
	daddr_t newb, pref;
	int32_t *bap;
	int osize, nsize, num, i, error;
	/*
	 * NOTE(review): allociblk/allocblk/allocib record newly allocated
	 * (indirect) blocks but are only ever written here, never read --
	 * in the kernel original they drive error rollback; in this
	 * userland port allocation failures abort or return instead.
	 */
	int32_t *allocblk, allociblk[NIADDR + 1];
	int32_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	/* Bytes used in the final block: in-block offset plus request size. */
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}
	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);
	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */
	lastlbn = lblkno(fs, ip->i_ffs1_size);
	if (lastlbn < NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			/* Fragment extension would need ffs_realloccg(). */
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs1_size >= lblktosize(fs, lbn + 1)) {
			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */
			if (bpp != NULL) {
				error = bread(ip->i_fd, ip->i_fs, lbn,
				    fs->fs_bsize, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */
				if (bpp != NULL) {
					error = bread(ip->i_fd, ip->i_fs, lbn,
					    osize, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				/* Growing a fragment is unsupported. */
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {
			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */
			if (ip->i_ffs1_size < lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				&ip->i_ffs1_db[0]),
			    nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(ip->i_fd, ip->i_fs, lbn, nsize);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		/* Record the new block in the inode's direct-block table. */
		ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);
	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(ip->i_fd, ip->i_fs, indirs[1].in_lbn, fs->fs_bsize);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
		*allocib = ufs_rw32((int32_t)nb, needswap);
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(ip->i_fd, ip->i_fs, indirs[i].in_lbn,
		    fs->fs_bsize, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int32_t *)bp->b_data;
		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
		/* Exit with `bp' held on the last-level indirect block. */
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(ip->i_fd, ip->i_fs, indirs[i].in_lbn,
		    fs->fs_bsize);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		/* Link the new child into the parent indirect block. */
		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);
		/* NOTE(review): bwrite() return value ignored here. */
		bwrite(bp);
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(ip->i_fd, ip->i_fs, lbn, fs->fs_bsize);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread(ip->i_fd, ip->i_fs, lbn, (int)fs->fs_bsize, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}
/*
 * UFS2 variant of ffs_balloc(): allocate the physical block backing the
 * logical block that contains file offset `offset' in inode `ip', creating
 * any intermediate indirect blocks as needed.  Identical in structure to
 * ffs_balloc_ufs1() but uses the 64-bit on-disk fields (i_ffs2_*) and
 * ufs_rw64() byte swapping.  If `bpp' is non-NULL, a buffer for the data
 * block is returned through it (caller releases it).
 *
 * Returns 0 on success or an errno value (EFBIG for a negative logical
 * block, or whatever ffs_alloc()/bread()/bwrite() report).  Cases that
 * would require ffs_realloccg() (growing an existing fragment) are not
 * supported and abort().
 */
static int
ffs_balloc_ufs2(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
	daddr_t lbn, lastlbn;
	int size;
	struct buf *bp, *nbp;
	struct fs *fs = ip->i_fs;
	struct indir indirs[NIADDR + 2];
	daddr_t newb, pref, nb;
	int64_t *bap;
	int osize, nsize, num, i, error;
	/*
	 * NOTE(review): allociblk/allocblk/allocib record newly allocated
	 * (indirect) blocks but are only ever written here, never read --
	 * in the kernel original they drive error rollback; in this
	 * userland port allocation failures abort or return instead.
	 */
	int64_t *allocblk, allociblk[NIADDR + 1];
	int64_t *allocib;
	const int needswap = UFS_FSNEEDSWAP(fs);

	lbn = lblkno(fs, offset);
	/* Bytes used in the final block: in-block offset plus request size. */
	size = blkoff(fs, offset) + bufsize;
	if (bpp != NULL) {
		*bpp = NULL;
	}
	assert(size <= fs->fs_bsize);
	if (lbn < 0)
		return (EFBIG);
	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */
	lastlbn = lblkno(fs, ip->i_ffs2_size);
	if (lastlbn < NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			/* Fragment extension would need ffs_realloccg(). */
			warnx("need to ffs_realloccg; not supported!");
			abort();
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ufs_rw64(ip->i_ffs2_db[lbn], needswap);
		if (nb != 0 && ip->i_ffs2_size >= lblktosize(fs, lbn + 1)) {
			/*
			 * The block is an already-allocated direct block
			 * and the file already extends past this block,
			 * thus this must be a whole block.
			 * Just read the block (if requested).
			 */
			if (bpp != NULL) {
				error = bread(ip->i_fd, ip->i_fs, lbn,
				    fs->fs_bsize, bpp);
				if (error) {
					brelse(*bpp);
					return (error);
				}
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				/*
				 * The existing block is already
				 * at least as big as we want.
				 * Just read the block (if requested).
				 */
				if (bpp != NULL) {
					error = bread(ip->i_fd, ip->i_fs, lbn,
					    osize, bpp);
					if (error) {
						brelse(*bpp);
						return (error);
					}
				}
				return 0;
			} else {
				/* Growing a fragment is unsupported. */
				warnx("need to ffs_realloccg; not supported!");
				abort();
			}
		} else {
			/*
			 * the block was not previously allocated,
			 * allocate a new block or fragment.
			 */
			if (ip->i_ffs2_size < lblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
				&ip->i_ffs2_db[0]),
			    nsize, &newb);
			if (error)
				return (error);
			if (bpp != NULL) {
				bp = getblk(ip->i_fd, ip->i_fs, lbn, nsize);
				bp->b_blkno = fsbtodb(fs, newb);
				clrbuf(bp);
				*bpp = bp;
			}
		}
		/* Record the new block in the inode's direct-block table. */
		ip->i_ffs2_db[lbn] = ufs_rw64(newb, needswap);
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
		return (error);
	if (num < 1) {
		warnx("ffs_balloc: ufs_getlbns returned indirect block");
		abort();
	}
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = ufs_rw64(ip->i_ffs2_ib[indirs[0].in_off], needswap);
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error)
			return error;
		nb = newb;
		*allocblk++ = nb;
		bp = getblk(ip->i_fd, ip->i_fs, indirs[1].in_lbn, fs->fs_bsize);
		bp->b_blkno = fsbtodb(fs, nb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			return error;
		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
		*allocib = ufs_rw64(nb, needswap);
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(ip->i_fd, ip->i_fs, indirs[i].in_lbn,
		    fs->fs_bsize, &bp);
		if (error) {
			brelse(bp);
			return error;
		}
		bap = (int64_t *)bp->b_data;
		nb = ufs_rw64(bap[indirs[i].in_off], needswap);
		/* Exit with `bp' held on the last-level indirect block. */
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = getblk(ip->i_fd, ip->i_fs, indirs[i].in_lbn,
		    fs->fs_bsize);
		nbp->b_blkno = fsbtodb(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(nbp)) != 0) {
			brelse(bp);
			return error;
		}
		/* Link the new child into the parent indirect block. */
		bap[indirs[i - 1].in_off] = ufs_rw64(nb, needswap);
		/* NOTE(review): bwrite() return value ignored here. */
		bwrite(bp);
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref_ufs2(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
		if (error) {
			brelse(bp);
			return error;
		}
		nb = newb;
		*allocblk++ = nb;
		if (bpp != NULL) {
			nbp = getblk(ip->i_fd, ip->i_fs, lbn, fs->fs_bsize);
			nbp->b_blkno = fsbtodb(fs, nb);
			clrbuf(nbp);
			*bpp = nbp;
		}
		bap[indirs[num].in_off] = ufs_rw64(nb, needswap);
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		bwrite(bp);
		return (0);
	}
	brelse(bp);
	if (bpp != NULL) {
		error = bread(ip->i_fd, ip->i_fs, lbn, (int)fs->fs_bsize, &nbp);
		if (error) {
			brelse(nbp);
			return error;
		}
		*bpp = nbp;
	}
	return (0);
}
  514. }