PageRenderTime 70ms CodeModel.GetById 31ms RepoModel.GetById 0ms app.codeStats 1ms

/sys/ufs/chfs/chfs_vnops.c

https://bitbucket.org/gthummalapalle/minix
C | 1765 lines | 1258 code | 281 blank | 226 comment | 311 complexity | 293b77fd7126401922520b7e07a07e58 MD5 | raw file
Possible License(s): BSD-3-Clause, LGPL-2.0, WTFPL, AGPL-1.0
  1. /* $NetBSD: chfs_vnops.c,v 1.2 2011/11/24 21:09:37 agc Exp $ */
  2. /*-
  3. * Copyright (c) 2010 Department of Software Engineering,
  4. * University of Szeged, Hungary
  5. * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
  6. * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
  7. * All rights reserved.
  8. *
  9. * This code is derived from software contributed to The NetBSD Foundation
  10. * by the Department of Software Engineering, University of Szeged, Hungary
  11. *
  12. * Redistribution and use in source and binary forms, with or without
  13. * modification, are permitted provided that the following conditions
  14. * are met:
  15. * 1. Redistributions of source code must retain the above copyright
  16. * notice, this list of conditions and the following disclaimer.
  17. * 2. Redistributions in binary form must reproduce the above copyright
  18. * notice, this list of conditions and the following disclaimer in the
  19. * documentation and/or other materials provided with the distribution.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  22. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  23. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  24. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  25. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  26. * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  27. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  28. * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  29. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  30. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. */
  33. #include <sys/param.h>
  34. #include <miscfs/specfs/specdev.h>
  35. #include <miscfs/fifofs/fifo.h>
  36. #include <miscfs/genfs/genfs.h>
  37. #include <ufs/ufs/dir.h>
  38. #include <ufs/ufs/ufs_extern.h>
  39. #include <uvm/uvm.h>
  40. #include <sys/namei.h>
  41. #include <sys/stat.h>
  42. #include <sys/fcntl.h>
  43. #include <sys/buf.h>
  44. #include <sys/fstrans.h>
  45. #include <sys/vnode.h>
  46. #include "chfs.h"
  47. #define READ_S "chfs_read"
/*
 * chfs_lookup - VOP_LOOKUP for CHFS.
 *
 * Resolves the component name in cnp within the locked directory dvp
 * and returns the matching vnode, locked, in *vpp.  Follows the usual
 * VOP_LOOKUP protocol: checks exec permission on the directory, tries
 * the name cache first, then handles "..", "." and plain names.  For a
 * missing last component of a CREATE/RENAME request it returns
 * EJUSTRETURN instead of ENOENT so the caller may create the entry.
 */
int
chfs_lookup(void *v)
{
	struct vnode *dvp = ((struct vop_lookup_args *) v)->a_dvp;
	struct vnode **vpp = ((struct vop_lookup_args *) v)->a_vpp;
	struct componentname *cnp = ((struct vop_lookup_args *) v)->a_cnp;
	int error;
	struct chfs_inode* ip;
	struct ufsmount* ump;
	struct chfs_mount* chmp;
	struct chfs_vnode_cache* chvc;
	struct chfs_dirent* fd;

	dbg("lookup(): %s\n", cnp->cn_nameptr);

	KASSERT(VOP_ISLOCKED(dvp));

	*vpp = NULL;

	// Check accessibility of requested node as a first step.
	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
	if (error != 0) {
		goto out;
	}

	// If requesting the last path component on a read-only file system
	// with a write operation, deny it.
	if ((cnp->cn_flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY)
	    && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = EROFS;
		goto out;
	}

	// Avoid doing a linear scan of the directory if the requested
	// directory/name couple is already in the cache.
	error = cache_lookup(dvp, vpp, cnp);
	if (error >= 0) {
		goto out;
	}

	ip = VTOI(dvp);
	ump = VFSTOUFS(dvp->v_mount);
	chmp = ump->um_chfs;

	// NOTE(review): a directory whose inode number is still 0 is
	// assigned a brand-new vnode number here, during lookup; this
	// looks like a workaround rather than intended design — confirm
	// against the vnode allocation path in chfs_vfsops.
	if (ip->ino == 0) {
		ip->ino = ++chmp->chm_max_vno;
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ip->ino);
	mutex_exit(&chmp->chm_lock_vnocache);

	// We cannot be requesting the parent directory of the root node.
	KASSERT(IMPLIES(dvp->v_type == VDIR && chvc->pvno == chvc->vno,
	    !(cnp->cn_flags & ISDOTDOT)));

	if (cnp->cn_flags & ISDOTDOT) {
		// "..": fetch the parent by its vnode number.  dvp is
		// unlocked around VFS_VGET, which locks the parent, to
		// respect the parent-before-child lock order.
		VOP_UNLOCK(dvp);
		error = VFS_VGET(dvp->v_mount, ip->chvc->pvno, vpp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		// ".": just take another reference on the directory itself.
		vref(dvp);
		*vpp = dvp;
		error = 0;
	} else {
		fd = chfs_dir_lookup(ip, cnp);

		if (fd == NULL) {
			dbg("fd null\n");
			// The entry was not found in the directory.
			// This is OK if we are creating or renaming an
			// entry and are working on the last component of
			// the path name.
			if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_nameiop == CREATE
			    || cnp->cn_nameiop == RENAME)) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
				if (error) {
					dbg("after the entry was not found in dir\n");
					goto out;
				}
				dbg("return EJUSTRETURN\n");
				error = EJUSTRETURN;
			} else {
				error = ENOENT;
			}
		} else {
			// If we are not at the last path component and
			// found a non-directory or non-link entry (which
			// may itself be pointing to a directory), raise
			// an error.
			if ((fd->type != VDIR && fd->type != VLNK) && !(cnp->cn_flags
			    & ISLASTCN)) {
				error = ENOTDIR;
				goto out;
			}

			dbg("vno@allocating new vnode: %llu\n",
			    (unsigned long long)fd->vno);
			error = VFS_VGET(dvp->v_mount, fd->vno, vpp);
		}
	}

	// Store the result of this lookup in the cache.  Avoid this if the
	// request was for creation, as it does not improve timings on
	// empirical tests.
	if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE
	    && (cnp->cn_flags & ISDOTDOT) == 0)
		cache_enter(dvp, *vpp, cnp);

out:
	// If there were no errors, *vpp cannot be null and it must be
	// locked.
	KASSERT(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp)));

	// dvp must always be locked.
	KASSERT(VOP_ISLOCKED(dvp));

	return error;
}
  150. /* --------------------------------------------------------------------- */
  151. int
  152. chfs_create(void *v)
  153. {
  154. struct vop_create_args /* {
  155. struct vnode *a_dvp;
  156. struct vnode **a_vpp;
  157. struct componentname *a_cnp;
  158. struct vattr *a_vap;
  159. } */*ap = v;
  160. int error, mode;
  161. dbg("create()\n");
  162. mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
  163. if ((mode & IFMT) == 0) {
  164. if (ap->a_vap->va_type == VREG)
  165. mode |= IFREG;
  166. if (ap->a_vap->va_type == VSOCK)
  167. mode |= IFSOCK;
  168. }
  169. error = chfs_makeinode(mode, ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap->va_type);
  170. if (error) {
  171. dbg("error: %d\n", error);
  172. return error;
  173. }
  174. VN_KNOTE(ap->a_dvp, NOTE_WRITE);
  175. return 0;
  176. }
  177. /* --------------------------------------------------------------------- */
  178. int
  179. chfs_mknod(void *v)
  180. {
  181. struct vnode *dvp = ((struct vop_mknod_args *) v)->a_dvp;
  182. struct vnode **vpp = ((struct vop_mknod_args *) v)->a_vpp;
  183. struct componentname *cnp = ((struct vop_mknod_args *) v)->a_cnp;
  184. struct vattr *vap = ((struct vop_mknod_args *) v)->a_vap;
  185. int mode, err = 0;
  186. struct chfs_inode *ip;
  187. struct vnode *vp;
  188. struct ufsmount *ump;
  189. struct chfs_mount *chmp;
  190. ino_t ino;
  191. struct chfs_full_dnode *fd;
  192. struct buf *bp;
  193. int len;
  194. dbg("mknod()\n");
  195. ump = VFSTOUFS(dvp->v_mount);
  196. chmp = ump->um_chfs;
  197. if (vap->va_type != VBLK && vap->va_type != VCHR && vap->va_type != VFIFO)
  198. return EINVAL;
  199. vp = *vpp;
  200. mode = MAKEIMODE(vap->va_type, vap->va_mode);
  201. if ((mode & IFMT) == 0) {
  202. switch (vap->va_type) {
  203. case VBLK:
  204. mode |= IFBLK;
  205. break;
  206. case VCHR:
  207. mode |= IFCHR;
  208. break;
  209. case VFIFO:
  210. mode |= IFIFO;
  211. break;
  212. default:
  213. break;
  214. }
  215. }
  216. err = chfs_makeinode(mode, dvp, &vp, cnp, vap->va_type);
  217. ip = VTOI(vp);
  218. ino = ip->ino;
  219. if (vap->va_rdev != VNOVAL)
  220. ip->rdev = vap->va_rdev;
  221. if (vap->va_type == VFIFO)
  222. vp->v_op = chfs_fifoop_p;
  223. else {
  224. vp->v_op = chfs_specop_p;
  225. spec_node_init(vp, ip->rdev);
  226. }
  227. if (err)
  228. return err;
  229. len = sizeof(dev_t);
  230. chfs_set_vnode_size(vp, len);
  231. bp = getiobuf(vp, true);
  232. bp->b_bufsize = bp->b_resid = len;
  233. bp->b_data = kmem_alloc(len, KM_SLEEP);
  234. memcpy(bp->b_data, &ip->rdev, len);
  235. bp->b_blkno = 0;
  236. fd = chfs_alloc_full_dnode();
  237. mutex_enter(&chmp->chm_lock_mountfields);
  238. err = chfs_write_flash_dnode(chmp, vp, bp, fd);
  239. if (err) {
  240. mutex_exit(&chmp->chm_lock_mountfields);
  241. kmem_free(bp->b_data, len);
  242. return err;
  243. }
  244. err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
  245. if (err) {
  246. mutex_exit(&chmp->chm_lock_mountfields);
  247. kmem_free(bp->b_data, len);
  248. return err;
  249. }
  250. mutex_exit(&chmp->chm_lock_mountfields);
  251. *vpp = vp;
  252. kmem_free(bp->b_data, len);
  253. putiobuf(bp);
  254. return 0;
  255. }
  256. /* --------------------------------------------------------------------- */
  257. int
  258. chfs_open(void *v)
  259. {
  260. struct vnode *vp = ((struct vop_open_args *) v)->a_vp;
  261. int mode = ((struct vop_open_args *) v)->a_mode;
  262. dbg("open()\n");
  263. int error;
  264. struct chfs_inode *ip;
  265. KASSERT(VOP_ISLOCKED(vp));
  266. ip = VTOI(vp);
  267. KASSERT(vp->v_size == ip->size);
  268. if (ip->chvc->nlink < 1) {
  269. error = ENOENT;
  270. goto out;
  271. }
  272. // If the file is marked append-only, deny write requests.
  273. if (ip->flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
  274. error = EPERM;
  275. else
  276. error = 0;
  277. out:
  278. KASSERT(VOP_ISLOCKED(vp));
  279. return error;
  280. }
  281. /* --------------------------------------------------------------------- */
  282. int
  283. chfs_close(void *v)
  284. {
  285. struct vnode *vp = ((struct vop_close_args *) v)->a_vp;
  286. dbg("close()\n");
  287. struct chfs_inode *ip;
  288. KASSERT(VOP_ISLOCKED(vp));
  289. ip = VTOI(vp);
  290. if (ip->chvc->nlink > 0) {
  291. //ip->chvc->nlink = 0;
  292. chfs_update(vp, NULL, NULL, UPDATE_CLOSE);
  293. }
  294. return 0;
  295. }
  296. /* --------------------------------------------------------------------- */
  297. int
  298. chfs_access(void *v)
  299. {
  300. struct vnode *vp = ((struct vop_access_args *) v)->a_vp;
  301. int mode = ((struct vop_access_args *) v)->a_mode;
  302. kauth_cred_t cred = ((struct vop_access_args *) v)->a_cred;
  303. dbg("access()\n");
  304. struct chfs_inode *ip = VTOI(vp);
  305. if (mode & VWRITE) {
  306. switch (vp->v_type) {
  307. case VLNK:
  308. case VDIR:
  309. case VREG:
  310. if (vp->v_mount->mnt_flag & MNT_RDONLY)
  311. return (EROFS);
  312. break;
  313. case VBLK:
  314. case VCHR:
  315. case VSOCK:
  316. case VFIFO:
  317. break;
  318. default:
  319. break;
  320. }
  321. }
  322. if (mode & VWRITE && ip->flags & IMMUTABLE)
  323. return (EPERM);
  324. return genfs_can_access(vp->v_type, ip->mode & ALLPERMS,
  325. ip->uid, ip->gid, mode, cred);
  326. }
  327. /* --------------------------------------------------------------------- */
  328. int
  329. chfs_getattr(void *v)
  330. {
  331. struct vnode *vp = ((struct vop_getattr_args *) v)->a_vp;
  332. struct vattr *vap = ((struct vop_getattr_args *) v)->a_vap;
  333. struct chfs_inode *ip = VTOI(vp);
  334. dbg("getattr()\n");
  335. KASSERT(vp->v_size == ip->size);
  336. vattr_null(vap);
  337. CHFS_ITIMES(ip, NULL, NULL, NULL);
  338. vap->va_type = vp->v_type;
  339. vap->va_mode = ip->mode & ALLPERMS;
  340. vap->va_nlink = ip->chvc->nlink;
  341. vap->va_uid = ip->uid;
  342. vap->va_gid = ip->gid;
  343. vap->va_fsid = ip->dev;
  344. vap->va_fileid = ip->ino;
  345. vap->va_size = ip->size;
  346. vap->va_blocksize = PAGE_SIZE;
  347. vap->va_atime.tv_sec = ip->atime;
  348. vap->va_atime.tv_nsec = 0;
  349. vap->va_mtime.tv_sec = ip->mtime;
  350. vap->va_mtime.tv_nsec = 0;
  351. vap->va_ctime.tv_sec = ip->ctime;
  352. vap->va_ctime.tv_nsec = 0;
  353. vap->va_gen = ip->version;
  354. vap->va_flags = ip->flags;
  355. vap->va_rdev = ip->rdev;
  356. vap->va_bytes = round_page(ip->size);
  357. vap->va_filerev = VNOVAL;
  358. vap->va_vaflags = 0;
  359. vap->va_spare = VNOVAL;
  360. return 0;
  361. }
  362. /* --------------------------------------------------------------------- */
  363. /* Note: modelled after tmpfs's same function */
/*
 * chfs_setattr - VOP_SETATTR for CHFS (modelled after tmpfs).
 *
 * Applies the settable attributes from *vap in a fixed order (flags,
 * size, ownership, mode, times), stopping at the first error, then
 * writes the updated inode out to flash.  Attributes that can never
 * be set (type, nlink, fsid, ...) cause EINVAL up front.
 */
int
chfs_setattr(void *v)
{
	struct vnode *vp = ((struct vop_setattr_args *) v)->a_vp;
	struct vattr *vap = ((struct vop_setattr_args *) v)->a_vap;
	kauth_cred_t cred = ((struct vop_setattr_args *) v)->a_cred;
	struct chfs_inode *ip;
	struct ufsmount *ump = VFSTOUFS(vp->v_mount);
	struct chfs_mount *chmp = ump->um_chfs;
	int error = 0;

	dbg("setattr()\n");

	KASSERT(VOP_ISLOCKED(vp));
	ip = VTOI(vp);

	/* Abort if any unsettable attribute is given. */
	if (vap->va_type != VNON || vap->va_nlink != VNOVAL ||
	    vap->va_fsid != VNOVAL || vap->va_fileid != VNOVAL ||
	    vap->va_blocksize != VNOVAL /*|| GOODTIME(&vap->va_ctime)*/ ||
	    vap->va_gen != VNOVAL || vap->va_rdev != VNOVAL ||
	    vap->va_bytes != VNOVAL) {
		return EINVAL;
	}

	/* Each helper checks the caller's permission itself; the chain
	 * short-circuits on the first failure. */
	if (error == 0 && (vap->va_flags != VNOVAL))
		error = chfs_chflags(vp, vap->va_flags, cred);

	if (error == 0 && (vap->va_size != VNOVAL))
		error = chfs_chsize(vp, vap->va_size, cred);

	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
		error = chfs_chown(vp, vap->va_uid, vap->va_gid, cred);

	if (error == 0 && (vap->va_mode != VNOVAL))
		error = chfs_chmod(vp, vap->va_mode, cred);

#if 0
	/* why do we need that? */
	if (ip->flags & (IMMUTABLE | APPEND))
		return EPERM;
#endif

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		error = genfs_can_chtimes(vp, vap->va_vaflags, ip->uid, cred);
		if (error)
			return error;

		if (vap->va_atime.tv_sec != VNOVAL)
			ip->iflag |= IN_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL)
			ip->iflag |= IN_CHANGE | IN_UPDATE;
		error = chfs_update(vp,
		    &vap->va_atime, &vap->va_mtime, UPDATE_WAIT);
		if (error)
			return error;
	}

	/* NOTE(review): the inode is flushed to flash even when a helper
	 * above failed (error != 0 falls through to here) — confirm that
	 * is intended. */
	mutex_enter(&chmp->chm_lock_mountfields);
	error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
	mutex_exit(&chmp->chm_lock_mountfields);

	return error;
}
  416. int
  417. chfs_chmod(struct vnode *vp, int mode, kauth_cred_t cred)
  418. {
  419. struct chfs_inode *ip = VTOI(vp);
  420. int error;
  421. dbg("chmod\n");
  422. error = genfs_can_chmod(vp, cred, ip->uid, ip->gid, mode);
  423. if (error)
  424. return error;
  425. ip->mode &= ~ALLPERMS;
  426. ip->mode |= (mode & ALLPERMS);
  427. ip->iflag |= IN_CHANGE;
  428. error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
  429. if (error)
  430. return error;
  431. return 0;
  432. }
  433. int
  434. chfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred)
  435. {
  436. struct chfs_inode *ip = VTOI(vp);
  437. int error;
  438. dbg("chown\n");
  439. if (uid == (uid_t)VNOVAL)
  440. uid = ip->uid;
  441. if (gid == (gid_t)VNOVAL)
  442. gid = ip->gid;
  443. error = genfs_can_chown(vp, cred, ip->uid, ip->gid, uid, gid);
  444. if (error)
  445. return error;
  446. ip->gid = gid;
  447. ip->uid = uid;
  448. ip->iflag |= IN_CHANGE;
  449. error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
  450. if (error)
  451. return error;
  452. return 0;
  453. }
  454. /* --------------------------------------------------------------------- */
/*
 * Block-arithmetic helpers, modelled after the FFS macros but driven
 * by the chfs_mount geometry fields (chm_fs_bshift/bmask/qbmask and
 * chm_fs_fmask/qfmask).
 */

/* calculates ((off_t)blk * chmp->chm_fs_bsize) */
#define lblktosize(chmp, blk) \
	(((off_t)(blk)) << (chmp)->chm_fs_bshift)

/* calculates (loc % chmp->chm_fs_bsize) */
#define blkoff(chmp, loc) \
	((loc) & (chmp)->chm_fs_qbmask)

/* calculates (loc / chmp->chm_fs_bsize) */
#define lblkno(chmp, loc) \
	((loc) >> (chmp)->chm_fs_bshift)

/* calculates roundup(size, chmp->chm_fs_fsize) */
#define fragroundup(chmp, size) \
	(((size) + (chmp)->chm_fs_qfmask) & (chmp)->chm_fs_fmask)

/* size of the block holding logical block lbn: a full block unless it
 * is the last, partial one, whose tail is rounded up to a fragment */
#define blksize(chmp, ip, lbn) \
	(((lbn) >= NDADDR || (ip)->size >= lblktosize(chmp, (lbn) + 1)) \
	    ? (chmp)->chm_fs_bsize \
	    : (fragroundup(chmp, blkoff(chmp, (ip)->size))))

/* calculates roundup(size, chmp->chm_fs_bsize) */
#define blkroundup(chmp, size) \
	(((size) + (chmp)->chm_fs_qbmask) & (chmp)->chm_fs_bmask)
/*
 * chfs_read - VOP_READ for CHFS.
 *
 * Regular files are read through the page cache (UBC); symlinks and
 * directories fall through to a classic bread()/uiomove() loop over
 * logical blocks.  The access time is marked for update unless the
 * mount has MNT_NOATIME set.
 */
int
chfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct chfs_inode *ip;
	struct uio *uio;
	struct ufsmount *ump;
	struct buf *bp;
	struct chfs_mount *chmp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, ioflag;
	vsize_t bytelen;
	bool usepc = false;

	dbg("chfs_read\n");

	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ip->ump;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	error = 0;

	dbg("ip->size:%llu\n", (unsigned long long)ip->size);

#ifdef DIAGNOSTIC
	// Sanity checks on the request: must be a read, and only types
	// with file data (regular file, dir, long symlink) are valid.
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", READ_S);

	if (vp->v_type == VLNK) {
		if (ip->size < ump->um_maxsymlinklen)
			panic("%s: short symlink", READ_S);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type %d", READ_S, vp->v_type);
#endif
	chmp = ip->chmp;
	if ((u_int64_t)uio->uio_offset > ump->um_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	fstrans_start(vp->v_mount, FSTRANS_SHARED);

	// Reading at or past EOF returns 0 bytes (error == 0).
	if (uio->uio_offset >= ip->size)
		goto out;

	// Regular files go through the page cache.
	usepc = vp->v_type == VREG;
	bytelen = 0;
	if (usepc) {
		const int advice = IO_ADV_DECODE(ap->a_ioflag);

		while (uio->uio_resid > 0) {
			if (ioflag & IO_DIRECT) {
				genfs_directio(vp, uio, ioflag);
			}
			// Transfer up to EOF or the remaining request,
			// whichever is smaller.
			bytelen = MIN(ip->size - uio->uio_offset,
			    uio->uio_resid);
			if (bytelen == 0)
				break;
			error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
			    UBC_READ | UBC_PARTIALOK |
			    (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
			if (error)
				break;
		}
		goto out;
	}

	// Buffer-cache path for directories and symlinks: iterate over
	// logical blocks, reading ahead one block when possible.
	dbg("start reading\n");
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		bytesinfile = ip->size - uio->uio_offset;
		if (bytesinfile <= 0)
			break;
		lbn = lblkno(chmp, uio->uio_offset);
		nextlbn = lbn + 1;
		size = blksize(chmp, ip, lbn);
		blkoffset = blkoff(chmp, uio->uio_offset);
		xfersize = MIN(MIN(chmp->chm_fs_bsize - blkoffset, uio->uio_resid),
		    bytesinfile);

		if (lblktosize(chmp, nextlbn) >= ip->size) {
			// Last block: plain read, no read-ahead.
			error = bread(vp, lbn, size, NOCRED, 0, &bp);
			dbg("after bread\n");
		} else {
			int nextsize = blksize(chmp, ip, nextlbn);
			dbg("size: %ld\n", size);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, 0, &bp);
			dbg("after breadN\n");
		}
		if (error)
			break;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		dbg("uiomove\n");
		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
		if (error)
			break;
		brelse(bp, 0);
	}
	if (bp != NULL)
		brelse(bp, 0);

out:
	// Mark the access time for update (honouring noatime mounts) and
	// push the inode out synchronously for IO_SYNC reads.
	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
		ip->iflag |= IN_ACCESS;
		if ((ap->a_ioflag & IO_SYNC) == IO_SYNC) {
			//error = UFS_WAPBL_BEGIN(vp->v_mount);
			// NOTE(review): with UFS_WAPBL_BEGIN commented out,
			// 'error' here still holds the value from the read
			// loop above — confirm this early return is wanted.
			if (error) {
				fstrans_done(vp->v_mount);
				return error;
			}
			error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
			//UFS_WAPBL_END(vp->v_mount);
		}
	}
	dbg("[END]\n");

	fstrans_done(vp->v_mount);

	return (error);
}
  601. /* --------------------------------------------------------------------- */
  602. /*from ffs write*/
/*
 * chfs_write - VOP_WRITE for CHFS (derived from the FFS write path).
 *
 * Copies user data into the vnode's pages through UBC, allocating
 * backing blocks as needed, then schedules the in-core inode to be
 * written to flash.  Regular files, symlinks and (sync-only)
 * directory writes are accepted; anything else panics.  On error the
 * partial write is undone by truncating back to the original size.
 */
int
chfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp ;
	struct uio *uio;
	struct chfs_inode *ip;
	struct chfs_mount *chmp;
	struct lwp *l;
	kauth_cred_t cred;
	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
	int blkoffset, error, flags, ioflag, resid;
	int aflag;
	int extended=0;
	vsize_t bytelen;
	bool async;
	struct ufsmount *ump;

	cred = ap->a_cred;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

	//dbg("file size (vp): %llu\n", (unsigned long long)vp->v_size);
	//dbg("file size (ip): %llu\n", (unsigned long long)ip->i_size);
	ump = ip->ump;

	//dbg("uio->resid: %d\n", uio->uio_resid);
	dbg("write\n");

	KASSERT(vp->v_size == ip->size);
	switch (vp->v_type) {
	case VREG:
		// O_APPEND always writes at EOF; an append-only file may
		// only ever be written at EOF.
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->size;
		if ((ip->flags & APPEND) && uio->uio_offset != ip->size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		if ((ioflag & IO_SYNC) == 0)
			panic("chfs_write: nonsync dir write");
		break;
	default:
		panic("chfs_write: type");
	}

	chmp = ip->chmp;
	// Reject writes that would exceed the maximum file size.
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset +
	    uio->uio_resid > ump->um_maxfilesize) {
		dbg("uio->uio_offset = %lld | uio->uio_offset + "
		    "uio->uio_resid (%llu) > ump->um_maxfilesize (%lld)\n",
		    (long long)uio->uio_offset,
		    (uint64_t)uio->uio_offset + uio->uio_resid,
		    (long long)ump->um_maxfilesize);
		return (EFBIG);
	}
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	l = curlwp;
	if (vp->v_type == VREG && l &&
	    uio->uio_offset + uio->uio_resid >
	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		// Exceeding RLIMIT_FSIZE delivers SIGXFSZ, per POSIX.
		mutex_enter(proc_lock);
		psignal(l->l_proc, SIGXFSZ);
		mutex_exit(proc_lock);
		return (EFBIG);
	}
	if (uio->uio_resid == 0)
		return (0);

	//mutex_enter(&ip->inode_lock);
	fstrans_start(vp->v_mount, FSTRANS_SHARED);

	flags = ioflag & IO_SYNC ? B_SYNC : 0;
	async = vp->v_mount->mnt_flag & MNT_ASYNC;
	origoff = uio->uio_offset;
	resid = uio->uio_resid;
	osize = ip->size;
	error = 0;

	/*if ((ioflag & IO_JOURNALLOCKED) == 0) {
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error) {
			fstrans_done(vp->v_mount);
			return error;
		}
	}*/

	// [preallocoff, endallocoff) is the fully-overwritten middle
	// region of the write; blocks there may be allocated without
	// first zero-filling the pages.
	preallocoff = round_page(blkroundup(chmp,
	    MAX(osize, uio->uio_offset)));
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
	endallocoff = nsize - blkoff(chmp, nsize);

	/*
	 * if we're increasing the file size, deal with expanding
	 * the fragment if there is one.
	 */
	if (nsize > osize && lblkno(chmp, osize) < NDADDR &&
	    lblkno(chmp, osize) != lblkno(chmp, nsize) &&
	    blkroundup(chmp, osize) != osize) {
		off_t eob;

		eob = blkroundup(chmp, osize);
		uvm_vnp_setwritesize(vp, eob);
		error = ufs_balloc_range(vp, osize, eob - osize, cred, aflag);
		if (error)
			goto out;
		if (flags & B_SYNC) {
			mutex_enter(vp->v_interlock);
			VOP_PUTPAGES(vp,
			    trunc_page(osize & chmp->chm_fs_bmask),
			    round_page(eob),
			    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
		}
	}

	// Main copy loop: one file-system block (at most) per iteration.
	while (uio->uio_resid > 0) {
		int ubc_flags = UBC_WRITE;
		bool overwrite; /* if we're overwrite a whole block */
		off_t newoff;

		if (ioflag & IO_DIRECT) {
			genfs_directio(vp, uio, ioflag | IO_JOURNALLOCKED);
		}

		oldoff = uio->uio_offset;
		blkoffset = blkoff(chmp, uio->uio_offset);
		bytelen = MIN(chmp->chm_fs_bsize - blkoffset, uio->uio_resid);
		if (bytelen == 0) {
			break;
		}

		/*
		 * if we're filling in a hole, allocate the blocks now and
		 * initialize the pages first. if we're extending the file,
		 * we can safely allocate blocks without initializing pages
		 * since the new blocks will be inaccessible until the write
		 * is complete.
		 */
		overwrite = uio->uio_offset >= preallocoff &&
		    uio->uio_offset < endallocoff;
		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
		    blkoff(chmp, uio->uio_offset) == 0 &&
		    (uio->uio_offset & PAGE_MASK) == 0) {
			vsize_t len;

			len = trunc_page(bytelen);
			len -= blkoff(chmp, len);
			if (len > 0) {
				overwrite = true;
				bytelen = len;
			}
		}

		newoff = oldoff + bytelen;
		if (vp->v_size < newoff) {
			uvm_vnp_setwritesize(vp, newoff);
		}

		if (!overwrite) {
			error = ufs_balloc_range(vp, uio->uio_offset, bytelen,
			    cred, aflag);
			if (error)
				break;
		} else {
			// Whole-block overwrite: allocate without page
			// initialization; UBC_FAULTBUSY keeps the pages
			// busy while they are filled.
			genfs_node_wrlock(vp);
			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
			    aflag, cred);
			genfs_node_unlock(vp);
			if (error)
				break;
			ubc_flags |= UBC_FAULTBUSY;
		}

		/*
		 * copy the data.
		 */
		ubc_flags |= UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    IO_ADV_DECODE(ioflag), ubc_flags);

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 *
		 * we should update the size even when uiomove failed.
		 */
		if (vp->v_size < newoff) {
			uvm_vnp_setsize(vp, newoff);
			extended = 1;
		}

		if (error)
			break;

		/*
		 * flush what we just wrote if necessary.
		 * XXXUBC simplistic async flushing.
		 */
		if (!async && oldoff >> 16 != uio->uio_offset >> 16) {
			mutex_enter(vp->v_interlock);
			error = VOP_PUTPAGES(vp, (oldoff >> 16) << 16,
			    (uio->uio_offset >> 16) << 16,
			    PGO_CLEANIT | PGO_JOURNALLOCKED);
			if (error)
				break;
		}
	}
out:
	// Synchronous writes flush the whole written range to disk.
	if (error == 0 && ioflag & IO_SYNC) {
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & chmp->chm_fs_bmask),
		    round_page(blkroundup(chmp, uio->uio_offset)),
		    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
	}

	ip->iflag |= IN_CHANGE | IN_UPDATE;
	// Clear setuid/setgid after a successful write, per the usual
	// UFS semantics.
	if (resid > uio->uio_resid && ap->a_cred &&
	    kauth_authorize_generic(ap->a_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
		ip->mode &= ~(ISUID | ISGID);
	}
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error) {
		// Undo the partial write: truncate back to the original
		// size and restore the uio to its pre-call state.
		(void) UFS_TRUNCATE(vp, osize, ioflag & IO_SYNC, ap->a_cred);
		uio->uio_offset -= resid - uio->uio_resid;
		uio->uio_resid = resid;
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC)
		error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
	//XXX hack, i write the next line after i know ip->i_size and vp->v_size don't equal
	chfs_set_vnode_size(vp, vp->v_size);

	//dbg("end file size (vp): %llu\n", (unsigned long long)vp->v_size);
	//dbg("end file size (ip): %llu\n", (unsigned long long)ip->i_size);
	KASSERT(vp->v_size == ip->size);
	fstrans_done(vp->v_mount);

	// Push the updated inode out to flash.
	mutex_enter(&chmp->chm_lock_mountfields);
	error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
	mutex_exit(&chmp->chm_lock_mountfields);

	//mutex_exit(&ip->inode_lock);
	//dbg("end\n");
	return (error);
}
  835. /* --------------------------------------------------------------------- */
  836. int
  837. chfs_fsync(void *v)
  838. {
  839. //dbg("fsync\n");
  840. struct vop_fsync_args /* {
  841. struct vnode *a_vp;
  842. kauth_cred_t a_cred;
  843. int a_flags;
  844. off_t offlo;
  845. off_t offhi;
  846. } */ *ap = v;
  847. struct vnode *vp = ap->a_vp;
  848. int wait;
  849. if (ap->a_flags & FSYNC_CACHE) {
  850. return ENODEV;
  851. }
  852. wait = (ap->a_flags & FSYNC_WAIT) != 0;
  853. vflushbuf(vp, wait);
  854. //struct chfs_inode *ip = VTOI(vp);
  855. //chfs_set_vnode_size(vp, ip->write_size);
  856. return 0;
  857. }
  858. /* --------------------------------------------------------------------- */
  859. int
  860. chfs_remove(void *v)
  861. {
  862. struct vnode *dvp = ((struct vop_remove_args *) v)->a_dvp;
  863. struct vnode *vp = ((struct vop_remove_args *) v)->a_vp;
  864. struct componentname *cnp = (((struct vop_remove_args *) v)->a_cnp);
  865. dbg("remove\n");
  866. KASSERT(VOP_ISLOCKED(dvp));
  867. KASSERT(VOP_ISLOCKED(vp));
  868. struct chfs_inode *ip = VTOI(vp);
  869. struct chfs_inode *parent = VTOI(dvp);
  870. int error = 0;
  871. KASSERT(ip->chvc->vno != ip->chvc->pvno);
  872. error = chfs_do_unlink(ip,
  873. parent, cnp->cn_nameptr, cnp->cn_namelen);
  874. vput(dvp);
  875. vput(vp);
  876. return error;
  877. }
  878. /* --------------------------------------------------------------------- */
/*
 * chfs_link - VOP_LINK for CHFS.
 *
 * Creates a hard link to vp under the name cnp in directory dvp.
 * Directories cannot be hard-linked (EISDIR) and cross-mount links
 * are refused (EXDEV).  dvp arrives locked and is released with
 * vput() on every path, per the VOP_LINK protocol.
 */
int
chfs_link(void *v)
{
	struct vnode *dvp = ((struct vop_link_args *) v)->a_dvp;
	struct vnode *vp = ((struct vop_link_args *) v)->a_vp;
	struct componentname *cnp = ((struct vop_link_args *) v)->a_cnp;
	struct chfs_inode *ip, *parent;
	int error = 0;

	// Hard links to directories are forbidden.
	if (vp->v_type == VDIR) {
		VOP_ABORTOP(dvp, cnp);
		error = EISDIR;
		goto out;
	}
	// Both vnodes must live on the same mount.
	if (dvp->v_mount != vp->v_mount) {
		VOP_ABORTOP(dvp, cnp);
		error = EXDEV;
		goto out;
	}
	// Lock the target too, unless it is the directory itself
	// (in which case it is already locked).
	if (dvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE))) {
		VOP_ABORTOP(dvp, cnp);
		goto out;
	}

	parent = VTOI(dvp);
	ip = VTOI(vp);

	error = chfs_do_link(ip,
	    parent, cnp->cn_nameptr, cnp->cn_namelen, vp->v_type);

	if (dvp != vp)
		VOP_UNLOCK(vp);
out:
	vput(dvp);
	return error;
}
  911. /* --------------------------------------------------------------------- */
/*
 * chfs_rename - VOP_RENAME for CHFS.
 *
 * Implemented as "unlink the target (if any), link the source under
 * the new name, unlink the old name".  NOTE(review): this routine has
 * several weak spots marked inline (ignored VFS_VGET error, the
 * chfs_do_link error being overwritten, unfinished locking TODOs);
 * a safe rewrite needs the full rename locking protocol, so the code
 * is kept as-is here.
 */
int
chfs_rename(void *v)
{
	struct vnode *fdvp = ((struct vop_rename_args *) v)->a_fdvp;
	struct vnode *fvp = ((struct vop_rename_args *) v)->a_fvp;
	struct componentname *fcnp = ((struct vop_rename_args *) v)->a_fcnp;
	struct vnode *tdvp = ((struct vop_rename_args *) v)->a_tdvp;
	struct vnode *tvp = ((struct vop_rename_args *) v)->a_tvp;
	struct componentname *tcnp = ((struct vop_rename_args *) v)->a_tcnp;
	struct chfs_inode *oldparent, *old;
	struct chfs_inode *newparent;
	struct chfs_dirent *fd;//, *oldfd;
	struct chfs_inode *ip;
	int error = 0;

	dbg("rename\n");

	KASSERT(VOP_ISLOCKED(tdvp));
	KASSERT(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp) == LK_EXCLUSIVE));

	oldparent = VTOI(fdvp);
	old = VTOI(fvp);
	newparent = VTOI(tdvp);
	// If the target exists, it must be removed first; a target
	// directory may only be replaced when it is empty.
	if (tvp) {
		dbg("tvp not null\n");
		ip = VTOI(tvp);
		if (tvp->v_type == VDIR) {
			//TODO: lock
			// A dirent with vno == 0 is a deleted entry that
			// is still on the list; any live vno means the
			// directory is not empty.
			// fd = ip->dents;
			// while (fd) {
			TAILQ_FOREACH(fd, &ip->dents, fds) {
				if (fd->vno) {
					//TODO: unlock
					error = ENOTEMPTY;
					goto out_unlocked;
				}
				// fd = fd->next;
			}
			//TODO: unlock
		}
		error = chfs_do_unlink(ip,
		    newparent, tcnp->cn_nameptr, tcnp->cn_namelen);
		vput(tvp);
	}
	// Re-obtain the source vnode (reusing tvp) and link it under the
	// new name, then remove the old name.
	// NOTE(review): the VFS_VGET error is ignored and the error of
	// chfs_do_link is immediately overwritten by chfs_do_unlink's
	// return value, so a failed link goes unreported — confirm and
	// fix upstream.
	VFS_VGET(tdvp->v_mount, old->ino, &tvp);
	ip = VTOI(tvp);
	// for (oldfd = oldparent->dents;
	//		oldfd->vno != old->ino;
	//		oldfd = oldfd->next);
	error = chfs_do_link(ip,
	    newparent, tcnp->cn_nameptr, tcnp->cn_namelen, tvp->v_type);
	error = chfs_do_unlink(old,
	    oldparent, fcnp->cn_nameptr, fcnp->cn_namelen);

	//out:
	// if (fchnode != tchnode)
	//		VOP_UNLOCK(fdvp, 0);

out_unlocked:
	// Release target nodes.
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp != NULL)
		vput(tvp);

	// Release source nodes.
	vrele(fdvp);
	vrele(fvp);
	return error;
}
  978. /* --------------------------------------------------------------------- */
  979. int
  980. chfs_mkdir(void *v)
  981. {
  982. struct vnode *dvp = ((struct vop_mkdir_args *) v)->a_dvp;
  983. struct vnode **vpp = ((struct vop_mkdir_args *)v)->a_vpp;
  984. struct componentname *cnp = ((struct vop_mkdir_args *) v)->a_cnp;
  985. struct vattr *vap = ((struct vop_mkdir_args *) v)->a_vap;
  986. dbg("mkdir()\n");
  987. int mode;
  988. mode = vap->va_mode & ACCESSPERMS;
  989. if ((mode & IFMT) == 0) {
  990. mode |= IFDIR;
  991. }
  992. KASSERT(vap->va_type == VDIR);
  993. return chfs_makeinode(mode, dvp, vpp, cnp, VDIR);
  994. }
  995. /* --------------------------------------------------------------------- */
  996. int
  997. chfs_rmdir(void *v)
  998. {
  999. struct vnode *dvp = ((struct vop_rmdir_args *) v)->a_dvp;
  1000. struct vnode *vp = ((struct vop_rmdir_args *) v)->a_vp;
  1001. struct componentname *cnp = ((struct vop_rmdir_args *) v)->a_cnp;
  1002. dbg("rmdir()\n");
  1003. KASSERT(VOP_ISLOCKED(dvp));
  1004. KASSERT(VOP_ISLOCKED(vp));
  1005. struct chfs_inode *ip = VTOI(vp);
  1006. struct chfs_inode *parent = VTOI(dvp);
  1007. struct chfs_dirent *fd;
  1008. int error = 0;
  1009. if (vp->v_type != VDIR) {
  1010. error = ENOTDIR;
  1011. goto out;
  1012. }
  1013. KASSERT(ip->chvc->vno != ip->chvc->pvno);
  1014. // for (fd = ip->dents; fd; fd = fd->next) {
  1015. TAILQ_FOREACH(fd, &ip->dents, fds) {
  1016. if (fd->vno) {
  1017. error = ENOTEMPTY;
  1018. goto out;
  1019. }
  1020. }
  1021. error = chfs_do_unlink(ip,
  1022. parent, cnp->cn_nameptr, cnp->cn_namelen);
  1023. out:
  1024. vput(dvp);
  1025. vput(vp);
  1026. return error;
  1027. }
  1028. /* --------------------------------------------------------------------- */
  1029. int
  1030. chfs_symlink(void *v)
  1031. {
  1032. struct vnode *dvp = ((struct vop_symlink_args *) v)->a_dvp;
  1033. struct vnode **vpp = ((struct vop_symlink_args *) v)->a_vpp;
  1034. struct componentname *cnp = ((struct vop_symlink_args *) v)->a_cnp;
  1035. struct vattr *vap = ((struct vop_symlink_args *) v)->a_vap;
  1036. char *target = ((struct vop_symlink_args *) v)->a_target;
  1037. struct ufsmount *ump;
  1038. struct chfs_mount *chmp;
  1039. struct vnode *vp;
  1040. struct chfs_inode *ip;
  1041. int len, err;
  1042. struct chfs_full_dnode *fd;
  1043. struct buf *bp;
  1044. dbg("symlink()\n");
  1045. ump = VFSTOUFS(dvp->v_mount);
  1046. chmp = ump->um_chfs;
  1047. err = chfs_makeinode(IFLNK | vap->va_mode, dvp, vpp, cnp, VLNK);
  1048. if (err)
  1049. return (err);
  1050. VN_KNOTE(dvp, NOTE_WRITE);
  1051. vp = *vpp;
  1052. len = strlen(target);
  1053. ip = VTOI(vp);
  1054. /* TODO max symlink len instead of "100" */
  1055. if (len < 100) {
  1056. ip->target = kmem_alloc(len, KM_SLEEP);
  1057. memcpy(ip->target, target, len);
  1058. chfs_set_vnode_size(vp, len);
  1059. ip->iflag |= IN_CHANGE | IN_UPDATE;
  1060. bp = getiobuf(vp, true);
  1061. bp->b_bufsize = bp->b_resid = len;
  1062. bp->b_data = kmem_alloc(len, KM_SLEEP);
  1063. memcpy(bp->b_data, target, len);
  1064. bp->b_blkno = 0;
  1065. fd = chfs_alloc_full_dnode();
  1066. mutex_enter(&chmp->chm_lock_mountfields);
  1067. err = chfs_write_flash_dnode(chmp, vp, bp, fd);
  1068. if (err) {
  1069. mutex_exit(&chmp->chm_lock_mountfields);
  1070. goto out;
  1071. }
  1072. err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
  1073. if (err) {
  1074. mutex_exit(&chmp->chm_lock_mountfields);
  1075. goto out;
  1076. }
  1077. mutex_exit(&chmp->chm_lock_mountfields);
  1078. kmem_free(bp->b_data, len);
  1079. putiobuf(bp);
  1080. uvm_vnp_setsize(vp, len);
  1081. } else {
  1082. err = vn_rdwr(UIO_WRITE, vp, target, len, (off_t)0,
  1083. UIO_SYSSPACE, IO_NODELOCKED, cnp->cn_cred,
  1084. (size_t *)0, NULL);
  1085. }
  1086. out:
  1087. if (err)
  1088. vput(vp);
  1089. return (err);
  1090. }
  1091. /* --------------------------------------------------------------------- */
/*
 * VOP_READDIR: emit ".", "..", then the directory's entry list.
 *
 * Directory offsets are synthetic: CHFS_OFFSET_DOT and CHFS_OFFSET_DOTDOT
 * address the two fabricated entries, CHFS_OFFSET_FIRST is the first real
 * dirent and each further entry is one higher; CHFS_OFFSET_EOF marks the
 * end.  chfs_filldir() returns -1 when the user buffer is full, which is
 * treated as success-so-far, not as an error.
 */
int
chfs_readdir(void *v)
{
	struct vnode *vp = ((struct vop_readdir_args *) v)->a_vp;
	struct uio *uio = ((struct vop_readdir_args *) v)->a_uio;
	int *eofflag = ((struct vop_readdir_args *) v)->a_eofflag;

	int error = 0;
	off_t skip, offset;
	struct chfs_inode *ip;
	struct chfs_dirent *fd;

	struct ufsmount *ump;
	struct chfs_mount *chmp;
	struct chfs_vnode_cache *chvc;

	KASSERT(VOP_ISLOCKED(vp));

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	ip = VTOI(vp);

	/* uiomove in chfs_filldir automatically increments the
	 * uio_offset by an arbitrary size, so we discard any change
	 * to uio_offset and set it to our own value on return
	 */
	offset = uio->uio_offset;

	if (offset == CHFS_OFFSET_DOT) {
		error = chfs_filldir(uio, ip->ino, ".", 1, VDIR);
		if (error == -1) {
			/* User buffer full: stop without error. */
			error = 0;
			goto outok;
		} else if (error != 0)
			goto outok;

		offset = CHFS_OFFSET_DOTDOT;
	}

	if (offset == CHFS_OFFSET_DOTDOT) {
		/* ".." needs the parent's vnode number, looked up in the
		 * vnode cache under chm_lock_vnocache. */
		ump = VFSTOUFS(vp->v_mount);
		chmp = ump->um_chfs;
		mutex_enter(&chmp->chm_lock_vnocache);
		chvc = chfs_vnode_cache_get(chmp, ip->ino);
		mutex_exit(&chmp->chm_lock_vnocache);

		error = chfs_filldir(uio, chvc->pvno, "..", 2, VDIR);
		if (error == -1) {
			error = 0;
			goto outok;
		} else if (error != 0) {
			goto outok;
		}

		if (TAILQ_EMPTY(&ip->dents)) {
			offset = CHFS_OFFSET_EOF;
		} else {
			offset = CHFS_OFFSET_FIRST;
		}
	}

	if (offset != CHFS_OFFSET_EOF) {
		skip = offset - CHFS_OFFSET_FIRST;

		TAILQ_FOREACH(fd, &ip->dents, fds) {
			/* seek to offset by skipping items */
			/* XXX race conditions by changed dirent? */
			if (skip > 0) {
				skip--;
				continue;
			}

			/* Entries with vno == 0 are dead placeholders and
			 * are skipped (they still consume an offset slot). */
			if (fd->vno != 0) {
				error = chfs_filldir(uio, fd->vno,
				    fd->name, fd->nsize, fd->type);
				if (error == -1) {
					error = 0;
					goto outok;
				} else if (error != 0) {
					dbg("err %d\n", error);
					goto outok;
				}
			}
			offset++;
		}
	}
	offset = CHFS_OFFSET_EOF;

outok:
	/* Report our synthetic offset, overriding uiomove's changes. */
	uio->uio_offset = offset;

	if (eofflag != NULL) {
		*eofflag = (error == 0 &&
		    uio->uio_offset == CHFS_OFFSET_EOF);
	}

out:
	KASSERT(VOP_ISLOCKED(vp));

	return error;
}
  1179. /* --------------------------------------------------------------------- */
  1180. int
  1181. chfs_readlink(void *v)
  1182. {
  1183. struct vnode *vp = ((struct vop_readlink_args *) v)->a_vp;
  1184. struct uio *uio = ((struct vop_readlink_args *) v)->a_uio;
  1185. kauth_cred_t cred = ((struct vop_readlink_args *) v)->a_cred;
  1186. struct chfs_inode *ip = VTOI(vp);
  1187. dbg("readlink()\n");
  1188. /* TODO max symlink len instead of "100" */
  1189. if (ip->size < 100) {
  1190. uiomove(ip->target, ip->size, uio);
  1191. return (0);
  1192. }
  1193. return (VOP_READ(vp, uio, 0, cred));
  1194. }
  1195. /* --------------------------------------------------------------------- */
  1196. int
  1197. chfs_inactive(void *v)
  1198. {
  1199. struct vnode *vp = ((struct vop_inactive_args *) v)->a_vp;
  1200. struct chfs_inode *ip = VTOI(vp);
  1201. struct chfs_vnode_cache *chvc;
  1202. dbg("inactive | vno: %llu\n", (unsigned long long)ip->ino);
  1203. KASSERT(VOP_ISLOCKED(vp));
  1204. if (ip->ino) {
  1205. chvc = ip->chvc;
  1206. if (chvc->nlink)
  1207. *((struct vop_inactive_args *) v)->a_recycle = 0;
  1208. } else {
  1209. *((struct vop_inactive_args *) v)->a_recycle = 1;
  1210. }
  1211. VOP_UNLOCK(vp);
  1212. return 0;
  1213. }
  1214. /* --------------------------------------------------------------------- */
/*
 * VOP_RECLAIM: tear down the in-core inode and detach it from the vnode.
 * Marks the vnode cache entry CHECKEDABSENT, flushes pending metadata,
 * frees the fragment tree and dirent list, and finally returns the inode
 * to its pool.
 */
int
chfs_reclaim(void *v)
{
	struct vop_reclaim_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct chfs_inode *ip = VTOI(vp);
	struct chfs_mount *chmp = ip->chmp;
	struct chfs_dirent *fd;

	/* Update the vnode cache state under chm_lock_vnocache. */
	mutex_enter(&chmp->chm_lock_vnocache);
	chfs_vnode_cache_set_state(chmp,
	    ip->chvc, VNO_STATE_CHECKEDABSENT);
	mutex_exit(&chmp->chm_lock_vnocache);

	/* Flush pending inode metadata before the node goes away. */
	chfs_update(vp, NULL, NULL, UPDATE_CLOSE);

	/* Data-bearing node types carry a fragment tree; free it. */
	if (vp->v_type == VREG || vp->v_type == VLNK || vp->v_type == VCHR ||
	    vp->v_type == VBLK || vp->v_type == VFIFO || vp->v_type == VSOCK)
		chfs_kill_fragtree(&ip->fragtree);

	/* Drain and free the directory-entry list. */
	fd = TAILQ_FIRST(&ip->dents);
	while (fd) {
		TAILQ_REMOVE(&ip->dents, fd, fds);
		chfs_free_dirent(fd);
		fd = TAILQ_FIRST(&ip->dents);
	}

	/* Purge name-cache entries and drop the device vnode, if any. */
	cache_purge(vp);
	if (ip->devvp) {
		vrele(ip->devvp);
		ip->devvp = 0;
	}
	/* Remove from the inode hash, then free the inode itself. */
	chfs_ihashrem(ip);

	genfs_node_destroy(vp);
	pool_put(&chfs_inode_pool, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
  1252. /* --------------------------------------------------------------------- */
  1253. int
  1254. chfs_advlock(void *v)
  1255. {
  1256. //struct vnode *vp = ((struct vop_advlock_args *) v)->a_vp;
  1257. dbg("advlock()\n");
  1258. /*
  1259. struct chfs_node *node;
  1260. node = VP_TO_CHFS_NODE(vp);
  1261. return lf_advlock(v, &node->chn_lockf, node->chn_size);
  1262. */
  1263. return 0;
  1264. }
  1265. /* --------------------------------------------------------------------- */
/*
 * VOP_STRATEGY: perform the actual flash I/O described by a buf.
 * Reads are served by chfs_read_data(); writes allocate a full data
 * node, push it to flash, and attach it to the inode.
 *
 * NOTE(review): on failure "err" is returned but bp->b_error is never
 * set before biodone(), so consumers that inspect only the buffer may
 * miss the error — confirm against the biodone(9) contract.
 */
int
chfs_strategy(void *v)
{
	struct vop_strategy_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct chfs_full_dnode *fd;
	struct buf *bp = ap->a_bp;
	struct vnode *vp = ap->a_vp;
	struct chfs_inode *ip = VTOI(vp);
	struct chfs_mount *chmp = ip->chmp;
	int read = (bp->b_flags & B_READ) ? 1 : 0;
	int err = 0;

	if (read) {
		err = chfs_read_data(chmp, vp, bp);
	} else {
		/* Allocate the in-core descriptor for the new data node. */
		fd = chfs_alloc_full_dnode();

		mutex_enter(&chmp->chm_lock_mountfields);

		err = chfs_write_flash_dnode(chmp, vp, bp, fd);
		if (err) {
			mutex_exit(&chmp->chm_lock_mountfields);
			goto out;
		}

		/* NOTE(review): this error is propagated via the return
		 * value only; the original in-line error check here was
		 * commented out upstream. */
		err = chfs_add_full_dnode_to_inode(chmp, ip, fd);

		mutex_exit(&chmp->chm_lock_mountfields);
	}
out:
	/* Signal I/O completion regardless of outcome. */
	biodone(bp);
	return err;
}
  1308. int
  1309. chfs_bmap(void *v)
  1310. {
  1311. struct vop_bmap_args /* {
  1312. struct vnode *a_vp;
  1313. daddr_t a_bn;
  1314. struct vnode **a_vpp;
  1315. daddr_t *a_bnp;
  1316. int *a_runp;
  1317. int *a_runb;
  1318. } */ *ap = v;
  1319. if (ap->a_vpp != NULL)
  1320. *ap->a_vpp = ap->a_vp;
  1321. if (ap->a_bnp != NULL)
  1322. *ap->a_bnp = ap->a_bn;
  1323. if (ap->a_runp != NULL)
  1324. *ap->a_runp = 0;
  1325. return (0);
  1326. }
  1327. /*
  1328. * vnode operations vector used for files stored in a chfs file system.
  1329. */
/* Dispatch table for regular chfs vnodes, terminated by the { NULL, NULL }
 * sentinel.  chfs_ handlers are implemented in this file; genfs_, ufs_ and
 * vn_ entries are generic fallbacks. */
int
(**chfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc chfs_vnodeop_entries[] =
	{
		{ &vop_default_desc, vn_default_error },
		{ &vop_lookup_desc, chfs_lookup },
		{ &vop_create_desc, chfs_create },
		{ &vop_mknod_desc, chfs_mknod },
		{ &vop_open_desc, chfs_open },
		{ &vop_close_desc, chfs_close },
		{ &vop_access_desc, chfs_access },
		{ &vop_getattr_desc, chfs_getattr },
		{ &vop_setattr_desc, chfs_setattr },
		{ &vop_read_desc, chfs_read },
		{ &vop_write_desc, chfs_write },
		{ &vop_ioctl_desc, genfs_enoioctl },
		{ &vop_fcntl_desc, genfs_fcntl },
		{ &vop_poll_desc, genfs_poll },
		{ &vop_kqfilter_desc, genfs_kqfilter },
		{ &vop_revoke_desc, genfs_revoke },
		{ &vop_mmap_desc, genfs_mmap },
		{ &vop_fsync_desc, chfs_fsync },
		{ &vop_seek_desc, genfs_seek },
		{ &vop_remove_desc, chfs_remove },
		{ &vop_link_desc, chfs_link },
		{ &vop_rename_desc, chfs_rename },
		{ &vop_mkdir_desc, chfs_mkdir },
		{ &vop_rmdir_desc, chfs_rmdir },
		{ &vop_symlink_desc, chfs_symlink },
		{ &vop_readdir_desc, chfs_readdir },
		{ &vop_readlink_desc, chfs_readlink },
		{ &vop_abortop_desc, genfs_abortop },
		{ &vop_inactive_desc, chfs_inactive },
		{ &vop_reclaim_desc, chfs_reclaim },
		{ &vop_lock_desc, genfs_lock },
		{ &vop_unlock_desc, genfs_unlock },
		{ &vop_bmap_desc, chfs_bmap },
		{ &vop_strategy_desc, chfs_strategy },
		{ &vop_print_desc, ufs_print },
		{ &vop_pathconf_desc, ufs_pathconf },
		{ &vop_islocked_desc, genfs_islocked },
		{ &vop_advlock_desc, chfs_advlock },
		{ &vop_bwrite_desc, vn_bwrite },
		{ &vop_getpages_desc, genfs_getpages },
		{ &vop_putpages_desc, genfs_putpages },
		{ NULL, NULL } };

/* Registration descriptor pairing the vector pointer with its entries. */
const struct vnodeopv_desc chfs_vnodeop_opv_desc =
	{ &chfs_vnodeop_p, chfs_vnodeop_entries };
  1378. /* --------------------------------------------------------------------- */
  1379. /*
  1380. * vnode operations vector used for special devices stored in a chfs
  1381. * file system.
  1382. */
/* Dispatch table for special-device vnodes on chfs.  Device I/O is
 * delegated to the spec_ handlers; attribute, lifecycle and locking ops
 * stay with chfs/genfs so on-flash metadata remains consistent. */
int
(**chfs_specop_p)(void *);
const struct vnodeopv_entry_desc chfs_specop_entries[] =
	{
		{ &vop_default_desc, vn_default_error },
		{ &vop_lookup_desc, spec_lookup },
		{ &vop_create_desc, spec_create },
		{ &vop_mknod_desc, spec_mknod },
		{ &vop_open_desc, spec_open },
		{ &vop_close_desc, ufsspec_close },
		{ &vop_access_desc, chfs_access },
		{ &vop_getattr_desc, chfs_getattr },
		{ &vop_setattr_desc, chfs_setattr },
		{ &vop_read_desc, chfs_read },
		{ &vop_write_desc, chfs_write },
		{ &vop_ioctl_desc, spec_ioctl },
		{ &vop_fcntl_desc, genfs_fcntl },
		{ &vop_poll_desc, spec_poll },
		{ &vop_kqfilter_desc, spec_kqfilter },
		{ &vop_revoke_desc, spec_revoke },
		{ &vop_mmap_desc, spec_mmap },
		{ &vop_fsync_desc, spec_fsync },
		{ &vop_seek_desc, spec_seek },
		{ &vop_remove_desc, spec_remove },
		{ &vop_link_desc, spec_link },
		{ &vop_rename_desc, spec_rename },
		{ &vop_mkdir_desc, spec_mkdir },
		{ &vop_rmdir_desc, spec_rmdir },
		{ &vop_symlink_desc, spec_symlink },
		{ &vop_readdir_desc, spec_readdir },
		{ &vop_readlink_desc, spec_readlink },
		{ &vop_abortop_desc, spec_abortop },
		{ &vop_inactive_desc, chfs_inactive },
		{ &vop_reclaim_desc, chfs_reclaim },
		{ &vop_lock_desc, genfs_lock },
		{ &vop_unlock_desc, genfs_unlock },
		{ &vop_bmap_desc, spec_bmap },
		{ &vop_strategy_desc, spec_strategy },
		{ &vop_print_desc, ufs_print },
		{ &vop_pathconf_desc, spec_pathconf },
		{ &vop_islocked_desc, genfs_islocked },
		{ &vop_advlock_desc, spec_advlock },
		{ &vop_bwrite_desc, vn_bwrite },
		{ &vop_getpages_desc, spec_getpages },
		{ &vop_putpages_desc, spec_putpages },
		{ NULL, NULL } };

/* Registration descriptor pairing the vector pointer with its entries. */
const struct vnodeopv_desc chfs_specop_opv_desc =
	{ &chfs_specop_p, chfs_specop_entries };
  1431. /* --------------------------------------------------------------------- */
  1432. /*
  1433. * vnode operations vector used for fifos stored in a chfs file system.
  1434. */
/* Dispatch table for FIFO vnodes on chfs.  Most operations bypass to the
 * generic FIFO implementation (vn_fifo_bypass); attribute, lifecycle and
 * locking ops stay with chfs/genfs. */
int
(**chfs_fifoop_p)(void *);
const struct vnodeopv_entry_desc chfs_fifoop_entries[] =
	{
		{ &vop_default_desc, vn_default_error },
		{ &vop_lookup_desc, vn_fifo_bypass },
		{ &vop_create_desc, vn_fifo_bypass },
		{ &vop_mknod_desc, vn_fifo_bypass },
		{ &vop_open_desc, vn_fifo_bypass },
		{ &vop_close_desc, ufsfifo_close },
		{ &vop_access_desc, chfs_access },
		{ &vop_getattr_desc, chfs_getattr },
		{ &vop_setattr_desc, chfs_setattr },
		{ &vop_read_desc, ufsfifo_read },
		{ &vop_write_desc, ufsfifo_write },
		{ &vop_ioctl_desc, vn_fifo_bypass },
		{ &vop_fcntl_desc, genfs_fcntl },
		{ &vop_poll_desc, vn_fifo_bypass },
		{ &vop_kqfilter_desc, vn_fifo_bypass },
		{ &vop_revoke_desc, vn_fifo_bypass },
		{ &vop_mmap_desc, vn_fifo_bypass },
		{ &vop_fsync_desc, vn_fifo_bypass },
		{ &vop_seek_desc, vn_fifo_bypass },
		{ &vop_remove_desc, vn_fifo_bypass },
		{ &vop_link_desc, vn_fifo_bypass },
		{ &vop_rename_desc, vn_fifo_bypass },
		{ &vop_mkdir_desc, vn_fifo_bypass },
		{ &vop_rmdir_desc, vn_fifo_bypass },
		{ &vop_symlink_desc, vn_fifo_bypass },
		{ &vop_readdir_desc, vn_fifo_bypass },
		{ &vop_readlink_desc, vn_fifo_bypass },
		{ &vop_abortop_desc, vn_fifo_bypass },
		{ &vop_inactive_desc, chfs_inactive },
		{ &vop_reclaim_desc, chfs_reclaim },
		{ &vop_lock_desc, genfs_lock },
		{ &vop_unlock_desc, genfs_unlock },
		{ &vop_bmap_desc, vn_fifo_bypass },
		{ &vop_strategy_desc, vn_fifo_bypass },
		{ &vop_print_desc, ufs_print },
		{ &vop_pathconf_desc, vn_fifo_bypass },
		{ &vop_islocked_desc, genfs_islocked },
		{ &vop_advlock_desc, vn_fifo_bypass },
		{ &vop_bwrite_desc, genfs_nullop },
		{ &vop_getpages_desc, genfs_badop },
		{ &vop_putpages_desc, vn_fifo_bypass },
		{ NULL, NULL } };

/* Registration descriptor pairing the vector pointer with its entries. */
const struct vnodeopv_desc chfs_fifoop_opv_desc =
	{ &chfs_fifoop_p, chfs_fifoop_entries };