/core/10.5/fusefs/fuse_internal.h
/*
 * Copyright (C) 2006-2008 Google. All Rights Reserved.
 * Amit Singh <singh@>
 */

#ifndef _FUSE_INTERNAL_H_
#define _FUSE_INTERNAL_H_

#include <kern/clock.h>
#include <sys/types.h>
#include <sys/kauth.h>
#include <sys/kernel_types.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/ubc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/xattr.h>

#include <fuse_ioctl.h>
#include "fuse_ipc.h"
#include "fuse_kludges.h"
#include "fuse_locking.h"
#include "fuse_node.h"

#if M_MACFUSE_ENABLE_KUNC
#include <UserNotification/KUNCUserNotifications.h>
#else
enum {
    kKUNCDefaultResponse   = 0,
    kKUNCAlternateResponse = 1,
    kKUNCOtherResponse     = 2,
    kKUNCCancelResponse    = 3
};
#endif

struct fuse_attr;
struct fuse_data;
struct fuse_dispatcher;
struct fuse_filehandle;
struct fuse_iov;
struct fuse_ticket;

/* time */

#define fuse_timespec_add(vvp, uvp)            \
    do {                                       \
        (vvp)->tv_sec += (uvp)->tv_sec;        \
        (vvp)->tv_nsec += (uvp)->tv_nsec;      \
        if ((vvp)->tv_nsec >= 1000000000) {    \
            (vvp)->tv_sec++;                   \
            (vvp)->tv_nsec -= 1000000000;      \
        }                                      \
    } while (0)

#define fuse_timespec_cmp(tvp, uvp, cmp)       \
    (((tvp)->tv_sec == (uvp)->tv_sec) ?        \
     ((tvp)->tv_nsec cmp (uvp)->tv_nsec) :     \
     ((tvp)->tv_sec cmp (uvp)->tv_sec))
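/*
 * Typical usage (illustrative sketch): cache_attrs() further below records
 * attr_valid as an absolute uptime deadline, so an attribute-cache lookup
 * can test freshness roughly as follows:
 *
 *     struct timespec now;
 *     nanouptime(&now);
 *     if (fuse_timespec_cmp(&now, &VTOFUD(vp)->attr_valid, <=)) {
 *         // cached attributes are still valid
 *     }
 */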
/* miscellaneous */

#if M_MACFUSE_ENABLE_UNSUPPORTED
extern const char *vnode_getname(vnode_t vp);
extern void vnode_putname(const char *name);
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

static __inline__
int
fuse_match_cred(kauth_cred_t daemoncred, kauth_cred_t requestcred)
{
    if ((daemoncred->cr_uid == requestcred->cr_uid) &&
        (daemoncred->cr_uid == requestcred->cr_ruid) &&

        // THINK_ABOUT_THIS_LATER
        // (daemoncred->cr_uid == requestcred->cr_svuid) &&

        (daemoncred->cr_groups[0] == requestcred->cr_groups[0]) &&
        (daemoncred->cr_groups[0] == requestcred->cr_rgid) &&
        (daemoncred->cr_groups[0] == requestcred->cr_svgid)) {
        return 0;
    }

    return EPERM;
}

static __inline__
int
fuse_vfs_context_issuser(vfs_context_t context)
{
    return (vfs_context_ucred(context)->cr_uid == 0);
}

static __inline__
int
fuse_isautocache_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_AUTO_CACHE);
}

#define fuse_isdeadfs_nop(vp) 0

static __inline__
int
fuse_isdeadfs_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_DEAD);
}

static __inline__
int
fuse_isdeadfs(vnode_t vp)
{
    if (VTOFUD(vp)->flag & FN_REVOKED) {
        return 1;
    }

    return fuse_isdeadfs_mp(vnode_mount(vp));
}

static __inline__
int
fuse_isdeadfs_fs(vnode_t vp)
{
    return fuse_isdeadfs_mp(vnode_mount(vp));
}

static __inline__
int
fuse_isdirectio(vnode_t vp)
{
    /* Try global first. */
    if (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_DIRECT_IO) {
        return 1;
    }

    return (VTOFUD(vp)->flag & FN_DIRECT_IO);
}

static __inline__
int
fuse_isdirectio_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_DIRECT_IO);
}

static __inline__
int
fuse_isnoattrcache(vnode_t vp)
{
    /* Try global first. */
    if (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_NO_ATTRCACHE) {
        return 1;
    }

    return 0;
}

static __inline__
int
fuse_isnoattrcache_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_NO_ATTRCACHE);
}

static __inline__
int
fuse_isnoreadahead(vnode_t vp)
{
    /* Try global first. */
    if (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_NO_READAHEAD) {
        return 1;
    }

    /* In our model, direct_io implies no readahead. */
    return fuse_isdirectio(vp);
}

static __inline__
int
fuse_isnosynconclose(vnode_t vp)
{
    if (fuse_isdirectio(vp)) {
        return 0;
    }

    return (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_NO_SYNCONCLOSE);
}

static __inline__
int
fuse_isnosyncwrites_mp(mount_t mp)
{
    /* direct_io implies we won't have nosyncwrites. */
    if (fuse_isdirectio_mp(mp)) {
        return 0;
    }

    return (fuse_get_mpdata(mp)->dataflags & FSESS_NO_SYNCWRITES);
}

static __inline__
void
fuse_setnosyncwrites_mp(mount_t mp)
{
    vfs_clearflags(mp, MNT_SYNCHRONOUS);
    vfs_setflags(mp, MNT_ASYNC);
    fuse_get_mpdata(mp)->dataflags |= FSESS_NO_SYNCWRITES;
}

static __inline__
void
fuse_clearnosyncwrites_mp(mount_t mp)
{
    if (!vfs_issynchronous(mp)) {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
        fuse_get_mpdata(mp)->dataflags &= ~FSESS_NO_SYNCWRITES;
    }
}

static __inline__
int
fuse_isnoubc(vnode_t vp)
{
    /* Try global first. */
    if (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_NO_UBC) {
        return 1;
    }

    /* In our model, direct_io implies no UBC. */
    return fuse_isdirectio(vp);
}

static __inline__
int
fuse_isnoubc_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_NO_UBC);
}

static __inline__
int
fuse_isnegativevncache_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_NEGATIVE_VNCACHE);
}

static __inline__
int
fuse_isnovncache(vnode_t vp)
{
    /* Try global first. */
    if (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_NO_VNCACHE) {
        return 1;
    }

    /* In our model, direct_io implies no vncache for this vnode. */
    return fuse_isdirectio(vp);
}

static __inline__
int
fuse_isnovncache_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_NO_VNCACHE);
}

static __inline__
int
fuse_isextendedsecurity(vnode_t vp)
{
    return (fuse_get_mpdata(vnode_mount(vp))->dataflags &
            FSESS_EXTENDED_SECURITY);
}

static __inline__
int
fuse_isextendedsecurity_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_EXTENDED_SECURITY);
}

static __inline__
int
fuse_isdefaultpermissions(vnode_t vp)
{
    return (fuse_get_mpdata(vnode_mount(vp))->dataflags &
            FSESS_DEFAULT_PERMISSIONS);
}

static __inline__
int
fuse_isdefaultpermissions_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_DEFAULT_PERMISSIONS);
}

static __inline__
int
fuse_isdeferpermissions(vnode_t vp)
{
    return (fuse_get_mpdata(vnode_mount(vp))->dataflags &
            FSESS_DEFER_PERMISSIONS);
}

static __inline__
int
fuse_isdeferpermissions_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_DEFER_PERMISSIONS);
}

static __inline__
int
fuse_isxtimes(vnode_t vp)
{
    return (fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_XTIMES);
}

static __inline__
int
fuse_isxtimes_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_XTIMES);
}

static __inline__
int
fuse_issparse_mp(mount_t mp)
{
    return (fuse_get_mpdata(mp)->dataflags & FSESS_SPARSE);
}

static __inline__
uint32_t
fuse_round_powerof2(uint32_t size)
{
    uint32_t result = 512;

    size = size & 0x7FFFFFFFU; /* clip at 2G */

    while (result < size) {
        result <<= 1;
    }

    return result;
}

static __inline__
uint32_t
fuse_round_size(uint32_t size, uint32_t b_min, uint32_t b_max)
{
    uint32_t candidate = fuse_round_powerof2(size);

    /* We assume that b_min and b_max will already be powers of 2. */

    if (candidate < b_min) {
        candidate = b_min;
    }

    if (candidate > b_max) {
        candidate = b_max;
    }

    return candidate;
}
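/*
 * Worked example (illustrative): fuse_round_size() rounds up to a power of
 * two and then clamps to [b_min, b_max]. Assuming b_min = 512 and
 * b_max = 1048576:
 *
 *     fuse_round_size(3000, 512, 1048576)    == 4096
 *     fuse_round_size(100, 512, 1048576)     == 512      (raised to b_min)
 *     fuse_round_size(1 << 21, 512, 1048576) == 1048576  (capped at b_max)
 */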
static __inline__
int
fuse_skip_apple_double_mp(mount_t mp, char *nameptr, long namelen)
{
#define DS_STORE ".DS_Store"
    int ismpoption = fuse_get_mpdata(mp)->dataflags & FSESS_NO_APPLEDOUBLE;

    if (ismpoption && nameptr) {
        /* This _will_ allow just "._", that is, a namelen of 2. */
        if (namelen > 2) {
            if ((namelen == ((sizeof(DS_STORE)/sizeof(char)) - 1)) &&
                (bcmp(nameptr, DS_STORE, sizeof(DS_STORE)) == 0)) {
                return 1;
            } else if (nameptr[0] == '.' && nameptr[1] == '_') {
                return 1;
            }
        }
    }

    return 0;
}

static __inline__
int
fuse_blanket_deny(vnode_t vp, vfs_context_t context)
{
    mount_t mp = vnode_mount(vp);
    struct fuse_data *data = fuse_get_mpdata(mp);
    int issuser = fuse_vfs_context_issuser(context);
    int isvroot = vnode_isvroot(vp);

    /* if allow_other is set */
    if (data->dataflags & FSESS_ALLOW_OTHER) {
        return 0;
    }

    /* if allow_root is set */
    if (issuser && (data->dataflags & FSESS_ALLOW_ROOT)) {
        return 0;
    }

    /* if this is the user who mounted the fs */
    if (fuse_match_cred(data->daemoncred, vfs_context_ucred(context)) == 0) {
        return 0;
    }

    if (!(data->dataflags & FSESS_INITED) && isvroot && issuser) {
        return 0;
    }

    if (fuse_isdeadfs(vp) && isvroot) {
        return 0;
    }

    /* If kernel itself, allow. */
    if (vfs_context_pid(context) == 0) {
        return 0;
    }

    return 1;
}

#define CHECK_BLANKET_DENIAL(vp, context, err)  \
    {                                           \
        if (fuse_blanket_deny(vp, context)) {   \
            return err;                         \
        }                                       \
    }
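/*
 * Typical usage (illustrative sketch; the handler name and error value are
 * only examples): a vnode operation applies the blanket check at entry:
 *
 *     static int
 *     fuse_vnop_example(vnode_t vp, vfs_context_t context)
 *     {
 *         CHECK_BLANKET_DENIAL(vp, context, ENOENT);
 *         ...
 *     }
 */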
/* access */

/* FN_ACCESS_NOOP is in fuse_node.h */

#define FACCESS_VA_VALID   0x00000001
#define FACCESS_DO_ACCESS  0x00000002
#define FACCESS_NOCHECKSPY 0x00000004
#define FACCESS_FROM_VNOP  0x00000008

struct fuse_access_param {
    uid_t    xuid;
    gid_t    xgid;
    uint32_t facc_flags;
};

int
fuse_internal_access(vnode_t vp,
                     int action,
                     vfs_context_t context,
                     struct fuse_access_param *facp);

/* attributes */

int
fuse_internal_loadxtimes(vnode_t vp, struct vnode_attr *out_vap,
                         vfs_context_t context);

int
fuse_internal_attr_vat2fsai(mount_t mp,
                            vnode_t vp,
                            struct vnode_attr *vap,
                            struct fuse_setattr_in *fsai,
                            uint64_t *newsize);

static __inline__
void
fuse_internal_attr_fat2vat(vnode_t vp,
                           struct fuse_attr *fat,
                           struct vnode_attr *vap)
{
    struct timespec t;
    mount_t mp = vnode_mount(vp);
    struct fuse_data *data = fuse_get_mpdata(mp);
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

    VATTR_INIT(vap);

    VATTR_RETURN(vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]);
    VATTR_RETURN(vap, va_fileid, fat->ino);
    VATTR_RETURN(vap, va_linkid, fat->ino);

    /*
     * If we have asynchronous writes enabled, our local in-kernel size
     * takes precedence over what the daemon thinks.
     */
    /* ATTR_FUDGE_CASE */
    if (!vfs_issynchronous(mp)) {
        fat->size = fvdat->filesize;
    }
    VATTR_RETURN(vap, va_data_size, fat->size);

    /*
     * The kernel will compute the following for us if we leave them
     * untouched (and have sane values in statvfs):
     *
     * va_total_size
     * va_data_alloc
     * va_total_alloc
     */
    if (fuse_issparse_mp(mp)) {
        VATTR_RETURN(vap, va_data_alloc, fat->blocks);
    }

    t.tv_sec = (typeof(t.tv_sec))fat->atime; /* XXX: truncation */
    t.tv_nsec = fat->atimensec;
    VATTR_RETURN(vap, va_access_time, t);

    t.tv_sec = (typeof(t.tv_sec))fat->ctime; /* XXX: truncation */
    t.tv_nsec = fat->ctimensec;
    VATTR_RETURN(vap, va_change_time, t);

    t.tv_sec = (typeof(t.tv_sec))fat->mtime; /* XXX: truncation */
    t.tv_nsec = fat->mtimensec;
    VATTR_RETURN(vap, va_modify_time, t);

    t.tv_sec = (typeof(t.tv_sec))fat->crtime; /* XXX: truncation */
    t.tv_nsec = fat->crtimensec;
    VATTR_RETURN(vap, va_create_time, t);

    VATTR_RETURN(vap, va_mode, fat->mode & ~S_IFMT);
    VATTR_RETURN(vap, va_nlink, fat->nlink);
    VATTR_RETURN(vap, va_uid, fat->uid);
    VATTR_RETURN(vap, va_gid, fat->gid);
    VATTR_RETURN(vap, va_rdev, fat->rdev);

    /*
     * If the file in question has ACLs, we record that knowledge here:
     *
     * VTOFUD(vp)->flag |= FN_HAS_ACL;
     */

    VATTR_RETURN(vap, va_type, IFTOVT(fat->mode));

    VATTR_RETURN(vap, va_iosize, data->iosize);

    VATTR_RETURN(vap, va_flags, fat->flags);
}
static __inline__
void
fuse_internal_attr_loadvap(vnode_t vp, struct vnode_attr *out_vap,
                           vfs_context_t context)
{
    mount_t mp = vnode_mount(vp);
    struct vnode_attr *in_vap = VTOVA(vp);
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    int purged = 0;
    long hint = 0;

    if (in_vap == out_vap) {
        return;
    }

    VATTR_RETURN(out_vap, va_fsid, in_vap->va_fsid);

    VATTR_RETURN(out_vap, va_fileid, in_vap->va_fileid);
    VATTR_RETURN(out_vap, va_linkid, in_vap->va_linkid);
    VATTR_RETURN(out_vap, va_gen,
                 (typeof(out_vap->va_gen))fvdat->generation); /* XXX: truncation */
    if (!vnode_isvroot(vp)) {
        /*
         * If we do return va_parentid for our root vnode, things get
         * a bit too interesting for the Finder.
         */
        VATTR_RETURN(out_vap, va_parentid, fvdat->parent_nodeid);
    }

    /*
     * If we have asynchronous writes enabled, our local in-kernel size
     * takes precedence over what the daemon thinks.
     */
    /* ATTR_FUDGE_CASE */
    if (!vfs_issynchronous(mp)) {
        /* Bring in_vap up to date if need be. */
        VATTR_RETURN(in_vap, va_data_size, fvdat->filesize);
    } else {
        /* The size might have changed remotely. */
        if (fvdat->filesize != (off_t)in_vap->va_data_size) {
            hint |= NOTE_WRITE;
            /* Remote size overrides what we have. */
            (void)ubc_msync(vp, (off_t)0, fvdat->filesize, (off_t*)0,
                            UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
            purged = 1;
            if (fvdat->filesize > (off_t)in_vap->va_data_size) {
                hint |= NOTE_EXTEND;
            }
            fvdat->filesize = in_vap->va_data_size;
            ubc_setsize(vp, fvdat->filesize);
        }
    }
    VATTR_RETURN(out_vap, va_data_size, in_vap->va_data_size);

    if (fuse_issparse_mp(mp)) {
        VATTR_RETURN(out_vap, va_data_alloc, in_vap->va_data_alloc);
    }

    VATTR_RETURN(out_vap, va_mode, in_vap->va_mode);
    VATTR_RETURN(out_vap, va_nlink, in_vap->va_nlink);
    VATTR_RETURN(out_vap, va_uid, in_vap->va_uid);
    VATTR_RETURN(out_vap, va_gid, in_vap->va_gid);
    VATTR_RETURN(out_vap, va_rdev, in_vap->va_rdev);

    VATTR_RETURN(out_vap, va_type, in_vap->va_type);

    VATTR_RETURN(out_vap, va_iosize, in_vap->va_iosize);

    VATTR_RETURN(out_vap, va_flags, in_vap->va_flags);

    VATTR_RETURN(out_vap, va_access_time, in_vap->va_access_time);
    VATTR_RETURN(out_vap, va_change_time, in_vap->va_change_time);
    VATTR_RETURN(out_vap, va_modify_time, in_vap->va_modify_time);

    /*
     * When __DARWIN_64_BIT_INO_T is not enabled, the User library
     * will set va_create_time to -1. In that case, we will have
     * to ask for it separately, if necessary.
     */
    if (in_vap->va_create_time.tv_sec != (int64_t)-1) {
        VATTR_RETURN(out_vap, va_create_time, in_vap->va_create_time);
    }

    if ((fvdat->modify_time.tv_sec != in_vap->va_modify_time.tv_sec) ||
        (fvdat->modify_time.tv_nsec != in_vap->va_modify_time.tv_nsec)) {
        fvdat->modify_time.tv_sec = in_vap->va_modify_time.tv_sec;
        fvdat->modify_time.tv_nsec = in_vap->va_modify_time.tv_nsec;
        hint |= NOTE_ATTRIB;
        if (fuse_isautocache_mp(mp) && !purged) {
            (void)ubc_msync(vp, (off_t)0, fvdat->filesize, (off_t*)0,
                            UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
        }
    }

    if (VATTR_IS_ACTIVE(out_vap, va_backup_time) ||
        (VATTR_IS_ACTIVE(out_vap, va_create_time) &&
         !VATTR_IS_SUPPORTED(out_vap, va_create_time))) {
        (void)fuse_internal_loadxtimes(vp, out_vap, context);
    }

    if (hint) {
        FUSE_KNOTE(vp, hint);
    }
}

#define cache_attrs(vp, fuse_out) do {                                   \
    struct timespec uptsp_ ## __func__;                                  \
                                                                         \
    /* XXX: truncation; user space sends us a 64-bit tv_sec */           \
    VTOFUD(vp)->attr_valid.tv_sec = (time_t)(fuse_out)->attr_valid;      \
    VTOFUD(vp)->attr_valid.tv_nsec = (fuse_out)->attr_valid_nsec;        \
    nanouptime(&uptsp_ ## __func__);                                     \
                                                                         \
    fuse_timespec_add(&VTOFUD(vp)->attr_valid, &uptsp_ ## __func__);     \
                                                                         \
    fuse_internal_attr_fat2vat(vp, &(fuse_out)->attr, VTOVA(vp));        \
} while (0)
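/*
 * Typical usage (illustrative sketch; the variable names are assumed): the
 * macro expects a reply that carries a struct fuse_attr plus a validity
 * interval, such as a struct fuse_attr_out:
 *
 *     struct fuse_attr_out *fao = (struct fuse_attr_out *)reply_body;
 *     cache_attrs(vp, fao);  // stamps attr_valid, refreshes VTOVA(vp)
 */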
#if M_MACFUSE_ENABLE_EXCHANGE

/* exchange */

int
fuse_internal_exchange(vnode_t fvp,
                       const char *fname,
                       size_t flen,
                       vnode_t tvp,
                       const char *tname,
                       size_t tlen,
                       int options,
                       vfs_context_t context);

#endif /* M_MACFUSE_ENABLE_EXCHANGE */

/* fsync */

int
fuse_internal_fsync(vnode_t vp,
                    vfs_context_t context,
                    struct fuse_filehandle *fufh,
                    void *param,
                    fuse_op_waitfor_t waitfor);

int
fuse_internal_fsync_callback(struct fuse_ticket *ftick, uio_t uio);

/* ioctl */

int
fuse_internal_ioctl_avfi(vnode_t vp,
                         vfs_context_t context,
                         struct fuse_avfi_ioctl *avfi);

/* readdir */

struct pseudo_dirent {
    uint32_t d_namlen;
};

int
fuse_internal_readdir(vnode_t vp,
                      uio_t uio,
                      vfs_context_t context,
                      struct fuse_filehandle *fufh,
                      struct fuse_iov *cookediov,
                      int *numdirent);

int
fuse_internal_readdir_processdata(vnode_t vp,
                                  uio_t uio,
                                  size_t reqsize,
                                  void *buf,
                                  size_t bufsize,
                                  struct fuse_iov *cookediov,
                                  int *numdirent);

/* remove */

int
fuse_internal_remove(vnode_t dvp,
                     vnode_t vp,
                     struct componentname *cnp,
                     enum fuse_opcode op,
                     vfs_context_t context);

/* rename */

int
fuse_internal_rename(vnode_t fdvp,
                     vnode_t fvp,
                     struct componentname *fcnp,
                     vnode_t tdvp,
                     vnode_t tvp,
                     struct componentname *tcnp,
                     vfs_context_t context);

/* revoke */

int
fuse_internal_revoke(vnode_t vp, int flags, vfs_context_t context, int how);

void
fuse_internal_vnode_disappear(vnode_t vp, vfs_context_t context, int how);

/* strategy */

int
fuse_internal_strategy(vnode_t vp, buf_t bp);

errno_t
fuse_internal_strategy_buf(struct vnop_strategy_args *ap);

/* xattr */

static __inline__
int
fuse_skip_apple_xattr_mp(mount_t mp, const char *name)
{
    int ismpoption = fuse_get_mpdata(mp)->dataflags & FSESS_NO_APPLEXATTR;

    if (ismpoption && name) {
#define COM_APPLE_ "com.apple."
        if (bcmp(name, COM_APPLE_, sizeof(COM_APPLE_) - 1) == 0) {
            return 1;
        }
    }

    return 0;
}

/* entity creation */

static __inline__
int
fuse_internal_checkentry(struct fuse_entry_out *feo, enum vtype vtype)
{
    if (vtype != IFTOVT(feo->attr.mode)) {
        return EINVAL;
    }

    if (feo->nodeid == FUSE_NULL_ID) {
        return EINVAL;
    }

    if (feo->nodeid == FUSE_ROOT_ID) {
        return EINVAL;
    }

    return 0;
}
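/*
 * Typical usage (illustrative sketch; variable names are assumed): a
 * create-type reply (MKDIR, MKNOD, CREATE, ...) is sanity-checked before a
 * vnode is built for the new entry:
 *
 *     struct fuse_entry_out *feo = (struct fuse_entry_out *)reply_body;
 *     if (fuse_internal_checkentry(feo, vtype) != 0) {
 *         // reject the reply; do not construct a vnode for it
 *     }
 */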
int
fuse_internal_newentry(vnode_t dvp,
                       vnode_t *vpp,
                       struct componentname *cnp,
                       enum fuse_opcode op,
                       void *buf,
                       size_t bufsize,
                       enum vtype vtype,
                       vfs_context_t context);

void
fuse_internal_newentry_makerequest(mount_t mp,
                                   uint64_t dnid,
                                   struct componentname *cnp,
                                   enum fuse_opcode op,
                                   void *buf,
                                   size_t bufsize,
                                   struct fuse_dispatcher *fdip,
                                   vfs_context_t context);

int
fuse_internal_newentry_core(vnode_t dvp,
                            vnode_t *vpp,
                            struct componentname *cnp,
                            enum vtype vtyp,
                            struct fuse_dispatcher *fdip,
                            vfs_context_t context);

/* entity destruction */

int
fuse_internal_forget_callback(struct fuse_ticket *ftick, uio_t uio);

void
fuse_internal_forget_send(mount_t mp,
                          vfs_context_t context,
                          uint64_t nodeid,
                          uint64_t nlookup,
                          struct fuse_dispatcher *fdip);

void
fuse_internal_interrupt_send(struct fuse_ticket *ftick);

enum {
    REVOKE_NONE = 0,
    REVOKE_SOFT = 1,
    REVOKE_HARD = 2,
};

/* fuse start/stop */

int fuse_internal_init_synchronous(struct fuse_ticket *ftick);
int fuse_internal_send_init(struct fuse_data *data, vfs_context_t context);

/* other */

static __inline__
int
fuse_implemented(struct fuse_data *data, uint64_t which)
{
    int result;

    /* FUSE_DATA_LOCK_SHARED(data); */
    result = (int)!(data->noimplflags & which);
    /* FUSE_DATA_UNLOCK_SHARED(data); */

    return result;
}

static __inline__
void
fuse_clear_implemented(struct fuse_data *data, uint64_t which)
{
    /* FUSE_DATA_LOCK_EXCLUSIVE(data); */
    data->noimplflags |= which;
    /* FUSE_DATA_UNLOCK_EXCLUSIVE(data); */
}

static __inline__
int
fuse_set_implemented_custom(struct fuse_data *data, uint64_t flags)
{
    if (!data) {
        return EINVAL;
    }

    FUSE_DATA_LOCK_EXCLUSIVE(data);
    data->noimplflags = flags;
    FUSE_DATA_UNLOCK_EXCLUSIVE(data);

    return 0;
}
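/*
 * Typical usage (illustrative sketch): these helpers implement the usual
 * FUSE convention that an ENOSYS reply marks an operation as unimplemented,
 * so later calls can skip the round trip to the daemon:
 *
 *     if (fuse_implemented(data, which)) {
 *         // send the request; if the daemon answers ENOSYS:
 *         fuse_clear_implemented(data, which);
 *     } else {
 *         // fail fast without contacting the daemon
 *     }
 */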
void
fuse_internal_print_vnodes(mount_t mp);

void
fuse_preflight_log(vnode_t vp, fufh_type_t fufh_type, int err, char *message);

#endif /* _FUSE_INTERNAL_H_ */