/fs/namei.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t · C · 3411 lines · 2426 code · 405 blank · 580 comment · 589 complexity · fb2fd2e2ce1cc24bb0eb80bc14eb23bf MD5 · raw file

Large files are truncated click here to view the full file

  1. /*
  2. * linux/fs/namei.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. */
  6. /*
  7. * Some corrections by tytso.
  8. */
  9. /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
  10. * lookup logic.
  11. */
  12. /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
  13. */
  14. #include <linux/init.h>
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/fs.h>
  18. #include <linux/namei.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/fsnotify.h>
  21. #include <linux/personality.h>
  22. #include <linux/security.h>
  23. #include <linux/ima.h>
  24. #include <linux/syscalls.h>
  25. #include <linux/mount.h>
  26. #include <linux/audit.h>
  27. #include <linux/capability.h>
  28. #include <linux/file.h>
  29. #include <linux/fcntl.h>
  30. #include <linux/device_cgroup.h>
  31. #include <linux/fs_struct.h>
  32. #include <linux/posix_acl.h>
  33. #include <asm/uaccess.h>
  34. #include "internal.h"
  35. /* [Feb-1997 T. Schoebel-Theuer]
  36. * Fundamental changes in the pathname lookup mechanisms (namei)
  37. * were necessary because of omirr. The reason is that omirr needs
  38. * to know the _real_ pathname, not the user-supplied one, in case
  39. * of symlinks (and also when transname replacements occur).
  40. *
  41. * The new code replaces the old recursive symlink resolution with
  42. * an iterative one (in case of non-nested symlink chains). It does
  43. * this with calls to <fs>_follow_link().
  44. * As a side effect, dir_namei(), _namei() and follow_link() are now
  45. * replaced with a single function lookup_dentry() that can handle all
  46. * the special cases of the former code.
  47. *
  48. * With the new dcache, the pathname is stored at each inode, at least as
  49. * long as the refcount of the inode is positive. As a side effect, the
  50. * size of the dcache depends on the inode cache and thus is dynamic.
  51. *
  52. * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
  53. * resolution to correspond with current state of the code.
  54. *
  55. * Note that the symlink resolution is not *completely* iterative.
  56. * There is still a significant amount of tail- and mid- recursion in
  57. * the algorithm. Also, note that <fs>_readlink() is not used in
  58. * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
  59. * may return different results than <fs>_follow_link(). Many virtual
  60. * filesystems (including /proc) exhibit this behavior.
  61. */
  62. /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
  63. * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
  64. * and the name already exists in form of a symlink, try to create the new
  65. * name indicated by the symlink. The old code always complained that the
  66. * name already exists, due to not following the symlink even if its target
  67. * is nonexistent. The new semantics affects also mknod() and link() when
  68. * the name is a symlink pointing to a non-existent name.
  69. *
  70. * I don't know which semantics is the right one, since I have no access
  71. * to standards. But I found by trial that HP-UX 9.0 has the full "new"
  72. * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
  73. * "old" one. Personally, I think the new semantics is much more logical.
  74. * Note that "ln old new" where "new" is a symlink pointing to a non-existing
  75. * file does succeed in both HP-UX and SunOs, but not in Solaris
  76. * and in the old Linux semantics.
  77. */
  78. /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
  79. * semantics. See the comments in "open_namei" and "do_link" below.
  80. *
  81. * [10-Sep-98 Alan Modra] Another symlink change.
  82. */
  83. /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
  84. * inside the path - always follow.
  85. * in the last component in creation/removal/renaming - never follow.
  86. * if LOOKUP_FOLLOW passed - follow.
  87. * if the pathname has trailing slashes - follow.
  88. * otherwise - don't follow.
  89. * (applied in that order).
  90. *
  91. * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
  92. * restored for 2.4. This is the last surviving part of old 4.2BSD bug.
  93. * During the 2.4 we need to fix the userland stuff depending on it -
  94. * hopefully we will be able to get rid of that wart in 2.5. So far only
  95. * XEmacs seems to be relying on it...
  96. */
  97. /*
  98. * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
  99. * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
  100. * any extra contention...
  101. */
  102. /* In order to reduce some races, while at the same time doing additional
  103. * checking and hopefully speeding things up, we copy filenames to the
  104. * kernel data space before using them..
  105. *
  106. * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
  107. * PATH_MAX includes the nul terminator --RR.
  108. */
/*
 * Copy a user-supplied pathname into the kernel buffer @page.
 *
 * Returns 0 on success, -EFAULT for a pointer outside the user address
 * space, -ENOENT for an empty path (invalid per POSIX.1 2.4), and
 * -ENAMETOOLONG when no NUL terminator fits within PATH_MAX.
 */
static int do_getname(const char __user *filename, char *page)
{
	int retval;
	unsigned long len = PATH_MAX;

	if (!segment_eq(get_fs(), KERNEL_DS)) {
		/* user pointer: clamp the copy length so strncpy_from_user
		 * cannot run past the end of the user address space */
		if ((unsigned long) filename >= TASK_SIZE)
			return -EFAULT;
		if (TASK_SIZE - (unsigned long) filename < PATH_MAX)
			len = TASK_SIZE - (unsigned long) filename;
	}

	retval = strncpy_from_user(page, filename, len);
	if (retval > 0) {
		if (retval < len)
			return 0;	/* name and NUL both fit */
		return -ENAMETOOLONG;	/* no NUL seen within len bytes */
	} else if (!retval)
		retval = -ENOENT;	/* empty pathname */
	return retval;
}
/*
 * getname_flags - copy a pathname from userspace into a __getname() buffer
 *
 * Returns the kernel copy of the name or an ERR_PTR.  When LOOKUP_EMPTY
 * is set in @flags an empty path is tolerated: the buffer is returned
 * anyway and, if @empty is non-NULL, *empty is set to 1 so the caller
 * can distinguish the case.
 */
static char *getname_flags(const char __user *filename, int flags, int *empty)
{
	char *tmp, *result;

	result = ERR_PTR(-ENOMEM);
	tmp = __getname();
	if (tmp) {
		int retval = do_getname(filename, tmp);

		result = tmp;
		if (retval < 0) {
			/* report emptiness even when we tolerate it */
			if (retval == -ENOENT && empty)
				*empty = 1;
			if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
				__putname(tmp);
				result = ERR_PTR(retval);
			}
		}
	}
	/* hand the name (or error) to the audit subsystem for the record */
	audit_getname(result);
	return result;
}
  148. char *getname(const char __user * filename)
  149. {
  150. return getname_flags(filename, 0, 0);
  151. }
#ifdef CONFIG_AUDITSYSCALL
/*
 * putname - release a name obtained from getname()
 *
 * If a real audit context is active, ownership of the buffer was taken
 * by audit_getname(); defer the free to audit_putname() so the name
 * survives until the syscall audit record is emitted.  Otherwise free
 * the __getname() buffer immediately.
 */
void putname(const char *name)
{
	if (unlikely(!audit_dummy_context()))
		audit_putname(name);
	else
		__putname(name);
}
EXPORT_SYMBOL(putname);
#endif
/*
 * Check POSIX ACL permissions for @inode.
 *
 * Returns 0 or -EACCES from the ACL evaluation, -EAGAIN when no ACL
 * applies (caller falls back to mode bits), or -ECHILD when the answer
 * would require blocking in rcu-walk mode (MAY_NOT_BLOCK set).
 */
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl *acl;

	if (mask & MAY_NOT_BLOCK) {
		/* rcu-walk: only a cached ACL may be consulted */
		acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
		if (!acl)
			return -EAGAIN;
		/* no ->get_acl() calls in RCU mode... */
		if (acl == ACL_NOT_CACHED)
			return -ECHILD;
		return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
	}

	acl = get_cached_acl(inode, ACL_TYPE_ACCESS);

	/*
	 * A filesystem can force a ACL callback by just never filling the
	 * ACL cache. But normally you'd fill the cache either at inode
	 * instantiation time, or on the first ->get_acl call.
	 *
	 * If the filesystem doesn't have a get_acl() function at all, we'll
	 * just create the negative cache entry.
	 */
	if (acl == ACL_NOT_CACHED) {
		if (inode->i_op->get_acl) {
			acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS);
			if (IS_ERR(acl))
				return PTR_ERR(acl);
		} else {
			/* negative-cache so we never ask again */
			set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
			return -EAGAIN;
		}
	}

	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}
#endif
	return -EAGAIN;
}
/*
 * This does basic POSIX ACL permission checking: pick the owner/group/
 * other class of the mode bits (consulting the ACL for the group class
 * on POSIXACL filesystems) and test the requested mask against it.
 */
static int acl_permission_check(struct inode *inode, int mask)
{
	unsigned int mode = inode->i_mode;

	mask &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK;

	/* a task in a foreign user namespace only gets the "other" bits */
	if (current_user_ns() != inode_userns(inode))
		goto other_perms;

	if (likely(current_fsuid() == inode->i_uid))
		mode >>= 6;	/* owner class */
	else {
		if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
			int error = check_acl(inode, mask);
			/* -EAGAIN: no ACL applies, fall back to mode bits */
			if (error != -EAGAIN)
				return error;
		}

		if (in_group_p(inode->i_gid))
			mode >>= 3;	/* group class */
	}

other_perms:
	/*
	 * If the DACs are ok we don't need any capability check.
	 */
	if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
		return 0;
	return -EACCES;
}
/**
 * generic_permission -  check for access rights on a Posix-like filesystem
 * @inode:	inode to check access rights for
 * @mask:	right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Used to check for read/write/execute permissions on a file.
 * We use "fsuid" for this, letting us set arbitrary permissions
 * for filesystem access without changing the "normal" uids which
 * are used for other things.
 *
 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
 * request cannot be satisfied (eg. requires blocking or too much complexity).
 * It would then be called again in ref-walk mode.
 */
int generic_permission(struct inode *inode, int mask)
{
	int ret;

	/*
	 * Do the basic POSIX ACL permission checks.
	 */
	ret = acl_permission_check(inode, mask);
	if (ret != -EACCES)
		return ret;	/* granted, or -ECHILD from rcu-mode ACL */

	/* DAC said no: see whether a capability overrides it */
	if (S_ISDIR(inode->i_mode)) {
		/* DACs are overridable for directories */
		if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
			return 0;
		if (!(mask & MAY_WRITE))
			if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
				return 0;
		return -EACCES;
	}
	/*
	 * Read/write DACs are always overridable.
	 * Executable DACs are overridable when there is
	 * at least one exec bit set.
	 */
	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
		if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
			return 0;

	/*
	 * Searching includes executable on directories, else just read.
	 */
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (mask == MAY_READ)
		if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
			return 0;

	return -EACCES;
}
/*
 * We _really_ want to just do "generic_permission()" without
 * even looking at the inode->i_op values. So we keep a cache
 * flag in inode->i_opflags, that says "this has not special
 * permission function, use the fast case".
 */
static inline int do_inode_permission(struct inode *inode, int mask)
{
	if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
		if (likely(inode->i_op->permission))
			return inode->i_op->permission(inode, mask);

		/* This gets set once for the inode lifetime */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_FASTPERM;
		spin_unlock(&inode->i_lock);
	}
	/* fast path: no filesystem-specific ->permission() hook */
	return generic_permission(inode, mask);
}
/**
 * inode_permission  -  check for access rights to a given inode
 * @inode:	inode to check permission on
 * @mask:	right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Used to check for read/write/execute permissions on an inode.
 * We use "fsuid" for this, letting us set arbitrary permissions
 * for filesystem access without changing the "normal" uids which
 * are used for other things.
 *
 * Returns 0 if access is allowed, -errno otherwise.  Filesystem-level,
 * device-cgroup and LSM checks are all applied, in that order.
 */
int inode_permission(struct inode *inode, int mask)
{
	int retval;

	if (unlikely(mask & MAY_WRITE)) {
		umode_t mode = inode->i_mode;

		/*
		 * Nobody gets write access to a read-only fs.
		 * (special files like devices stay writable)
		 */
		if (IS_RDONLY(inode) &&
		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			return -EROFS;

		/*
		 * Nobody gets write access to an immutable file.
		 */
		if (IS_IMMUTABLE(inode))
			return -EACCES;
	}

	retval = do_inode_permission(inode, mask);
	if (retval)
		return retval;

	retval = devcgroup_inode_permission(inode, mask);
	if (retval)
		return retval;

	return security_inode_permission(inode, mask);
}
/**
 * path_get - get a reference to a path
 * @path: path to get the reference to
 *
 * Given a path increment the reference count to the dentry and the vfsmount.
 * The caller must already hold (or otherwise guarantee) a valid reference
 * on @path.  Pair with path_put().
 */
void path_get(struct path *path)
{
	mntget(path->mnt);
	dget(path->dentry);
}
EXPORT_SYMBOL(path_get);
/**
 * path_put - put a reference to a path
 * @path: path to put the reference to
 *
 * Given a path decrement the reference count to the dentry and the vfsmount.
 * Drop the dentry before the mount: the dentry pins the superblock that
 * the vfsmount reference keeps mounted.
 */
void path_put(struct path *path)
{
	dput(path->dentry);
	mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);
  356. /*
  357. * Path walking has 2 modes, rcu-walk and ref-walk (see
  358. * Documentation/filesystems/path-lookup.txt). In situations when we can't
  359. * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
  360. * normal reference counts on dentries and vfsmounts to transition to rcu-walk
  361. * mode. Refcounts are grabbed at the last known good point before rcu-walk
  362. * got stuck, so ref-walk may continue from there. If this is not successful
  363. * (eg. a seqcount has changed), then failure is returned and it's up to caller
  364. * to restart the path walk from the beginning in ref-walk mode.
  365. */
/**
 * unlazy_walk - try to switch to ref-walk mode.
 * @nd: nameidata pathwalk data
 * @dentry: child of nd->path.dentry or NULL
 * Returns: 0 on success, -ECHILD on failure
 *
 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
 * for ref-walk mode.  @dentry must be a path found by a do_lookup call on
 * @nd or NULL.  Must be called from rcu-walk context.
 *
 * On success the rcu/vfsmount locks are dropped and real references are
 * held on everything; on failure all locks taken here are released and
 * the caller must restart the walk in ref-walk mode.
 */
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
{
	struct fs_struct *fs = current->fs;
	struct dentry *parent = nd->path.dentry;
	int want_root = 0;

	BUG_ON(!(nd->flags & LOOKUP_RCU));

	/* legitimize nd->root first, unless the caller supplied it */
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		want_root = 1;
		spin_lock(&fs->lock);
		/* the task's root may have changed since we sampled it */
		if (nd->root.mnt != fs->root.mnt ||
				nd->root.dentry != fs->root.dentry)
			goto err_root;
	}
	spin_lock(&parent->d_lock);
	if (!dentry) {
		/* validate seqcount and convert rcu "ref" to a real one */
		if (!__d_rcu_to_refcount(parent, nd->seq))
			goto err_parent;
		BUG_ON(nd->inode != parent->d_inode);
	} else {
		if (dentry->d_parent != parent)
			goto err_parent;
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!__d_rcu_to_refcount(dentry, nd->seq))
			goto err_child;
		/*
		 * If the sequence check on the child dentry passed, then
		 * the child has not been removed from its parent. This
		 * means the parent dentry must be valid and able to take
		 * a reference at this point.
		 */
		BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
		BUG_ON(!parent->d_count);
		parent->d_count++;
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&parent->d_lock);
	if (want_root) {
		path_get(&nd->root);
		spin_unlock(&fs->lock);
	}
	mntget(nd->path.mnt);

	/* fully legitimized now - leave rcu-walk mode */
	rcu_read_unlock();
	br_read_unlock(vfsmount_lock);
	nd->flags &= ~LOOKUP_RCU;
	return 0;

err_child:
	spin_unlock(&dentry->d_lock);
err_parent:
	spin_unlock(&parent->d_lock);
err_root:
	if (want_root)
		spin_unlock(&fs->lock);
	return -ECHILD;
}
  430. /**
  431. * release_open_intent - free up open intent resources
  432. * @nd: pointer to nameidata
  433. */
  434. void release_open_intent(struct nameidata *nd)
  435. {
  436. struct file *file = nd->intent.open.file;
  437. if (file && !IS_ERR(file)) {
  438. if (file->f_path.dentry == NULL)
  439. put_filp(file);
  440. else
  441. fput(file);
  442. }
  443. }
/*
 * Thin wrapper around the filesystem's ->d_revalidate() hook; callers
 * must have checked DCACHE_OP_REVALIDATE first.
 */
static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	return dentry->d_op->d_revalidate(dentry, nd);
}
/**
 * complete_walk - successful completion of path walk
 * @nd:  pointer nameidata
 *
 * If we had been in RCU mode, drop out of it and legitimize nd->path.
 * Revalidate the final result, unless we'd already done that during
 * the path walk or the filesystem doesn't ask for it. Return 0 on
 * success, -error on failure. In case of failure caller does not
 * need to drop nd->path.
 */
static int complete_walk(struct nameidata *nd)
{
	struct dentry *dentry = nd->path.dentry;
	int status;

	if (nd->flags & LOOKUP_RCU) {
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		spin_lock(&dentry->d_lock);
		/* grab a real refcount; fail if the dentry changed under us */
		if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
			spin_unlock(&dentry->d_lock);
			rcu_read_unlock();
			br_read_unlock(vfsmount_lock);
			return -ECHILD;
		}
		BUG_ON(nd->inode != dentry->d_inode);
		spin_unlock(&dentry->d_lock);
		mntget(nd->path.mnt);
		rcu_read_unlock();
		br_read_unlock(vfsmount_lock);
	}

	/* revalidate only if we jumped (".." over a mount, symlink to /, etc.)
	 * and the filesystem asks for it via d_revalidate + FS_REVAL_DOT */
	if (likely(!(nd->flags & LOOKUP_JUMPED)))
		return 0;

	if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
		return 0;

	if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
		return 0;

	/* Note: we do not d_invalidate() */
	status = d_revalidate(dentry, nd);
	if (status > 0)
		return 0;

	if (!status)
		status = -ESTALE;

	path_put(&nd->path);
	return status;
}
/* Lazily initialize nd->root from the task's fs_struct (ref-walk mode). */
static __always_inline void set_root(struct nameidata *nd)
{
	if (!nd->root.mnt)
		get_fs_root(current->fs, &nd->root);
}
static int link_path_walk(const char *, struct nameidata *);

/*
 * Lazily initialize nd->root in rcu-walk mode: sample fs->root under
 * fs->seq so we obtain a consistent mnt/dentry pair without taking
 * any references, and record the root dentry's seqcount for later
 * validation.
 */
static __always_inline void set_root_rcu(struct nameidata *nd)
{
	if (!nd->root.mnt) {
		struct fs_struct *fs = current->fs;
		unsigned seq;

		do {
			seq = read_seqcount_begin(&fs->seq);
			nd->root = fs->root;
			nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
		} while (read_seqcount_retry(&fs->seq, seq));
	}
}
/*
 * Walk the body of a symlink.  An absolute link restarts the walk from
 * the root; either way nd->path is consumed on failure, so callers need
 * not clean it up.  @link may be an ERR_PTR from ->follow_link().
 */
static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
{
	int ret;

	if (IS_ERR(link))
		goto fail;

	if (*link == '/') {
		/* absolute symlink: jump back to (and re-reference) root */
		set_root(nd);
		path_put(&nd->path);
		nd->path = nd->root;
		path_get(&nd->root);
		nd->flags |= LOOKUP_JUMPED;
	}
	nd->inode = nd->path.dentry->d_inode;

	ret = link_path_walk(link, nd);
	return ret;
fail:
	path_put(&nd->path);
	return PTR_ERR(link);
}
/*
 * Drop a path found during lookup: always put the dentry, but only put
 * the mount if it differs from the one nd already holds a reference on.
 */
static void path_put_conditional(struct path *path, struct nameidata *nd)
{
	dput(path->dentry);
	if (path->mnt != nd->path.mnt)
		mntput(path->mnt);
}
/*
 * Advance nd->path to @path.  In ref-walk mode the old references are
 * dropped (keeping the mount ref if unchanged); in rcu-walk mode no
 * references are held, so the pointers are simply overwritten.
 */
static inline void path_to_nameidata(const struct path *path,
					struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		dput(nd->path.dentry);
		if (nd->path.mnt != path->mnt)
			mntput(nd->path.mnt);
	}
	nd->path.mnt = path->mnt;
	nd->path.dentry = path->dentry;
}
/*
 * Finish with a symlink: invoke ->put_link() to release whatever cookie
 * ->follow_link() handed back (unless it failed), then drop the path.
 */
static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
{
	struct inode *inode = link->dentry->d_inode;
	if (!IS_ERR(cookie) && inode->i_op->put_link)
		inode->i_op->put_link(link->dentry, nd, cookie);
	path_put(link);
}
/*
 * Follow one symlink in ref-walk mode.  *p receives the cookie from
 * ->follow_link() for the caller to pass to put_link(); an ERR_PTR
 * cookie means put_link() must not call ->put_link().  nd->path is
 * consumed on error.  At most 40 links are followed per lookup.
 */
static __always_inline int
follow_link(struct path *link, struct nameidata *nd, void **p)
{
	int error;
	struct dentry *dentry = link->dentry;

	BUG_ON(nd->flags & LOOKUP_RCU);

	/* take our own mount ref unless we share nd's */
	if (link->mnt == nd->path.mnt)
		mntget(link->mnt);

	if (unlikely(current->total_link_count >= 40)) {
		*p = ERR_PTR(-ELOOP); /* no ->put_link(), please */
		path_put(&nd->path);
		return -ELOOP;
	}
	cond_resched();
	current->total_link_count++;

	touch_atime(link->mnt, dentry);
	nd_set_link(nd, NULL);

	error = security_inode_follow_link(link->dentry, nd);
	if (error) {
		*p = ERR_PTR(error); /* no ->put_link(), please */
		path_put(&nd->path);
		return error;
	}

	nd->last_type = LAST_BIND;
	*p = dentry->d_inode->i_op->follow_link(dentry, nd);
	error = PTR_ERR(*p);
	if (!IS_ERR(*p)) {
		char *s = nd_get_link(nd);
		error = 0;
		if (s)
			error = __vfs_follow_link(nd, s);
		else if (nd->last_type == LAST_BIND) {
			/* ->follow_link() walked for us (e.g. procfs) */
			nd->flags |= LOOKUP_JUMPED;
			nd->inode = nd->path.dentry->d_inode;
			if (nd->inode->i_op->follow_link) {
				/* stepped on a _really_ weird one */
				path_put(&nd->path);
				error = -ELOOP;
			}
		}
	}
	return error;
}
  598. static int follow_up_rcu(struct path *path)
  599. {
  600. struct vfsmount *parent;
  601. struct dentry *mountpoint;
  602. parent = path->mnt->mnt_parent;
  603. if (parent == path->mnt)
  604. return 0;
  605. mountpoint = path->mnt->mnt_mountpoint;
  606. path->dentry = mountpoint;
  607. path->mnt = parent;
  608. return 1;
  609. }
/*
 * Ref-walk counterpart of follow_up_rcu(): move @path from the root of
 * a mount onto its mountpoint in the parent mount, swapping references.
 * The mount tree is sampled under vfsmount_lock; new references are
 * taken before the old ones are dropped.  Returns 1 if we moved up,
 * 0 when already at the top.
 */
int follow_up(struct path *path)
{
	struct vfsmount *parent;
	struct dentry *mountpoint;

	br_read_lock(vfsmount_lock);
	parent = path->mnt->mnt_parent;
	if (parent == path->mnt) {
		br_read_unlock(vfsmount_lock);
		return 0;
	}
	mntget(parent);
	mountpoint = dget(path->mnt->mnt_mountpoint);
	br_read_unlock(vfsmount_lock);
	dput(path->dentry);
	path->dentry = mountpoint;
	mntput(path->mnt);
	path->mnt = parent;
	return 1;
}
/*
 * Perform an automount
 * - return -EISDIR to tell follow_managed() to stop and return the path we
 *   were called with.
 */
static int follow_automount(struct path *path, unsigned flags,
			    bool *need_mntput)
{
	struct vfsmount *mnt;
	int err;

	if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
		return -EREMOTE;

	/* We don't want to mount if someone's just doing a stat -
	 * unless they're stat'ing a directory and appended a '/' to
	 * the name.
	 *
	 * We do, however, want to mount if someone wants to open or
	 * create a file of any type under the mountpoint, wants to
	 * traverse through the mountpoint or wants to open the
	 * mounted directory.  Also, autofs may mark negative dentries
	 * as being automount points.  These will need the attentions
	 * of the daemon to instantiate them before they can be used.
	 */
	if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
		       LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
	    path->dentry->d_inode)
		return -EISDIR;

	/* automounts count against the symlink recursion limit */
	current->total_link_count++;
	if (current->total_link_count >= 40)
		return -ELOOP;

	mnt = path->dentry->d_op->d_automount(path);
	if (IS_ERR(mnt)) {
		/*
		 * The filesystem is allowed to return -EISDIR here to indicate
		 * it doesn't want to automount.  For instance, autofs would do
		 * this so that its userspace daemon can mount on this dentry.
		 *
		 * However, we can only permit this if it's a terminal point in
		 * the path being looked up; if it wasn't then the remainder of
		 * the path is inaccessible and we should say so.
		 */
		if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
			return -EREMOTE;
		return PTR_ERR(mnt);
	}

	if (!mnt) /* mount collision */
		return 0;

	if (!*need_mntput) {
		/* lock_mount() may release path->mnt on error */
		mntget(path->mnt);
		*need_mntput = true;
	}
	err = finish_automount(mnt, path);

	switch (err) {
	case -EBUSY:
		/* Someone else made a mount here whilst we were busy */
		return 0;
	case 0:
		/* success: step onto the freshly mounted filesystem */
		path_put(path);
		path->mnt = mnt;
		path->dentry = dget(mnt->mnt_root);
		return 0;
	default:
		return err;
	}

}
/*
 * Handle a dentry that is managed in some way.
 * - Flagged for transit management (autofs)
 * - Flagged as mountpoint
 * - Flagged as automount point
 *
 * This may only be called in refwalk mode.
 *
 * Serialization is taken care of in namespace.c
 *
 * Returns <0 on error, otherwise 1 if a new mount reference was taken
 * (caller now owns it) or 0 if not.
 */
static int follow_managed(struct path *path, unsigned flags)
{
	struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
	unsigned managed;
	bool need_mntput = false;
	int ret = 0;

	/* Given that we're not holding a lock here, we retain the value in a
	 * local variable for each dentry as we look at it so that we don't see
	 * the components of that value change under us */
	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       managed &= DCACHE_MANAGED_DENTRY,
	       unlikely(managed != 0)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held. */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(path->dentry, false);
			if (ret < 0)
				break;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (mounted) {
				dput(path->dentry);
				if (need_mntput)
					mntput(path->mnt);
				path->mnt = mounted;
				path->dentry = dget(mounted->mnt_root);
				need_mntput = true;
				continue;
			}

			/* Something is mounted on this dentry in another
			 * namespace and/or whatever was mounted there in this
			 * namespace got unmounted before we managed to get the
			 * vfsmount_lock */
		}

		/* Handle an automount point */
		if (managed & DCACHE_NEED_AUTOMOUNT) {
			ret = follow_automount(path, flags, &need_mntput);
			if (ret < 0)
				break;
			continue;
		}

		/* We didn't change the current path point */
		break;
	}

	/* we ended up back on the mount we started with: drop the extra ref */
	if (need_mntput && path->mnt == mnt)
		mntput(path->mnt);
	if (ret == -EISDIR)
		ret = 0;	/* automount declined: not an error */
	return ret < 0 ? ret : need_mntput;
}
  759. int follow_down_one(struct path *path)
  760. {
  761. struct vfsmount *mounted;
  762. mounted = lookup_mnt(path);
  763. if (mounted) {
  764. dput(path->dentry);
  765. mntput(path->mnt);
  766. path->mnt = mounted;
  767. path->dentry = dget(mounted->mnt_root);
  768. return 1;
  769. }
  770. return 0;
  771. }
  772. static inline bool managed_dentry_might_block(struct dentry *dentry)
  773. {
  774. return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
  775. dentry->d_op->d_manage(dentry, true) < 0);
  776. }
/*
 * Try to skip to top of mountpoint pile in rcuwalk mode.  Fail if
 * we meet a managed dentry that would need blocking.
 *
 * Returns false when the caller must drop to ref-walk; on success
 * @path/@inode/@nd->seq describe the topmost mounted filesystem.
 */
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
			       struct inode **inode)
{
	for (;;) {
		struct vfsmount *mounted;
		/*
		 * Don't forget we might have a non-mountpoint managed dentry
		 * that wants to block transit.
		 */
		if (unlikely(managed_dentry_might_block(path->dentry)))
			return false;

		if (!d_mountpoint(path->dentry))
			break;

		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
		if (!mounted)
			break;
		path->mnt = mounted;
		path->dentry = mounted->mnt_root;
		nd->flags |= LOOKUP_JUMPED;
		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
		/*
		 * Update the inode too. We don't need to re-check the
		 * dentry sequence number here after this d_inode read,
		 * because a mount-point is always pinned.
		 */
		*inode = path->dentry->d_inode;
	}
	return true;
}
/*
 * rcu-walk: hop over any mounts stacked on nd->path, updating nd->seq
 * as we go.  No references are taken.
 */
static void follow_mount_rcu(struct nameidata *nd)
{
	while (d_mountpoint(nd->path.dentry)) {
		struct vfsmount *mounted;
		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
		if (!mounted)
			break;
		nd->path.mnt = mounted;
		nd->path.dentry = mounted->mnt_root;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
}
/*
 * Handle ".." in rcu-walk mode: step to the parent dentry, climbing
 * mount boundaries as needed, but never above nd->root.  Returns
 * -ECHILD (after leaving rcu-walk) if a seqcount check fails; the
 * caller then restarts in ref-walk mode.
 */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	set_root_rcu(nd);

	while (1) {
		/* never walk above the process root */
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			/* sample parent's seq before validating the child */
			seq = read_seqcount_begin(&parent->d_seq);
			if (read_seqcount_retry(&old->d_seq, nd->seq))
				goto failed;
			nd->path.dentry = parent;
			nd->seq = seq;
			break;
		}
		/* at the root of this mount: climb into the parent mount */
		if (!follow_up_rcu(&nd->path))
			break;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
	follow_mount_rcu(nd);
	nd->inode = nd->path.dentry->d_inode;
	return 0;

failed:
	nd->flags &= ~LOOKUP_RCU;
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	rcu_read_unlock();
	br_read_unlock(vfsmount_lock);
	return -ECHILD;
}
/*
 * Follow down to the covering mount currently visible to userspace.  At each
 * point, the filesystem owning that dentry may be queried as to whether the
 * caller is permitted to proceed or not.
 *
 * Automount points are deliberately not triggered here.  Returns 0 on
 * success or a negative error from ->d_manage().
 */
int follow_down(struct path *path)
{
	unsigned managed;
	int ret;

	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held.
		 *
		 * We indicate to the filesystem if someone is trying to mount
		 * something here.  This gives autofs the chance to deny anyone
		 * other than its daemon the right to mount on its
		 * superstructure.
		 *
		 * The filesystem may sleep at this point.
		 */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(
				path->dentry, false);
			if (ret < 0)
				return ret == -EISDIR ? 0 : ret;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (!mounted)
				break;
			dput(path->dentry);
			mntput(path->mnt);
			path->mnt = mounted;
			path->dentry = dget(mounted->mnt_root);
			continue;
		}

		/* Don't handle automount points here */
		break;
	}
	return 0;
}
  901. /*
  902. * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
  903. */
  904. static void follow_mount(struct path *path)
  905. {
  906. while (d_mountpoint(path->dentry)) {
  907. struct vfsmount *mounted = lookup_mnt(path);
  908. if (!mounted)
  909. break;
  910. dput(path->dentry);
  911. mntput(path->mnt);
  912. path->mnt = mounted;
  913. path->dentry = dget(mounted->mnt_root);
  914. }
  915. }
/* Handle ".." in ref-walk mode: step to the parent, climbing mounts. */
static void follow_dotdot(struct nameidata *nd)
{
	set_root(nd);

	while(1) {
		struct dentry *old = nd->path.dentry;

		/* Never climb above the root of this lookup. */
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()... */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			break;
		}
		/* At a mount root: hop up to the mountpoint and retry. */
		if (!follow_up(&nd->path))
			break;
	}
	/* ".." may land on a mountpoint - descend to whatever covers it. */
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
}
  937. /*
  938. * Allocate a dentry with name and parent, and perform a parent
  939. * directory ->lookup on it. Returns the new dentry, or ERR_PTR
  940. * on error. parent->d_inode->i_mutex must be held. d_lookup must
  941. * have verified that no child exists while under i_mutex.
  942. */
  943. static struct dentry *d_alloc_and_lookup(struct dentry *parent,
  944. struct qstr *name, struct nameidata *nd)
  945. {
  946. struct inode *inode = parent->d_inode;
  947. struct dentry *dentry;
  948. struct dentry *old;
  949. /* Don't create child dentry for a dead directory. */
  950. if (unlikely(IS_DEADDIR(inode)))
  951. return ERR_PTR(-ENOENT);
  952. dentry = d_alloc(parent, name);
  953. if (unlikely(!dentry))
  954. return ERR_PTR(-ENOMEM);
  955. old = inode->i_op->lookup(inode, dentry, nd);
  956. if (unlikely(old)) {
  957. dput(dentry);
  958. dentry = old;
  959. }
  960. return dentry;
  961. }
  962. /*
  963. * We already have a dentry, but require a lookup to be performed on the parent
  964. * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error.
  965. * parent->d_inode->i_mutex must be held. d_lookup must have verified that no
  966. * child exists while under i_mutex.
  967. */
  968. static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry,
  969. struct nameidata *nd)
  970. {
  971. struct inode *inode = parent->d_inode;
  972. struct dentry *old;
  973. /* Don't create child dentry for a dead directory. */
  974. if (unlikely(IS_DEADDIR(inode)))
  975. return ERR_PTR(-ENOENT);
  976. old = inode->i_op->lookup(inode, dentry, nd);
  977. if (unlikely(old)) {
  978. dput(dentry);
  979. dentry = old;
  980. }
  981. return dentry;
  982. }
  983. /*
  984. * It's more convoluted than I'd like it to be, but... it's still fairly
  985. * small and for now I'd prefer to have fast path as straight as possible.
  986. * It _is_ time-critical.
  987. */
/*
 * Look up one component @name under nd->path.  On success fills in
 * *path (the found dentry/mnt, mounts followed) and *inode, returning 0.
 * Tries the lockless rcu-walk dcache path first, then falls back to a
 * refcounted lookup under the parent's i_mutex.
 */
static int do_lookup(struct nameidata *nd, struct qstr *name,
			struct path *path, struct inode **inode)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int need_reval = 1;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, we're going to
	 * do the non-racy lookup, below.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		/* Fast path: lockless dcache hash lookup. */
		*inode = nd->inode;
		dentry = __d_lookup_rcu(parent, name, &seq, inode);
		if (!dentry)
			goto unlazy;

		/* Memory barrier in read_seqcount_begin of child is enough */
		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
			return -ECHILD;
		nd->seq = seq;

		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
			status = d_revalidate(dentry, nd);
			if (unlikely(status <= 0)) {
				if (status != -ECHILD)
					need_reval = 0;
				goto unlazy;
			}
		}
		/* Hashed but never looked up? Must do the real lookup. */
		if (unlikely(d_need_lookup(dentry)))
			goto unlazy;
		path->mnt = mnt;
		path->dentry = dentry;
		if (unlikely(!__follow_mount_rcu(nd, path, inode)))
			goto unlazy;
		/* Automounting cannot be done in rcu-walk mode. */
		if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
			goto unlazy;
		return 0;
unlazy:
		/* Drop to ref-walk, taking a reference on what we found. */
		if (unlazy_walk(nd, dentry))
			return -ECHILD;
	} else {
		dentry = __d_lookup(parent, name);
	}

	if (dentry && unlikely(d_need_lookup(dentry))) {
		dput(dentry);
		dentry = NULL;
	}
retry:
	if (unlikely(!dentry)) {
		/* Slow path: call the filesystem's ->lookup() under i_mutex */
		struct inode *dir = parent->d_inode;
		BUG_ON(nd->inode != dir);

		mutex_lock(&dir->i_mutex);
		dentry = d_lookup(parent, name);
		if (likely(!dentry)) {
			dentry = d_alloc_and_lookup(parent, name, nd);
			if (IS_ERR(dentry)) {
				mutex_unlock(&dir->i_mutex);
				return PTR_ERR(dentry);
			}
			/* known good */
			need_reval = 0;
			status = 1;
		} else if (unlikely(d_need_lookup(dentry))) {
			/* Raced: dentry appeared but was never looked up. */
			dentry = d_inode_lookup(parent, dentry, nd);
			if (IS_ERR(dentry)) {
				mutex_unlock(&dir->i_mutex);
				return PTR_ERR(dentry);
			}
			/* known good */
			need_reval = 0;
			status = 1;
		}
		mutex_unlock(&dir->i_mutex);
	}
	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
		status = d_revalidate(dentry, nd);
	if (unlikely(status <= 0)) {
		if (status < 0) {
			dput(dentry);
			return status;
		}
		/* Revalidation said "no": invalidate and redo the lookup. */
		if (!d_invalidate(dentry)) {
			dput(dentry);
			dentry = NULL;
			need_reval = 1;
			goto retry;
		}
	}

	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd->flags);
	if (unlikely(err < 0)) {
		path_put_conditional(path, nd);
		return err;
	}
	if (err)
		nd->flags |= LOOKUP_JUMPED;
	*inode = path->dentry->d_inode;
	return 0;
}
  1091. static inline int may_lookup(struct nameidata *nd)
  1092. {
  1093. if (nd->flags & LOOKUP_RCU) {
  1094. int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
  1095. if (err != -ECHILD)
  1096. return err;
  1097. if (unlazy_walk(nd, NULL))
  1098. return -ECHILD;
  1099. }
  1100. return inode_permission(nd->inode, MAY_EXEC);
  1101. }
  1102. static inline int handle_dots(struct nameidata *nd, int type)
  1103. {
  1104. if (type == LAST_DOTDOT) {
  1105. if (nd->flags & LOOKUP_RCU) {
  1106. if (follow_dotdot_rcu(nd))
  1107. return -ECHILD;
  1108. } else
  1109. follow_dotdot(nd);
  1110. }
  1111. return 0;
  1112. }
  1113. static void terminate_walk(struct nameidata *nd)
  1114. {
  1115. if (!(nd->flags & LOOKUP_RCU)) {
  1116. path_put(&nd->path);
  1117. } else {
  1118. nd->flags &= ~LOOKUP_RCU;
  1119. if (!(nd->flags & LOOKUP_ROOT))
  1120. nd->root.mnt = NULL;
  1121. rcu_read_unlock();
  1122. br_read_unlock(vfsmount_lock);
  1123. }
  1124. }
  1125. /*
  1126. * Do we need to follow links? We _really_ want to be able
  1127. * to do this check without having to look at inode->i_op,
  1128. * so we keep a cache of "no, this doesn't need follow_link"
  1129. * for the common case.
  1130. */
  1131. static inline int should_follow_link(struct inode *inode, int follow)
  1132. {
  1133. if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
  1134. if (likely(inode->i_op->follow_link))
  1135. return follow;
  1136. /* This gets set once for the inode lifetime */
  1137. spin_lock(&inode->i_lock);
  1138. inode->i_opflags |= IOP_NOFOLLOW;
  1139. spin_unlock(&inode->i_lock);
  1140. }
  1141. return 0;
  1142. }
/*
 * Walk one component: 0 = stepped into it, 1 = it is a symlink the
 * caller must follow (in ref-walk mode), negative = error (walk
 * already terminated).
 */
static inline int walk_component(struct nameidata *nd, struct path *path,
		struct qstr *name, int type, int follow)
{
	struct inode *inode;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
	 */
	if (unlikely(type != LAST_NORM))
		return handle_dots(nd, type);
	err = do_lookup(nd, name, path, &inode);
	if (unlikely(err)) {
		terminate_walk(nd);
		return err;
	}
	if (!inode) {
		/* Negative dentry - the name does not exist. */
		path_to_nameidata(path, nd);
		terminate_walk(nd);
		return -ENOENT;
	}
	if (should_follow_link(inode, follow)) {
		/*
		 * Following a symlink can sleep, so make sure we are in
		 * ref-walk mode before reporting it to the caller.
		 */
		if (nd->flags & LOOKUP_RCU) {
			if (unlikely(unlazy_walk(nd, path->dentry))) {
				terminate_walk(nd);
				return -ECHILD;
			}
		}
		BUG_ON(inode != path->dentry->d_inode);
		return 1;
	}
	/* Ordinary component: make it the new current position. */
	path_to_nameidata(path, nd);
	nd->inode = inode;
	return 0;
}
  1179. /*
  1180. * This limits recursive symlink follows to 8, while
  1181. * limiting consecutive symlinks to 40.
  1182. *
  1183. * Without that kind of total limit, nasty chains of consecutive
  1184. * symlinks can cause almost arbitrarily long lookups.
  1185. */
static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
	int res;

	/* Hit the nesting limit: fail the whole walk with ELOOP. */
	if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
		path_put_conditional(path, nd);
		path_put(&nd->path);
		return -ELOOP;
	}
	BUG_ON(nd->depth >= MAX_NESTED_LINKS);

	nd->depth++;
	current->link_count++;

	do {
		struct path link = *path;
		void *cookie;

		res = follow_link(&link, nd, &cookie);
		if (!res)
			/* 1 here means the target was itself a symlink. */
			res = walk_component(nd, path, &nd->last,
					     nd->last_type, LOOKUP_FOLLOW);
		put_link(nd, &link, cookie);
	} while (res > 0);

	current->link_count--;
	nd->depth--;
	return res;
}
  1210. /*
  1211. * We really don't want to look at inode->i_op->lookup
  1212. * when we don't have to. So we keep a cache bit in
  1213. * the inode ->i_opflags field that says "yes, we can
  1214. * do lookup on this inode".
  1215. */
  1216. static inline int can_lookup(struct inode *inode)
  1217. {
  1218. if (likely(inode->i_opflags & IOP_LOOKUP))
  1219. return 1;
  1220. if (likely(!inode->i_op->lookup))
  1221. return 0;
  1222. /* We do this once for the lifetime of the inode */
  1223. spin_lock(&inode->i_lock);
  1224. inode->i_opflags |= IOP_LOOKUP;
  1225. spin_unlock(&inode->i_lock);
  1226. return 1;
  1227. }
  1228. /*
  1229. * Name resolution.
  1230. * This is the basic name resolution function, turning a pathname into
  1231. * the final dentry. We expect 'base' to be positive and a directory.
  1232. *
  1233. * Returns 0 and nd will have valid dentry and mnt on success.
  1234. * Returns error and drops reference to input namei data on failure.
  1235. */
static int link_path_walk(const char *name, struct nameidata *nd)
{
	struct path next;
	int err;

	/* Skip leading slashes; a name of only slashes resolves here. */
	while (*name=='/')
		name++;
	if (!*name)
		return 0;

	/* At this point we know we have a real path component. */
	for(;;) {
		unsigned long hash;
		struct qstr this;
		unsigned int c;
		int type;

		/* Need exec permission on the current dir to descend. */
		err = may_lookup(nd);
		if (err)
			break;

		/* Scan the component, hashing it as we go. */
		this.name = name;
		c = *(const unsigned char *)name;

		hash = init_name_hash();
		do {
			name++;
			hash = partial_name_hash(c, hash);
			c = *(const unsigned char *)name;
		} while (c && (c != '/'));
		this.len = name - (const char *) this.name;
		this.hash = end_name_hash(hash);

		/* Classify "." and ".." without touching the dcache. */
		type = LAST_NORM;
		if (this.name[0] == '.') switch (this.len) {
			case 2:
				if (this.name[1] == '.') {
					type = LAST_DOTDOT;
					nd->flags |= LOOKUP_JUMPED;
				}
				break;
			case 1:
				type = LAST_DOT;
		}
		if (likely(type == LAST_NORM)) {
			struct dentry *parent = nd->path.dentry;
			nd->flags &= ~LOOKUP_JUMPED;
			/* Let the fs substitute its own hash (case folding
			 * etc.) via ->d_hash(). */
			if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
				err = parent->d_op->d_hash(parent, nd->inode,
							   &this);
				if (err < 0)
					break;
			}
		}

		/* remove trailing slashes? */
		if (!c)
			goto last_component;
		while (*++name == '/');
		if (!*name)
			goto last_component;

		err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
		if (err < 0)
			return err;

		if (err) {
			/* Component was a symlink - descend into it. */
			err = nested_symlink(&next, nd);
			if (err)
				return err;
		}
		/* The new position must itself be searchable. */
		if (can_lookup(nd->inode))
			continue;
		err = -ENOTDIR;
		break;
		/* here ends the main loop */

last_component:
		/* Leave the last component for the caller to deal with. */
		nd->last = this;
		nd->last_type = type;
		return 0;
	}
	terminate_walk(nd);
	return err;
}
/*
 * Set up nd for a walk: pick the starting point (explicit root, "/",
 * cwd, or a directory fd) and take either rcu-walk locks or real
 * references on it.  *fp is set when the caller must fput() a file
 * after an rcu-walk.
 */
static int path_init(int dfd, const char *name, unsigned int flags,
		     struct nameidata *nd, struct file **fp)
{
	int retval = 0;
	int fput_needed;
	struct file *file;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED;
	nd->depth = 0;

	/* Caller supplied its own root (see vfs_path_lookup()). */
	if (flags & LOOKUP_ROOT) {
		struct inode *inode = nd->root.dentry->d_inode;
		if (*name) {
			if (!inode->i_op->lookup)
				return -ENOTDIR;
			retval = inode_permission(inode, MAY_EXEC);
			if (retval)
				return retval;
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
		}
		return 0;
	}

	nd->root.mnt = NULL;

	if (*name=='/') {
		/* Absolute path: start at the process root. */
		if (flags & LOOKUP_RCU) {
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
			set_root_rcu(nd);
		} else {
			set_root(nd);
			path_get(&nd->root);
		}
		nd->path = nd->root;
	} else if (dfd == AT_FDCWD) {
		/* Relative path: start at the current working directory. */
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			br_read_lock(vfsmount_lock);
			rcu_read_lock();

			/* Sample pwd consistently against concurrent chdir */
			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
		}
	} else {
		/* Relative to an open directory fd (the *at() family). */
		struct dentry *dentry;

		file = fget_raw_light(dfd, &fput_needed);
		retval = -EBADF;
		if (!file)
			goto out_fail;

		dentry = file->f_path.dentry;

		if (*name) {
			retval = -ENOTDIR;
			if (!S_ISDIR(dentry->d_inode->i_mode))
				goto fput_fail;

			retval = inode_permission(dentry->d_inode, MAY_EXEC);
			if (retval)
				goto fput_fail;
		}

		nd->path = file->f_path;
		if (flags & LOOKUP_RCU) {
			/* Caller must fput() this after finishing the walk */
			if (fput_needed)
				*fp = file;
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
		} else {
			path_get(&file->f_path);
			fput_light(file, fput_needed);
		}
	}

	nd->inode = nd->path.dentry->d_inode;
	return 0;

fput_fail:
	fput_light(file, fput_needed);
out_fail:
	return retval;
}
  1399. static inline int lookup_last(struct nameidata *nd, struct path *path)
  1400. {
  1401. if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
  1402. nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
  1403. nd->flags &= ~LOOKUP_PARENT;
  1404. return walk_component(nd, path, &nd->last, nd->last_type,
  1405. nd->flags & LOOKUP_FOLLOW);
  1406. }
  1407. /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
static int path_lookupat(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	struct file *base = NULL;
	struct path path;
	int err;

	/*
	 * Path walking is largely split up into 2 different synchronisation
	 * schemes, rcu-walk and ref-walk (explained in
	 * Documentation/filesystems/path-lookup.txt). These share much of the
	 * path walk code, but some things particularly setup, cleanup, and
	 * following mounts are sufficiently divergent that functions are
	 * duplicated. Typically there is a function foo(), and its RCU
	 * analogue, foo_rcu().
	 *
	 * -ECHILD is the error number of choice (just to avoid clashes) that
	 * is returned if some aspect of an rcu-walk fails. Such an error must
	 * be handled by restarting a traditional ref-walk (which will always
	 * be able to complete).
	 */
	err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
	if (unlikely(err))
		return err;

	current->total_link_count = 0;
	err = link_path_walk(name, nd);

	if (!err && !(flags & LOOKUP_PARENT)) {
		err = lookup_last(nd, &path);
		/* lookup_last() returns 1 when the tail is a symlink. */
		while (err > 0) {
			void *cookie;
			struct path link = path;
			nd->flags |= LOOKUP_PARENT;
			err = follow_link(&link, nd, &cookie);
			if (!err)
				err = lookup_last(nd, &path);
			put_link(nd, &link, cookie);
		}
	}

	if (!err)
		err = complete_walk(nd);

	/* LOOKUP_DIRECTORY: the result must be lookup-capable. */
	if (!err && nd->flags & LOOKUP_DIRECTORY) {
		if (!nd->inode->i_op->lookup) {
			path_put(&nd->path);
			err = -ENOTDIR;
		}
	}

	if (base)
		fput(base);

	/* Drop the root reference path_init() may have taken. */
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		path_put(&nd->root);
		nd->root.mnt = NULL;
	}
	return err;
}
  1461. static int do_path_lookup(int dfd, const char *name,
  1462. unsigned int flags, struct nameidata *nd)
  1463. {
  1464. int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
  1465. if (unlikely(retval == -ECHILD))
  1466. retval = path_lookupat(dfd, name, flags, nd);
  1467. if (unlikely(retval == -ESTALE))
  1468. retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
  1469. if (likely(!retval)) {
  1470. if (unlikely(!audit_dummy_context())) {
  1471. if (nd->path.dentry && nd->inode)
  1472. audit_inode(name, nd->path.dentry);
  1473. }
  1474. }
  1475. return retval;
  1476. }
/* Resolve everything but the last component of a kernel pathname. */
int kern_path_parent(const char *name, struct nameidata *nd)
{
	return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
}
  1481. int kern_path(const char *name, unsigned int flags, struct path *path)
  1482. {
  1483. struct nameidata nd;
  1484. int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
  1485. if (!res)
  1486. *path = nd.path;
  1487. return res;
  1488. }
  1489. /**
  1490. * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
  1491. * @dentry: pointer to dentry of the base directory
  1492. * @mnt: pointer to vfs mount of the base directory
  1493. * @name: pointer to file name
  1494. * @flags: lookup flags
  1495. * @path: pointer to struct path to fill
  1496. */
  1497. int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
  1498. const char *name, unsigned int flags,
  1499. struct path *path)
  1500. {
  1501. struct nameidata nd;
  1502. int err;
  1503. nd.root.dentry = dentry;
  1504. nd.root.mnt = mnt;
  1505. BUG_ON(flags & LOOKUP_PARENT);
  1506. /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
  1507. err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
  1508. if (!err)
  1509. *path = nd.path;
  1510. return err;
  1511. }
/*
 * Single-component lookup for callers that already hold the parent's
 * i_mutex (create/unlink paths).  @name must already be hashed.
 */
static struct dentry *__lookup_hash(struct qstr *name,
		struct dentry *base, struct nameidata *nd)
{
	struct inode *inode = base->d_inode;
	struct dentry *dentry;
	int err;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		return ERR_PTR(err);

	/*
	 * Don't bother with __d_lookup: callers are for creat as
	 * well as unlink, so a lot of the time it would cost
	 * a double lookup.
	 */
	dentry = d_lookup(base, name);

	if (dentry && d_need_lookup(dentry)) {
		/*
		 * __lookup_hash is called with the parent dir's i_mutex already
		 * held, so we are good to go here.
		 */
		dentry = d_inode_lookup(base, dentry, nd);
		if (IS_ERR(dentry))
			return dentry;
	}

	if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) {
		int status = d_revalidate(dentry, nd);
		if (unlikely(status <= 0)) {
			/*
			 * The dentry failed validation.
			 * If d_revalidate returned 0 attempt to invalidate
			 * the dentry otherwise d_revalidate is asking us
			 * to return a fail status.
			 */
			if (status < 0) {
				dput(dentry);
				return ERR_PTR(status);
			} else if (!d_invalidate(dentry)) {
				dput(dentry);
				dentry = NULL;
			}
		}
	}

	/* Nothing (valid) cached: do the real ->lookup(). */
	if (!dentry)
		dentry = d_alloc_and_lookup(base, name, nd);
	return dentry;
}
  1558. /*
  1559. * Restricted form of lookup. Doesn't follow links, single-component only,
  1560. * needs parent already locked. Doesn't follow mounts.
  1561. * SMP-safe.
  1562. */
  1563. static struct dentry *lookup_hash(struct nameidata *nd)
  1564. {
  1565. return __lookup_hash(&nd->last, nd->path.dentry, nd);
  1566. }
  1567. /**
  1568. * lookup_one_len - filesystem helper to lookup single pathname component
  1569. * @name: pathname component to lookup
  1570. * @base: base directory to lookup from
  1571. * @len: maximum length @len should be interpreted to
  1572. *
  1573. * Note that this routine is purely a helper for filesystem usage and should
  1574. * not be called by generic code. Also note that by using this function the
  1575. * nameidata argument is passed to the filesystem methods and a filesystem
  1576. * using this helper needs to be prepared for that.
  1577. */
  1578. struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
  1579. {
  1580. struct qstr this;
  1581. unsigned long hash;
  1582. unsigned int c;
  1583. WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
  1584. this.name = name;
  1585. this.len = len;
  1586. if (!len)
  1587. return ERR_PTR(-EACCES);
  1588. hash = init_name_hash();
  1589. while (len--) {
  1590. c = *(const unsigned char *)name++;
  1591. if (c == '/' || c == '\0')
  1592. return ERR_PTR(-EACCES);
  1593. hash = partial_name_hash(c, hash);
  1594. }
  1595. this.hash = end_name_hash(hash);
  1596. /*
  1597. * See if the low-level filesystem might want
  1598. * to use its own hash..
  1599. */
  1600. if (base->d_flags & DCACHE_OP_HASH) {
  1601. int err = base->d_op->d_hash(base, base->d_inode, &this);
  1602. if (err < 0)
  1603. return ERR_PTR(err);
  1604. }
  1605. return __lookup_hash(&this, base, NULL);
  1606. }
  1607. int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
  1608. struct path *path, int *empty)
  1609. {
  1610. struct nameidata nd;
  1611. char *tmp = getname_flags(name, flags, empty);
  1612. int err = PTR_ERR(tmp);
  1613. if (!IS_ERR(tmp)) {
  1614. BUG_ON(flags & LOOKUP_PARENT);
  1615. err = do_path_lookup(dfd, tmp, flags, &nd);
  1616. putname(tmp);
  1617. if (!err)
  1618. *path = nd.path;
  1619. }
  1620. return err;
  1621. }
  1622. int user_path_at(int dfd, const char __user *name, unsigned flags,
  1623. struct path *path)
  1624. {
  1625. return user_path_at_empty(dfd, name, flags, path, 0);
  1626. }
  1627. static int user_path_parent(int dfd, const char __user *path,
  1628. struct nameidata *nd, char **name)
  1629. {
  1630. char *s = getname(path);
  1631. int error;
  1632. if (IS_ERR(s))
  1633. return PTR_ERR(s);
  1634. error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
  1635. if (error)
  1636. putname(s);
  1637. else
  1638. *name = s;
  1639. return error;
  1640. }
  1641. /*
  1642. * It's inline, so penalty for filesystems that don't use sticky bit is
  1643. * minimal.
  1644. */
  1645. static inline int check_sticky(struct inode *dir, struct inode *inode)
  1646. {
  1647. uid_t fsuid = current_fsuid();
  1648. if (!(dir->i_mode & S_ISVTX))
  1649. return 0;
  1650. if (current_user_ns() != inode_userns(inode))
  1651. goto other_userns;
  1652. if (inode->i_uid == fsuid)
  1653. return 0;
  1654. if (dir->i_uid == fsuid)
  1655. return 0;
  1656. other_userns:
  1657. return !ns_capable(inode_userns(inode), CAP_FOWNER);
  1658. }
  1659. /*
  1660. * Check whether we can remove a link victim from directory dir, check
  1661. * whether the type of victim is right.
  1662. * 1. We can't do it if dir is read-only (done in permission())
  1663. * 2. We should have write and exec permissions on dir
  1664. * 3. We can't remove anything from append-only dir
  1665. * 4. We can't do anything with immutable dir (done in permission())
  1666. * 5. If the sticky bit on dir is set we should either
  1667. * a. be owner of dir, or
  1668. * b. be owner of victim, or
  1669. * c. have CAP_FOWNER capability
  1670. * 6. If the victim is append-only or immutable we can't do antyhing with
  1671. * links pointing to it.
  1672. * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
  1673. * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
  1674. * 9. We can't remove a root or mountpoint.
  1675. * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
  1676. * nfs_async_unlink().
  1677. */
static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
	int error;

	/* Victim must exist (negative dentry = nothing to delete). */
	if (!victim->d_inode)
		return -ENOENT;

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(victim, dir);

	/* Rule 2: write+exec on the containing directory. */
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	/* Rule 3: nothing may be removed from an append-only dir. */
	if (IS_APPEND(dir))
		return -EPERM;
	/* Rules 5 and 6: sticky-bit ownership, immutable/append victims. */
	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
		return -EPERM;
	/* Rules 7-9: directory-ness must match; no roots/mountpoints. */
	if (isdir) {
		if (!S_ISDIR(victim->d_inode->i_mode))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (S_ISDIR(victim->d_inode->i_mode))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* Rule 10: NFS sillyrenamed files are handled elsewhere. */
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
  1706. /* Check whether we can create an object with dentry child in directory
  1707. * dir.
  1708. * 1. We can't do it if child already exists (open has special treatment for
  1709. * this case, but since we are inlined it's OK)
  1710. * 2. We can't do it if dir is read-only (done in permission())
  1711. * 3. We should have write and exec permissions on dir
  1712. * 4. We can't do it if dir is immutable (done in permission())
  1713. */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	/* Target name must not already exist. */
	if (child->d_inode)
		return -EEXIST;
	/* Can't create inside a directory that has been removed. */
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* Need write and exec permission on the parent directory. */
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
  1722. /*
  1723. * p1 and p2 should be directories on the same fs.
  1724. */
  1725. struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
  1726. {
  1727. struct dentry *p;
  1728. if (p1 == p2) {
  1729. mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
  1730. return NULL;
  1731. }
  1732. mutex_lock(&p1->d_inode->i_sb->s_vfs_rena