PageRenderTime 41ms CodeModel.GetById 12ms RepoModel.GetById 0ms app.codeStats 0ms

/fs/cachefiles/namei.c

https://github.com/mstsirkin/linux
C | 988 lines | 697 code | 165 blank | 126 comment | 129 complexity | 77f3b868f961a583b1c1958ae55bc590 MD5 | raw file
  1. /* CacheFiles path walking and related routines
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/sched.h>
  13. #include <linux/file.h>
  14. #include <linux/fs.h>
  15. #include <linux/fsnotify.h>
  16. #include <linux/quotaops.h>
  17. #include <linux/xattr.h>
  18. #include <linux/mount.h>
  19. #include <linux/namei.h>
  20. #include <linux/security.h>
  21. #include <linux/slab.h>
  22. #include "internal.h"
  23. #define CACHEFILES_KEYBUF_SIZE 512
/*
 * Dump debugging info about a single cachefiles object to the kernel log.
 *
 * @object: the object to dump
 * @prefix: string prepended to every line (e.g. "" or "x") so that the two
 *	    objects in a collision dump can be told apart
 * @keybuf: scratch buffer of CACHEFILES_KEYBUF_SIZE bytes for the cookie key,
 *	    or NULL, in which case the key is not dumped
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix,
				u8 *keybuf)
{
	struct fscache_cookie *cookie;
	unsigned keylen, loop;

	printk(KERN_ERR "%sobject: OBJ%x\n",
	       prefix, object->fscache.debug_id);
	printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, fscache_object_states[object->fscache.state],
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events,
	       object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	printk(KERN_ERR "%sparent=%p\n",
	       prefix, object->fscache.parent);

	/* take the object lock to stabilise the cookie pointer whilst we
	 * dereference it */
	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		/* NOTE(review): ->get_key() is invoked here whilst holding
		 * fscache.lock (a spinlock); if a netfs's get_key
		 * implementation can sleep this would be a bug - verify
		 * against the fscache netfs API contract */
		if (keybuf)
			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
						      CACHEFILES_KEYBUF_SIZE);
		else
			keylen = 0;
	} else {
		printk(KERN_ERR "%scookie=NULL\n", prefix);
		keylen = 0;
	}
	spin_unlock(&object->fscache.lock);

	/* dump the key as hex, outside of the lock */
	if (keylen) {
		printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
		for (loop = 0; loop < keylen; loop++)
			printk("%02x", keybuf[loop]);
		printk("'\n");
	}
}
  72. /*
  73. * dump debugging info about a pair of objects
  74. */
  75. static noinline void cachefiles_printk_object(struct cachefiles_object *object,
  76. struct cachefiles_object *xobject)
  77. {
  78. u8 *keybuf;
  79. keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
  80. if (object)
  81. __cachefiles_printk_object(object, "", keybuf);
  82. if (xobject)
  83. __cachefiles_printk_object(xobject, "x", keybuf);
  84. kfree(keybuf);
  85. }
  86. /*
  87. * mark the owner of a dentry, if there is one, to indicate that that dentry
  88. * has been preemptively deleted
  89. * - the caller must hold the i_mutex on the dentry's parent as required to
  90. * call vfs_unlink(), vfs_rmdir() or vfs_rename()
  91. */
  92. static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
  93. struct dentry *dentry)
  94. {
  95. struct cachefiles_object *object;
  96. struct rb_node *p;
  97. _enter(",'%*.*s'",
  98. dentry->d_name.len, dentry->d_name.len, dentry->d_name.name);
  99. write_lock(&cache->active_lock);
  100. p = cache->active_nodes.rb_node;
  101. while (p) {
  102. object = rb_entry(p, struct cachefiles_object, active_node);
  103. if (object->dentry > dentry)
  104. p = p->rb_left;
  105. else if (object->dentry < dentry)
  106. p = p->rb_right;
  107. else
  108. goto found_dentry;
  109. }
  110. write_unlock(&cache->active_lock);
  111. _leave(" [no owner]");
  112. return;
  113. /* found the dentry for */
  114. found_dentry:
  115. kdebug("preemptive burial: OBJ%x [%s] %p",
  116. object->fscache.debug_id,
  117. fscache_object_states[object->fscache.state],
  118. dentry);
  119. if (object->fscache.state < FSCACHE_OBJECT_DYING) {
  120. printk(KERN_ERR "\n");
  121. printk(KERN_ERR "CacheFiles: Error:"
  122. " Can't preemptively bury live object\n");
  123. cachefiles_printk_object(object, NULL);
  124. } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
  125. printk(KERN_ERR "CacheFiles: Error:"
  126. " Object already preemptively buried\n");
  127. }
  128. write_unlock(&cache->active_lock);
  129. _leave(" [owner marked]");
  130. }
/*
 * Record the fact that an object is now active.
 *
 * Inserts the object into the cache's active-object rbtree (keyed by dentry
 * pointer).  If an old object from a previous incarnation still occupies the
 * slot, waits (up to 60s) for it to die, then retries the insertion.
 *
 * Returns 0 on success or -ETIMEDOUT if we gave up waiting and the lookup
 * should be requeued behind the old object.
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	/* the ACTIVE bit must not already be set - only one user at a time */
	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	/* walk the tree to find the insertion point; a match on dentry means
	 * an old incarnation is still holding the slot */
	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	/* a collision with a still-live object is a logic error */
	if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}

	/* pin the old object whilst we wait on it outside the lock */
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;
			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		/* congested and the old object still active: queue behind it */
		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* waited the full 60s and the slot is still held */
		if (timeout <= 0) {
			printk(KERN_ERR "\n");
			printk(KERN_ERR "CacheFiles: Error: Overlong"
			       " wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	/* the old object went away; drop our pin and retry the insertion */
	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	/* give back the ACTIVE bit we set above and drop the pin on the old
	 * object; the caller interprets -ETIMEDOUT as "requeue me" */
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}
/*
 * Delete an object representation from the cache.
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex (dir->d_inode->i_mutex, held by the caller)
 *   on ALL return paths
 *
 * @cache:      the cache this object belongs to
 * @dir:        the parent directory of @rep (i_mutex held by caller)
 * @rep:        the dentry to remove
 * @preemptive: true if this burial is on behalf of a colliding new object,
 *              in which case the old owner is marked BURIED
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%*.*s','%*.*s'",
	       dir->d_name.len, dir->d_name.len, dir->d_name.name,
	       rep->d_name.len, rep->d_name.len, rep->d_name.name);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!S_ISDIR(rep->d_inode->i_mode)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			ret = vfs_unlink(dir->d_inode, rep);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep);
		}

		mutex_unlock(&dir->d_inode->i_mutex);

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	mutex_unlock(&dir->d_inode->i_mutex);

try_again:
	/* first step is to make up a grave dentry in the graveyard: the name
	 * is seconds-since-epoch plus a monotonic counter, both in hex */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic - locks both parents and returns the
	 * common ancestor (the "trap") for loop detection */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	if (grave->d_inode) {
		/* the grave name is already taken (counter collision) -
		 * back off and pick a new name */
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		ret = vfs_rename(dir->d_inode, rep,
				 cache->graveyard->d_inode, grave);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	/* NOTE(review): a rename failure is logged but still returns 0 here -
	 * presumably deliberate best-effort behaviour; confirm callers don't
	 * need the real error */
	_leave(" = 0");
	return 0;
}
  353. /*
  354. * delete an object representation from the cache
  355. */
  356. int cachefiles_delete_object(struct cachefiles_cache *cache,
  357. struct cachefiles_object *object)
  358. {
  359. struct dentry *dir;
  360. int ret;
  361. _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);
  362. ASSERT(object->dentry);
  363. ASSERT(object->dentry->d_inode);
  364. ASSERT(object->dentry->d_parent);
  365. dir = dget_parent(object->dentry);
  366. mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
  367. if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
  368. /* object allocation for the same key preemptively deleted this
  369. * object's file so that it could create its own file */
  370. _debug("object preemptively buried");
  371. mutex_unlock(&dir->d_inode->i_mutex);
  372. ret = 0;
  373. } else {
  374. /* we need to check that our parent is _still_ our parent - it
  375. * may have been renamed */
  376. if (dir == object->dentry->d_parent) {
  377. ret = cachefiles_bury_object(cache, dir,
  378. object->dentry, false);
  379. } else {
  380. /* it got moved, presumably by cachefilesd culling it,
  381. * so it's no longer in the key path and we can ignore
  382. * it */
  383. mutex_unlock(&dir->d_inode->i_mutex);
  384. ret = 0;
  385. }
  386. }
  387. dput(dir);
  388. _leave(" = %d", ret);
  389. return ret;
  390. }
/*
 * Walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go.
 *
 * @parent:  the parent object (must be a directory in the backing fs)
 * @object:  the object being looked up/created; on success object->dentry
 *           (and object->backer for data files) are set and the object is
 *           marked active
 * @key:     sequence of NUL-separated path components, terminated by a
 *           double NUL
 * @auxdata: auxiliary data to store in / check against the object's xattr
 *
 * Returns 0 on success or a negative errno.
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(parent->dentry->d_inode);

	if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
		// TODO: convert file to dir
		_leave("looking up in none directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;		/* this was the last component */

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next))
		goto lookup_error;

	_debug("next -> %p %s", next, next->d_inode ? "positive" : "negative");

	/* only the terminal component decides whether the object is new */
	if (!key)
		object->new = !next->d_inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (!next->d_inode)
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(dir->d_inode, next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode)) {
			kerror("inode %lu is not a directory",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(dir->d_inode, next, S_IFREG, NULL);
			cachefiles_hist(cachefiles_create_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("create -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode) &&
			   !S_ISREG(next->d_inode->i_mode)
			   ) {
			kerror("inode %lu is not a file or directory",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%*.*s'",
		       next->d_name.len, next->d_name.len, next->d_name.name);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, dir, next, true);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	mutex_unlock(&dir->d_inode->i_mutex);
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		touch_atime(cache->mnt, next);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (S_ISREG(object->dentry->d_inode->i_mode)) {
			const struct address_space_operations *aops;

			/* the backing fs must support bmap so that the cache
			 * can probe for data vs holes */
			ret = -EPERM;
			aops = object->dentry->d_inode->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", object->dentry->d_inode->i_ino);
	return 0;

create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	/* the ACTIVE bit was already cleared by mark_object_active */
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	/* deactivate: remove from the active tree and wake any waiters */
	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
	write_unlock(&cache->active_lock);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	/* bury_object already dropped the dir mutex */
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}
/*
 * Get a subdirectory of @dir called @dirname, creating it (mode 0700) if it
 * doesn't exist, and verify that the backing filesystem supports all the
 * inode operations cachefiles requires.
 *
 * Returns the subdir dentry (with a reference held) or an ERR_PTR.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	mutex_lock(&dir->d_inode->i_mutex);

	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, subdir->d_inode ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (!subdir->d_inode) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(dir->d_inode, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		ASSERT(subdir->d_inode);

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       subdir->d_inode,
		       subdir->d_inode->i_ino);
	}

	mutex_unlock(&dir->d_inode->i_mutex);

	/* we need to make sure the subdir is a directory */
	ASSERT(subdir->d_inode);

	if (!S_ISDIR(subdir->d_inode->i_mode)) {
		kerror("%s is not a directory", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* the backing fs must supply every inode op the cache depends on */
	ret = -EPERM;
	if (!subdir->d_inode->i_op ||
	    !subdir->d_inode->i_op->setxattr ||
	    !subdir->d_inode->i_op->getxattr ||
	    !subdir->d_inode->i_op->lookup ||
	    !subdir->d_inode->i_op->mkdir ||
	    !subdir->d_inode->i_op->create ||
	    !subdir->d_inode->i_op->rename ||
	    !subdir->d_inode->i_op->rmdir ||
	    !subdir->d_inode->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", subdir->d_inode->i_ino);
	return subdir;

check_error:
	/* mutex already dropped on this path */
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(subdir);
	kerror("mkdir %s failed with error %d", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(subdir);
	kerror("Lookup %s failed with error %d", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	mutex_unlock(&dir->d_inode->i_mutex);
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
/*
 * Find out if an object is in use or not.
 * - if it finds the object and it's not in use:
 *   - returns a pointer to the object's dentry with a reference held
 *   - returns with @dir's i_mutex STILL HELD (the caller must release it)
 * - on any error/busy return, the mutex has been dropped
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%*.*s/,%s",
	//       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);

	/* look up the victim */
	mutex_lock_nested(&dir->d_inode->i_mutex, 1);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, victim->d_inode ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (!victim->d_inode) {
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object - search the active tree,
	 * which is keyed by dentry pointer */
	read_lock(&cache->active_lock);

	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	//_leave(" = %p", victim);
	return victim;		/* NB: dir mutex still held */

object_in_use:
	read_unlock(&cache->active_lock);
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		kerror("Internal error: %d", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
  758. /*
  759. * cull an object if it's not in use
  760. * - called only by cache manager daemon
  761. */
  762. int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
  763. char *filename)
  764. {
  765. struct dentry *victim;
  766. int ret;
  767. _enter(",%*.*s/,%s",
  768. dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
  769. victim = cachefiles_check_active(cache, dir, filename);
  770. if (IS_ERR(victim))
  771. return PTR_ERR(victim);
  772. _debug("victim -> %p %s",
  773. victim, victim->d_inode ? "positive" : "negative");
  774. /* okay... the victim is not being used so we can cull it
  775. * - start by marking it as stale
  776. */
  777. _debug("victim is cullable");
  778. ret = cachefiles_remove_object_xattr(cache, victim);
  779. if (ret < 0)
  780. goto error_unlock;
  781. /* actually remove the victim (drops the dir mutex) */
  782. _debug("bury");
  783. ret = cachefiles_bury_object(cache, dir, victim, false);
  784. if (ret < 0)
  785. goto error;
  786. dput(victim);
  787. _leave(" = 0");
  788. return 0;
  789. error_unlock:
  790. mutex_unlock(&dir->d_inode->i_mutex);
  791. error:
  792. dput(victim);
  793. if (ret == -ENOENT) {
  794. /* file or dir now absent - probably retired by netfs */
  795. _leave(" = -ESTALE [absent]");
  796. return -ESTALE;
  797. }
  798. if (ret != -ENOMEM) {
  799. kerror("Internal error: %d", ret);
  800. ret = -EIO;
  801. }
  802. _leave(" = %d", ret);
  803. return ret;
  804. }
  805. /*
  806. * find out if an object is in use or not
  807. * - called only by cache manager daemon
  808. * - returns -EBUSY or 0 to indicate whether an object is in use or not
  809. */
  810. int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
  811. char *filename)
  812. {
  813. struct dentry *victim;
  814. //_enter(",%*.*s/,%s",
  815. // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
  816. victim = cachefiles_check_active(cache, dir, filename);
  817. if (IS_ERR(victim))
  818. return PTR_ERR(victim);
  819. mutex_unlock(&dir->d_inode->i_mutex);
  820. dput(victim);
  821. //_leave(" = 0");
  822. return 0;
  823. }