/kern_oII/fs/notify/inotify/inotify_fsnotify.c


/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>

#include "inotify.h"
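
/*
 * Deliver one fsnotify event to an inotify group: look up the group's mark
 * on the inode being reported, record the watch descriptor in per-event
 * private data, and queue the event on the group's notification list.
 */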
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	struct inode *to_tell;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int wd, ret;

	to_tell = event->to_tell;

	spin_lock(&to_tell->i_lock);
	entry = fsnotify_find_mark_entry(group, to_tell);
	spin_unlock(&to_tell->i_lock);
	/* race with watch removal?  We already passed should_send */
	if (unlikely(!entry))
		return 0;
	ientry = container_of(entry, struct inotify_inode_mark_entry,
			      fsn_entry);
	wd = ientry->wd;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv)) {
		/* drop the reference taken by fsnotify_find_mark_entry() */
		fsnotify_put_mark(entry);
		return -ENOMEM;
	}

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = wd;

	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
	if (ret) {
		inotify_free_event_priv(fsn_event_priv);
		/* EEXIST says we tail matched, EOVERFLOW isn't something
		 * to report up the stack. */
		if ((ret == -EEXIST) ||
		    (ret == -EOVERFLOW))
			ret = 0;
	}

	/*
	 * If we hold the entry until after the event is on the queue
	 * IN_IGNORED won't be able to pass this event in the queue
	 */
	fsnotify_put_mark(entry);

	return ret;
}
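
/*
 * Called by fsnotify when a mark is being destroyed: hand the mark to
 * inotify_ignored_and_remove_idr(), which reports IN_IGNORED to userspace
 * and drops the watch descriptor from the group's idr.
 */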
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(entry, group);
}
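
/*
 * Decide whether this group cares about an event on @inode: the event is
 * sent only if the group has a mark on the inode and the mark's mask
 * includes the event type (ignoring the "event on child" flag).
 */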
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;
	bool send;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return false;

	mask = (mask & ~FS_EVENT_ON_CHILD);
	send = (entry->mask & mask);

	/* find took a reference */
	fsnotify_put_mark(entry);

	return send;
}
/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	static bool warned = false;

	if (warned)
		return 0;

	/* only warn about the first leaked entry we find */
	warned = true;

	entry = p;
	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
		"idr.  Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it.  This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (entry)
		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
			entry->group, entry->inode, ientry->wd);
	return 0;
}
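
/*
 * Tear down the per-group inotify state when the fsnotify group is
 * destroyed: warn about any watch descriptors still left in the idr,
 * then release the idr itself.
 */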
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
}
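
/*
 * Free the inotify-specific private data (the watch descriptor) that was
 * attached to a queued event in inotify_handle_event().
 */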
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
	struct inotify_event_private_data *event_priv;

	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
				  fsnotify_event_priv_data);

	kmem_cache_free(event_priv_cachep, event_priv);
}
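
/*
 * The callbacks through which the generic fsnotify core drives inotify:
 * event delivery, event filtering, and cleanup of per-group, per-event,
 * and per-mark state.
 */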
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.should_send_event = inotify_should_send_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event_priv = inotify_free_event_priv,
	.freeing_mark = inotify_freeing_mark,
};
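
For context, a minimal userspace sketch (not part of this kernel file) of the inotify API whose events the callbacks above ultimately service; the watched path and event mask below are only examples.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	/* buffer aligned for struct inotify_event, as in the inotify(7) example */
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;

	/* one inotify instance corresponds to one fsnotify_group in the kernel */
	int fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return EXIT_FAILURE;
	}

	/* the returned watch descriptor is the 'wd' the kernel keeps in the
	 * group's idr; "/tmp" is only an example path */
	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE | IN_MODIFY);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	/* events queued by inotify_handle_event() are read back here as
	 * struct inotify_event records (only the first one is printed) */
	len = read(fd, buf, sizeof(buf));
	if (len > 0) {
		struct inotify_event *ev = (struct inotify_event *)buf;
		printf("wd=%d mask=0x%x name=%s\n", ev->wd,
		       (unsigned)ev->mask, ev->len ? ev->name : "");
	}

	close(fd);
	return 0;
}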