/filesystems/unixfs/common/unixfs/unixfs_internal.c

/*
 * UnixFS
 *
 * A general-purpose file system layer for writing/reimplementing/porting
 * Unix file systems through MacFUSE.
 * Copyright (c) 2008 Amit Singh. All Rights Reserved.
 * http://osxbook.com
 */

/*
 * XXX: This is very ad hoc right now. I made it "work" only for the
 * demos. Do not rely on this for read-write support (yet).
 */
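
/*
 * A sketch of how a file system back-end is expected to drive this layer,
 * inferred from the functions below. "struct myfs_private" and
 * "read_disk_inode()" are placeholder names, not part of this file:
 *
 *   unixfs_inodelayer_init(sizeof(struct myfs_private));  // at mount time
 *
 *   struct inode* ip = unixfs_inodelayer_iget(ino);       // takes a reference
 *   if (ip && !ip->I_initialized) {
 *       if (read_disk_inode(ip) < 0) {                    // hypothetical helper
 *           unixfs_inodelayer_ifailed(ip);                // unhashes and frees ip
 *           return;
 *       }
 *       unixfs_inodelayer_isucceeded(ip);
 *   }
 *   // ... use ip and ip->I_private ...
 *   unixfs_inodelayer_iput(ip);                           // drops the reference
 *
 *   unixfs_inodelayer_fini();                             // at unmount time
 */
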
#include "unixfs_internal.h"

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>   /* pthread_mutex_*, pthread_cond_*; likely also via unixfs_internal.h */
#include <sys/queue.h> /* LIST_* macros; likely also via unixfs_internal.h */

static int desirednodes = 65536;
static pthread_mutex_t ihash_lock;
static LIST_HEAD(ihash_head, inode) *ihash_table = NULL;
typedef struct ihash_head ihash_head;
static size_t ihash_count = 0;
static size_t iprivsize = 0;
static u_long ihash_mask;
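
/*
 * Map an inode number to its hash bucket. The table size is a power of
 * two, so "ino & ihash_mask" selects one of the (ihash_mask + 1) buckets.
 */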
static ihash_head*
unixfs_inodelayer_firstfromhash(ino_t ino)
{
    return (ihash_head*)&ihash_table[ino & ihash_mask];
}
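
/*
 * Initialize the inode layer: record how much per-inode private space the
 * caller wants (privsize), set up the hash lock, and allocate a
 * power-of-two-sized table of empty hash buckets. Returns 0 on success,
 * -1 on failure.
 */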
int
unixfs_inodelayer_init(size_t privsize)
{
    /* Record the private-area size up front: the non-hash path in
       unixfs_inodelayer_iget() reads it even when the hash is disabled. */
    iprivsize = privsize;

    if (!UNIXFS_ENABLE_INODEHASH)
        return 0;

    if (pthread_mutex_init(&ihash_lock, (const pthread_mutexattr_t*)0)) {
        fprintf(stderr, "failed to initialize the inode layer lock\n");
        return -1;
    }

    int i;
    u_long hashsize;
    LIST_HEAD(generic, generic) *hashtbl;

    /* hashsize becomes the largest power of two <= desirednodes, so that
       (hashsize - 1) can serve as a bit mask. */
    for (hashsize = 1; hashsize <= desirednodes; hashsize <<= 1)
        continue;
    hashsize >>= 1;

    hashtbl = (struct generic *)malloc(hashsize * sizeof(*hashtbl));
    if (hashtbl != NULL) {
        for (i = 0; i < hashsize; i++)
            LIST_INIT(&hashtbl[i]);
        ihash_mask = hashsize - 1;
        ihash_table = (struct ihash_head *)hashtbl;
    }

    if (ihash_table == NULL) {
        (void)pthread_mutex_destroy(&ihash_lock);
        return -1;
    }

    return 0;
}
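
/*
 * Tear down the inode layer. Complains (but does not free the entries)
 * if any inodes are still hashed, then releases the table and the lock.
 */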
void
unixfs_inodelayer_fini(void)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    if (ihash_table != NULL) {
        if (ihash_count != 0) {
            fprintf(stderr,
                    "*** warning: ihash terminated when not empty (%lu)\n",
                    (unsigned long)ihash_count);
            int node_index = 0;
            u_long ihash_index = 0;
            /* The table has (ihash_mask + 1) buckets; walk all of them. */
            for (; ihash_index < (ihash_mask + 1); ihash_index++) {
                struct inode* ip;
                LIST_FOREACH(ip, &ihash_table[ihash_index], I_hashlink) {
                    fprintf(stderr, "*** warning: inode %llu still present\n",
                            (ino64_t)ip->I_number);
                    node_index++;
                }
            }
        }
        u_long i;
        for (i = 0; i < (ihash_mask + 1); i++) {
            if (ihash_table[i].lh_first != NULL)
                fprintf(stderr,
                        "*** warning: found ihash_table[%lu].lh_first = %p\n",
                        i, ihash_table[i].lh_first);
        }
        free(ihash_table);
        ihash_table = NULL;
    }
    (void)pthread_mutex_destroy(&ihash_lock);
}
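
/*
 * Look up (or create) the in-core inode for "ino" and take a reference
 * on it. A freshly created inode comes back with I_initialized == 0 and
 * I_attachoutstanding == 1; the caller is expected to fill it in and then
 * call unixfs_inodelayer_isucceeded() or unixfs_inodelayer_ifailed().
 * Concurrent callers for the same inode sleep on I_state_cond until the
 * attach completes. Returns NULL on allocation failure.
 */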
struct inode *
unixfs_inodelayer_iget(ino_t ino)
{
    if (!UNIXFS_ENABLE_INODEHASH) {
        struct inode* new_node = calloc(1, sizeof(struct inode) + iprivsize);
        if (new_node == NULL)
            return NULL;
        new_node->I_number = ino;
        if (iprivsize)
            new_node->I_private = (void*)&((struct inode *)new_node)[1];
        return new_node;
    }

    struct inode* this_node = NULL;
    struct inode* new_node = NULL;
    int needs_unlock = 1;
    int err;

    pthread_mutex_lock(&ihash_lock);

    do {
        err = EAGAIN;

        this_node = LIST_FIRST(unixfs_inodelayer_firstfromhash(ino));
        while (this_node != NULL) {
            if (this_node->I_number == ino)
                break;
            this_node = LIST_NEXT(this_node, I_hashlink);
        }

        if (this_node == NULL) {
            if (new_node == NULL) {
                pthread_mutex_unlock(&ihash_lock);
                new_node = calloc(1, sizeof(struct inode) + iprivsize);
                if (new_node == NULL) {
                    err = ENOMEM;
                } else {
                    new_node->I_number = ino;
                    if (iprivsize)
                        new_node->I_private =
                            (void*)&((struct inode *)new_node)[1];
                    (void)pthread_cond_init(&new_node->I_state_cond,
                                            (const pthread_condattr_t*)0);
                }
                pthread_mutex_lock(&ihash_lock);
            } else {
                LIST_INSERT_HEAD(unixfs_inodelayer_firstfromhash(ino),
                                 new_node, I_hashlink);
                ihash_count++;
                this_node = new_node;
                new_node = NULL;
            }
        }

        if (this_node != NULL) {
            if (this_node->I_attachoutstanding) {
                this_node->I_waiting = 1;
                this_node->I_count++; /* XXX See comment below. */
                while (this_node->I_attachoutstanding) {
                    int ret = pthread_cond_wait(&this_node->I_state_cond,
                                                &ihash_lock);
                    if (ret) {
                        fprintf(stderr, "lock %p failed for inode %llu\n",
                                &this_node->I_state_cond, (ino64_t)ino);
                        abort();
                    }
                }
                pthread_mutex_unlock(&ihash_lock); /* XXX See comment below. */
                err = needs_unlock = 0; /* XXX See comment below. */
                /*
                 * XXX Yes, this comment. There's a subtlety here. This logic
                 * will work only for a read-only file system. If the hash
                 * table could change while we were sleeping, we must loop
                 * again.
                 */
            } else if (this_node->I_initialized == 0) {
                this_node->I_count++;
                this_node->I_attachoutstanding = 1;
                pthread_mutex_unlock(&ihash_lock);
                err = needs_unlock = 0;
            } else {
                this_node->I_count++;
                pthread_mutex_unlock(&ihash_lock);
                err = needs_unlock = 0;
            }
        }
    } while (err == EAGAIN);

    if (needs_unlock)
        pthread_mutex_unlock(&ihash_lock);

    if (new_node != NULL)
        free(new_node);

    return this_node;
}
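
/*
 * Mark an inode obtained from unixfs_inodelayer_iget() as fully attached
 * and wake up anyone sleeping in iget() waiting for it.
 */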
void
unixfs_inodelayer_isucceeded(struct inode* ip)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    pthread_mutex_lock(&ihash_lock);
    ip->I_initialized = 1;
    ip->I_attachoutstanding = 0;
    if (ip->I_waiting) {
        ip->I_waiting = 0;
        pthread_cond_broadcast(&ip->I_state_cond);
    }
    pthread_mutex_unlock(&ihash_lock);
}
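
/*
 * Abort an attach started by unixfs_inodelayer_iget(): unhash the inode,
 * wake any waiters, and free it. Call this instead of isucceeded() when
 * filling in the inode fails.
 */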
void
unixfs_inodelayer_ifailed(struct inode* ip)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    pthread_mutex_lock(&ihash_lock);
    LIST_REMOVE(ip, I_hashlink);
    ip->I_initialized = 0;
    ip->I_attachoutstanding = 0;
    if (ip->I_waiting) {
        ip->I_waiting = 0;
        pthread_cond_broadcast(&ip->I_state_cond);
    }
    ihash_count--;
    pthread_mutex_unlock(&ihash_lock);
    (void)pthread_cond_destroy(&ip->I_state_cond);
    free(ip);
}
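
/*
 * Drop a reference taken by unixfs_inodelayer_iget(). When the count
 * reaches zero the inode is unhashed and freed.
 */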
void
unixfs_inodelayer_iput(struct inode* ip)
{
    if (!UNIXFS_ENABLE_INODEHASH) {
        free(ip);
        return;
    }

    pthread_mutex_lock(&ihash_lock);
    ip->I_count--;
    if (ip->I_count == 0) {
        LIST_REMOVE(ip, I_hashlink);
        ihash_count--;
        pthread_mutex_unlock(&ihash_lock);
        (void)pthread_cond_destroy(&ip->I_state_cond);
        free(ip);
    } else
        pthread_mutex_unlock(&ihash_lock);
}
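
/*
 * Walk every hashed inode and hand it (with its private area) to the
 * caller-supplied iterator. Iteration stops early if the iterator returns
 * non-zero. The hash lock is held across the entire walk.
 */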
void
unixfs_inodelayer_dump(unixfs_inodelayer_iterator_t it)
{
    if (!UNIXFS_ENABLE_INODEHASH || (ihash_table == NULL))
        return;

    pthread_mutex_lock(&ihash_lock);

    int node_index = 0;
    u_long ihash_index = 0;
    /* The table has (ihash_mask + 1) buckets; walk all of them. */
    for (; ihash_index < (ihash_mask + 1); ihash_index++) {
        struct inode* ip;
        LIST_FOREACH(ip, &ihash_table[ihash_index], I_hashlink) {
            if (it(ip, ip->I_private) != 0)
                goto out;
            node_index++;
        }
    }

out:
    pthread_mutex_unlock(&ihash_lock);
}