
/filesystems/unixfs/common/unixfs/unixfs_internal.c

http://macfuse.googlecode.com/
/*
 * UnixFS
 *
 * A general-purpose file system layer for writing/reimplementing/porting
 * Unix file systems through MacFUSE.
 *
 * Copyright (c) 2008 Amit Singh. All Rights Reserved.
 * http://osxbook.com
 */

/*
 * XXX: This is very ad hoc right now. I made it "work" only for the
 * demos. Do not rely on this for read-write support (yet).
 */

#include "unixfs_internal.h"
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h> /* pthread_mutex_*, pthread_cond_* are used directly below */

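/*
 * Inode layer bookkeeping. With UNIXFS_ENABLE_INODEHASH, in-core inodes
 * live in a chained hash table keyed by inode number and protected by a
 * single global mutex (ihash_lock); each inode also carries a condition
 * variable that serializes concurrent attach attempts. With the hash
 * disabled, the layer degenerates to plain calloc()/free().
 */
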
static int desirednodes = 65536;
static pthread_mutex_t ihash_lock;
static LIST_HEAD(ihash_head, inode) *ihash_table = NULL;
typedef struct ihash_head ihash_head;
static size_t ihash_count = 0;
static size_t iprivsize = 0;

static u_long ihash_mask;

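/*
 * Map an inode number to the head of its hash chain; the table size is a
 * power of two, so masking with ihash_mask selects the bucket.
 */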
static ihash_head*
unixfs_inodelayer_firstfromhash(ino_t ino)
{
    return (ihash_head*)&ihash_table[ino & ihash_mask];
}

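/*
 * Set up the inode layer: record the size of the per-inode private area
 * requested by the caller and, if the inode hash is enabled, initialize
 * the global lock and allocate a hash table whose size is the largest
 * power of two not exceeding desirednodes. Returns 0 on success, -1 on
 * failure.
 */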
int
unixfs_inodelayer_init(size_t privsize)
{
    /* Record the private-area size even when the inode hash is disabled,
     * since unixfs_inodelayer_iget() consults it in both configurations. */
    iprivsize = privsize;

    if (!UNIXFS_ENABLE_INODEHASH)
        return 0;

    if (pthread_mutex_init(&ihash_lock, (const pthread_mutexattr_t*)0)) {
        fprintf(stderr, "failed to initialize the inode layer lock\n");
        return -1;
    }

    int i;
    u_long hashsize;
    LIST_HEAD(generic, generic) *hashtbl;

    for (hashsize = 1; hashsize <= desirednodes; hashsize <<= 1)
        continue;

    hashsize >>= 1;

    hashtbl = (struct generic *)malloc(hashsize * sizeof(*hashtbl));
    if (hashtbl != NULL) {
        for (i = 0; i < hashsize; i++)
            LIST_INIT(&hashtbl[i]);
        ihash_mask = hashsize - 1;
        ihash_table = (struct ihash_head *)hashtbl;
    }

    if (ihash_table == NULL) {
        (void)pthread_mutex_destroy(&ihash_lock);
        return -1;
    }

    return 0;
}

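/*
 * Tear down the inode layer. If any inodes are still hashed at this point
 * (leaked references), warn about each of them, then release the table and
 * the global lock.
 */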
void
unixfs_inodelayer_fini(void)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    if (ihash_table != NULL) {
        if (ihash_count != 0) {
            fprintf(stderr,
                    "*** warning: ihash terminated when not empty (%lu)\n",
                    (unsigned long)ihash_count);

            int node_index = 0;
            u_long ihash_index = 0;
            /* buckets run 0..ihash_mask inclusive */
            for (; ihash_index <= ihash_mask; ihash_index++) {
                struct inode* ip;
                LIST_FOREACH(ip, &ihash_table[ihash_index], I_hashlink) {
                    fprintf(stderr, "*** warning: inode %llu still present\n",
                            (ino64_t)ip->I_number);
                    node_index++;
                }
            }
        }

        u_long i;
        for (i = 0; i < (ihash_mask + 1); i++) {
            if (ihash_table[i].lh_first != NULL)
                fprintf(stderr,
                        "*** warning: found ihash_table[%lu].lh_first = %p\n",
                        i, ihash_table[i].lh_first);
        }
        free(ihash_table);
        ihash_table = NULL;
    }

    (void)pthread_mutex_destroy(&ihash_lock);
}

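/*
 * Look up the in-core inode numbered 'ino', allocating and hashing a new
 * one if none exists; the returned inode has its reference count bumped.
 * A freshly created inode comes back with I_attachoutstanding set, and the
 * caller is expected to finish initializing it and then call
 * unixfs_inodelayer_isucceeded() or unixfs_inodelayer_ifailed(). A caller
 * that races with an in-progress attach sleeps on the inode's condition
 * variable until that attach settles.
 */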
struct inode *
unixfs_inodelayer_iget(ino_t ino)
{
    if (!UNIXFS_ENABLE_INODEHASH) {
        struct inode* new_node = calloc(1, sizeof(struct inode) + iprivsize);
        if (new_node == NULL)
            return NULL;
        new_node->I_number = ino;
        if (iprivsize)
            new_node->I_private = (void*)&((struct inode *)new_node)[1];
        return new_node;
    }

    struct inode* this_node = NULL;
    struct inode* new_node = NULL;
    int needs_unlock = 1;
    int err;

    pthread_mutex_lock(&ihash_lock);

    do {
        err = EAGAIN;
        this_node = LIST_FIRST(unixfs_inodelayer_firstfromhash(ino));
        while (this_node != NULL) {
            if (this_node->I_number == ino)
                break;
            this_node = LIST_NEXT(this_node, I_hashlink);
        }

        if (this_node == NULL) {
            if (new_node == NULL) {
                pthread_mutex_unlock(&ihash_lock);
                new_node = calloc(1, sizeof(struct inode) + iprivsize);
                if (new_node == NULL) {
                    err = ENOMEM;
                } else {
                    new_node->I_number = ino;
                    if (iprivsize)
                        new_node->I_private =
                            (void*)&((struct inode *)new_node)[1];
                    (void)pthread_cond_init(&new_node->I_state_cond,
                                            (const pthread_condattr_t*)0);
                }
                pthread_mutex_lock(&ihash_lock);
            } else {
                LIST_INSERT_HEAD(unixfs_inodelayer_firstfromhash(ino),
                                 new_node, I_hashlink);
                ihash_count++;
                this_node = new_node;
                new_node = NULL;
            }
        }

        if (this_node != NULL) {
            if (this_node->I_attachoutstanding) {
                this_node->I_waiting = 1;
                this_node->I_count++; /* XXX See comment below. */
                while (this_node->I_attachoutstanding) {
                    int ret = pthread_cond_wait(&this_node->I_state_cond,
                                                &ihash_lock);
                    if (ret) {
                        fprintf(stderr, "lock %p failed for inode %llu\n",
                                &this_node->I_state_cond, (ino64_t)ino);
                        abort();
                    }
                }
                pthread_mutex_unlock(&ihash_lock); /* XXX See comment below. */
                err = needs_unlock = 0; /* XXX See comment below. */
                /*
                 * XXX Yes, this comment. There's a subtlety here. This logic
                 * will work only for a read-only file system. If the hash
                 * table could change while we were sleeping, we must loop
                 * again.
                 */
            } else if (this_node->I_initialized == 0) {
                this_node->I_count++;
                this_node->I_attachoutstanding = 1;
                pthread_mutex_unlock(&ihash_lock);
                err = needs_unlock = 0;
            } else {
                this_node->I_count++;
                pthread_mutex_unlock(&ihash_lock);
                err = needs_unlock = 0;
            }
        }

    } while (err == EAGAIN);

    if (needs_unlock)
        pthread_mutex_unlock(&ihash_lock);

    if (new_node != NULL) {
        /* Another thread hashed this inode while we were allocating;
         * discard our unused copy, whose condition variable was already
         * initialized above. */
        (void)pthread_cond_destroy(&new_node->I_state_cond);
        free(new_node);
    }

    return this_node;
}

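/*
 * Mark an inode returned by unixfs_inodelayer_iget() as fully initialized
 * and wake any threads waiting for the attach to complete.
 */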
void
unixfs_inodelayer_isucceeded(struct inode* ip)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    pthread_mutex_lock(&ihash_lock);
    ip->I_initialized = 1;
    ip->I_attachoutstanding = 0;
    if (ip->I_waiting) {
        ip->I_waiting = 0;
        pthread_cond_broadcast(&ip->I_state_cond);
    }
    pthread_mutex_unlock(&ihash_lock);
}

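/*
 * Abort an attach: unhash the inode, wake any waiters, and free it along
 * with its condition variable.
 */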
void
unixfs_inodelayer_ifailed(struct inode* ip)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    pthread_mutex_lock(&ihash_lock);
    LIST_REMOVE(ip, I_hashlink);
    ip->I_initialized = 0;
    ip->I_attachoutstanding = 0;
    if (ip->I_waiting) {
        ip->I_waiting = 0;
        pthread_cond_broadcast(&ip->I_state_cond);
    }
    ihash_count--;
    pthread_mutex_unlock(&ihash_lock);
    (void)pthread_cond_destroy(&ip->I_state_cond);
    free(ip);
}

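/*
 * Drop a reference taken by unixfs_inodelayer_iget(). When the count
 * reaches zero, the inode is unhashed, its condition variable destroyed,
 * and its memory freed.
 */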
void
unixfs_inodelayer_iput(struct inode* ip)
{
    if (!UNIXFS_ENABLE_INODEHASH) {
        free(ip);
        return;
    }

    pthread_mutex_lock(&ihash_lock);
    ip->I_count--;
    if (ip->I_count == 0) {
        LIST_REMOVE(ip, I_hashlink);
        ihash_count--;
        pthread_mutex_unlock(&ihash_lock);
        (void)pthread_cond_destroy(&ip->I_state_cond);
        free(ip);
    } else
        pthread_mutex_unlock(&ihash_lock);
}

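/*
 * Walk every hashed inode under the global lock and hand each one (with
 * its private area) to the caller-supplied iterator; a nonzero return from
 * the iterator stops the walk early.
 */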
void
unixfs_inodelayer_dump(unixfs_inodelayer_iterator_t it)
{
    if (!UNIXFS_ENABLE_INODEHASH)
        return;

    pthread_mutex_lock(&ihash_lock);

    int node_index = 0;
    u_long ihash_index = 0;

    /* buckets run 0..ihash_mask inclusive */
    for (; ihash_index <= ihash_mask; ihash_index++) {
        struct inode* ip;
        LIST_FOREACH(ip, &ihash_table[ihash_index], I_hashlink) {
            if (it(ip, ip->I_private) != 0)
                goto out;
            node_index++;
        }
    }

out:
    pthread_mutex_unlock(&ihash_lock);
}
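
/*
 * Minimal usage sketch (kept out of the build): an illustrative guess at
 * the intended call sequence, not part of the real API. The
 * 'example_private' type, the 'example' function, and the inode number 2
 * below are made up for illustration; only the unixfs_inodelayer_* calls
 * and the I_private field come from this file.
 */
#if 0
struct example_private { int some_field; };

static int example(void)
{
    /* Reserve room for one example_private per in-core inode. */
    if (unixfs_inodelayer_init(sizeof(struct example_private)) != 0)
        return -1;

    /* Get (and, on first use, create) the in-core inode numbered 2. */
    struct inode* ip = unixfs_inodelayer_iget((ino_t)2);
    if (ip == NULL) {
        unixfs_inodelayer_fini();
        return -1;
    }

    /* Fill in the inode and its private area, then publish it so that any
     * waiters blocked in unixfs_inodelayer_iget() can proceed. */
    if (ip->I_private != NULL)
        ((struct example_private *)ip->I_private)->some_field = 1;
    unixfs_inodelayer_isucceeded(ip);

    /* ... use the inode ... */

    unixfs_inodelayer_iput(ip);  /* drop the reference taken by iget() */
    unixfs_inodelayer_fini();
    return 0;
}
#endif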