PageRenderTime 4352ms CodeModel.GetById 36ms RepoModel.GetById 0ms app.codeStats 0ms

/src/modules/HTTPLoop/cache.c

http://github.com/pikelang/Pike
C | 301 lines | 253 code | 30 blank | 18 comment | 46 complexity | b4c3a88f31238745f1393f608ae76a4c MD5 | raw file
Possible License(s): LGPL-2.1, MPL-2.0-no-copyleft-exception
  1. /*
  2. || This file is part of Pike. For copyright information see COPYRIGHT.
  3. || Pike is distributed under GPL, LGPL and MPL. See the file COPYING
  4. || for more information.
  5. */
  6. #include "config.h"
  7. #include <global.h>
  8. #include <threads.h>
  9. #include <stralloc.h>
  10. #ifdef _REENTRANT
  11. #include <errno.h>
  12. #ifdef HAVE_SYS_SOCKET_H
  13. #include <sys/socket.h>
  14. #endif
  15. #ifdef HAVE_NETINET_IN_H
  16. #include <netinet/in.h>
  17. #endif
  18. #ifdef HAVE_ARPA_INET_H
  19. #include <arpa/inet.h>
  20. #endif
  21. #include "pike_netlib.h"
  22. #include "accept_and_parse.h"
  23. #include "cache.h"
  24. #include "util.h"
  25. #include "backend.h"
  26. #include "pike_embed.h"
/* Head of the linked list of caches (shared with accept_and_parse.c). */
struct cache *first_cache;

/* Strings whose references must be dropped while holding the interpreter
 * lock are queued here instead of being freed directly.
 * Guarded by tofree_mutex. */
static struct pike_string *free_queue[1024];
static int numtofree;
static PIKE_MUTEX_T tofree_mutex;

/* Recycling pool of cache_entry structs, to avoid malloc/free churn.
 * next_free_ce, num_cache_entries and free_cache_entries are guarded by
 * cache_entry_lock. */
static PIKE_MUTEX_T cache_entry_lock;
int next_free_ce, num_cache_entries;
struct cache_entry *free_cache_entries[1024];
  34. static void low_free_cache_entry( struct cache_entry *arg )
  35. {
  36. num_cache_entries--;
  37. aap_enqueue_string_to_free( arg->data );
  38. free( arg->url ); /* host is in the same malloced area */
  39. mt_lock( &cache_entry_lock );
  40. if( next_free_ce < 1024 )
  41. free_cache_entries[next_free_ce++] = arg;
  42. else
  43. free(arg);
  44. mt_unlock( &cache_entry_lock );
  45. /* fprintf(stderr, " %d+%d args\n", num_cache_entries, next_free_ce ); */
  46. }
  47. struct cache_entry *new_cache_entry( )
  48. {
  49. struct cache_entry *res;
  50. mt_lock( &cache_entry_lock );
  51. num_cache_entries++;
  52. if( next_free_ce )
  53. res = free_cache_entries[--next_free_ce];
  54. else
  55. res = malloc( sizeof( struct cache_entry ) );
  56. mt_unlock( &cache_entry_lock );
  57. /* fprintf(stderr, " %d+%d centries\n", num_cache_entries, next_free_ce ); */
  58. return res;
  59. }
  60. static void really_free_from_queue(void)
  61. /* Must have tofree lock and interpreter lock */
  62. {
  63. int i;
  64. for( i=0; i<numtofree; i++ )
  65. free_string( free_queue[i] );
  66. numtofree=0;
  67. }
/* Ensure the calling thread holds the Pike interpreter lock.
 * Returns 1 if the lock was acquired here — the caller must then release
 * it with mt_unlock_interpreter() — or 0 if this thread already held it
 * (i.e. it is a swapped-in Pike thread). */
static int ensure_interpreter_lock(void)
{
struct thread_state *thi;
int free=0;
if( (thi = thread_state_for_id( th_self() )) )
{
/* We are a Pike thread. */
if( thi->swapped ) /* We are swapped out.. */
{
low_mt_lock_interpreter(); /* Can run even if threads_disabled. */
return 1;
}
return 0; /* we are swapped in */
}
/* we are not a pike thread */
if( num_threads == 1 )
free=num_threads++; /* NOTE(review): appears to bump num_threads so the
                     * interpreter does not treat itself as single-threaded
                     * while we contend for the lock; restored below —
                     * confirm against the backend/threads implementation. */
wake_up_backend();
low_mt_lock_interpreter(); /* Can run even if threads_disabled. */
if( free )
num_threads--;
return 1;
}
  90. static void free_from_queue(void)
  91. {
  92. /* We have the interpreter lock here, this is a backend callback */
  93. mt_lock( &tofree_mutex );
  94. really_free_from_queue();
  95. mt_unlock( &tofree_mutex );
  96. }
/* Queue s so its reference is dropped later by a thread that holds the
 * interpreter lock (see free_from_queue / really_free_from_queue).
 * May be called without the interpreter lock. */
void aap_enqueue_string_to_free( struct pike_string *s )
{
mt_lock( &tofree_mutex );
if( numtofree > 1020 )
{
/* The queue is nearly full (capacity 1024), so flush it synchronously.
 * This should not happen all that often — almost never, actually.
 *
 * It only happens if more than 1020 different cache entries have to be
 * freed within one backend callback loop.
 */
int free_interpreter_lock = ensure_interpreter_lock();
really_free_from_queue();
if( free_interpreter_lock )
mt_unlock_interpreter();
}
free_queue[ numtofree++ ] = s;
mt_unlock( &tofree_mutex );
}
/* Rolling hash over s[0..len-1], folded into [0, CACHE_HTABLE_SIZE/2).
 * The final /2 is deliberate: callers index the hash table with the SUM of
 * two hash values (url hash + host hash), so each half-range value keeps
 * the sum inside the table. */
static size_t cache_hash(char *s, ptrdiff_t len)
{
size_t res = len * 9471111;
while(len--) { res=res<<1 ^ ((res&(~0x7ffffff))>>31); res ^= s[len]; }
return (res % CACHE_HTABLE_SIZE)/2;
} /* ^^ OBS! The /2 leaves room for adding two hashes together. */
/* Unlink e from bucket b of cache c and release its storage.
 * prev is e's predecessor in the bucket chain, or NULL if e is the head.
 * Preconditions: c->mutex is held and e->refs has reached zero. */
static void really_free_cache_entry(struct cache *c, struct cache_entry *e,
struct cache_entry *prev, size_t b)
{
#ifdef DEBUG
if(d_flag>2)
{
/* Sanity checks: bucket index matches the entry's hash, the cache
 * mutex is really held, and prev actually precedes e. */
if(b!=(cache_hash(e->url, e->url_len) +
cache_hash(e->host, e->host_len)))
Pike_fatal("Cache entry did not hash to the same spot\n");
if(!mt_trylock( & c->mutex ))
Pike_fatal("Cache free_entry running unlocked\n");
if(prev && prev->next != e)
Pike_fatal("prev->next != e\n");
}
#endif
if(!prev)
c->htable[ b ] = e->next; /* e was the bucket head */
else
prev->next = e->next;
c->size -= e->data->len;
c->entries--;
low_free_cache_entry( e );
}
  146. void aap_free_cache_entry(struct cache *c, struct cache_entry *e,
  147. struct cache_entry *prev, size_t b)
  148. {
  149. #ifdef DEBUG
  150. if(e->refs<=0)
  151. Pike_fatal("Freeing free cache entry\n");
  152. #endif
  153. if(!--e->refs)
  154. really_free_cache_entry(c,e,prev,b);
  155. }
  156. void simple_aap_free_cache_entry(struct cache *c, struct cache_entry *e)
  157. {
  158. mt_lock( &c->mutex );
  159. if(!--e->refs)
  160. {
  161. struct cache_entry *t, *p=0;
  162. size_t hv = cache_hash(e->url, e->url_len)+cache_hash(e->host,e->host_len);
  163. t = c->htable[ hv ];
  164. while(t)
  165. {
  166. if( t == e )
  167. {
  168. really_free_cache_entry(c,t,p,hv);
  169. break;
  170. }
  171. p=t;
  172. t=t->next;
  173. }
  174. }
  175. mt_unlock( &c->mutex );
  176. }
  177. void aap_cache_insert(struct cache_entry *ce, struct cache *c)
  178. {
  179. struct cache_entry *head, *p;
  180. char *t;
  181. size_t hv;
  182. #ifdef DEBUG
  183. extern int d_flag;
  184. if((d_flag > 2) && !mt_trylock( & c->mutex ))
  185. Pike_fatal("Cache insert running unlocked\n");
  186. #endif
  187. c->size += ce->data->len;
  188. if((head = aap_cache_lookup(ce->url, ce->url_len,
  189. ce->host, ce->host_len, c, 1,
  190. &p, &hv)))
  191. {
  192. c->size -= head->data->len;
  193. aap_enqueue_string_to_free(head->data);
  194. head->data = ce->data;
  195. head->stale_at = ce->stale_at;
  196. aap_free_cache_entry( c, head, p, hv );
  197. free(ce);
  198. } else {
  199. c->entries++;
  200. t = malloc( ce->url_len + ce->host_len );
  201. memcpy(t,ce->url,ce->url_len); ce->url = t; t+=ce->url_len;
  202. memcpy(t,ce->host,ce->host_len); ce->host = t;
  203. ce->next = c->htable[hv];
  204. ce->refs = 1;
  205. c->htable[hv] = ce;
  206. }
  207. }
/* Look up the entry for url s[0..len-1] + host ho[0..hlen-1] in cache c.
 * Returns the entry with one reference added (caller must release it via
 * aap_free_cache_entry / simple_aap_free_cache_entry), or NULL on miss or
 * if the entry had gone stale (stale entries are freed on the spot).
 *
 * nolock != 0 means the caller already holds c->mutex (verified in DEBUG
 * builds); otherwise the mutex is taken and released here.
 * If p is non-NULL it receives the hash-chain predecessor of the returned
 * entry (or of the last entry visited); if hv is non-NULL it receives the
 * bucket index — both are used by aap_cache_insert. */
struct cache_entry *aap_cache_lookup(char *s, ptrdiff_t len,
char *ho, ptrdiff_t hlen,
struct cache *c, int nolock,
struct cache_entry **p, size_t *hv)
{
size_t h = cache_hash(s, len) + cache_hash(ho,hlen);
struct cache_entry *e, *prev=NULL;
if( hv ) *hv = h;
if(!nolock)
mt_lock(&c->mutex);
#ifdef DEBUG
else
{
extern int d_flag;
if((d_flag>2) && !mt_trylock( & c->mutex ))
Pike_fatal("Cache lookup running unlocked\n");
}
#endif
if( p ) *p = 0;
e = c->htable[h];
while(e)
{
if(e->url_len == len && e->host_len == hlen
&& !memcmp(e->url,s,len)
&& !memcmp(e->host,ho,hlen))
{
int t = aap_get_time();
if(e->stale_at < t)
{
/* Expired: drop the cache's reference and report a miss. */
aap_free_cache_entry( c, e, prev, h );
if(!nolock) mt_unlock(&c->mutex);
return 0;
}
c->hits++;
/* cache hit. Lets add it to the top of the list */
if(c->htable[h] != e)
{
if(prev) prev->next = e->next;
e->next = c->htable[h];
c->htable[h] = e;
}
if(!nolock) mt_unlock(&c->mutex);
/* NOTE(review): the ref is taken after the mutex is released — looks
 * safe only because the caller still reaches the entry; confirm no
 * concurrent eviction can free it in between. */
e->refs++;
return e;
}
prev = e;
if( p ) *p = prev;
e = e->next;
}
c->misses++;
if(!nolock) mt_unlock(&c->mutex);
return 0;
}
  261. void aap_clean_cache(void)
  262. {
  263. struct cache *c = first_cache;
  264. if(numtofree) free_from_queue();
  265. }
  266. void aap_init_cache(void)
  267. {
  268. mt_init(&tofree_mutex);
  269. mt_init(&cache_entry_lock);
  270. }
  271. #endif