/contrib/bind9/lib/isc/stats.c

https://bitbucket.org/freebsd/freebsd-head/

/*
 * Copyright (C) 2009, 2012  Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/* $Id$ */

/*! \file */

#include <config.h>

#include <string.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/stats.h>
#include <isc/util.h>
#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)
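/*
 * Counters are logically 64-bit values.  When the platform provides a
 * 32-bit atomic add (xadd) but no 64-bit variant (xaddq), each counter
 * is split into two 32-bit fields that are updated atomically and
 * recombined at dump time; this is the "multifields" mode selected below.
 */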
#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && \
	!defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif	/* ISC_STATS_USEMULTIFIELDS */
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
	isc_uint32_t hi;
	isc_uint32_t lo;
} isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif
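/*
 * In multifields mode the logical 64-bit value of a counter is
 * ((isc_uint64_t)hi << 32) | lo; see copy_counters() below, which
 * performs exactly this recombination under an exclusive lock.
 */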
struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;
	isc_mem_t	*mctx;
	int		ncounters;

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Locked by counterlock, or unlocked if an efficient rwlock is not
	 * available.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;

	/*%
	 * We don't want to lock the counters while we are dumping, so we
	 * first copy the current counter values into a local array.  This
	 * buffer is the copy destination.  It's allocated on creation of
	 * the stats structure so that the dump operation won't fail due to
	 * memory allocation failure.
	 * XXX: this approach is awkward for a non-threaded build because
	 * the additional memory and the copy overhead could be avoided.
	 * We prefer simplicity here, however, under the assumption that
	 * dumping is a rare operation.
	 */
	isc_uint64_t	*copiedcounters;
};
static isc_result_t
create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
	isc_stats_t *stats;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(statsp != NULL && *statsp == NULL);

	stats = isc_mem_get(mctx, sizeof(*stats));
	if (stats == NULL)
		return (ISC_R_NOMEMORY);

	result = isc_mutex_init(&stats->lock);
	if (result != ISC_R_SUCCESS)
		goto clean_stats;

	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
	if (stats->counters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_mutex;
	}
	stats->copiedcounters = isc_mem_get(mctx,
					    sizeof(isc_uint64_t) * ncounters);
	if (stats->copiedcounters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_counters;
	}

#ifdef ISC_RWLOCK_USEATOMIC
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_copiedcounters;
#endif

	stats->references = 1;
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
	isc_mem_attach(mctx, &stats->mctx);
	stats->ncounters = ncounters;
	stats->magic = ISC_STATS_MAGIC;
	*statsp = stats;

	return (result);

	/*
	 * Error cleanup.  The labels are ordered so that each one falls
	 * through to the next, releasing everything that was allocated
	 * before the failure point and nothing that wasn't.  Each free
	 * uses the same size expression as the matching allocation.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
  clean_copiedcounters:
	isc_mem_put(mctx, stats->copiedcounters,
		    sizeof(isc_uint64_t) * ncounters);
#endif

  clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);

  clean_mutex:
	DESTROYLOCK(&stats->lock);

  clean_stats:
	isc_mem_put(mctx, stats, sizeof(*stats));

	return (result);
}
void
isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(statsp != NULL && *statsp == NULL);

	LOCK(&stats->lock);
	stats->references++;
	UNLOCK(&stats->lock);

	*statsp = stats;
}
void
isc_stats_detach(isc_stats_t **statsp) {
	isc_stats_t *stats;
	unsigned int references;

	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));

	stats = *statsp;
	*statsp = NULL;

	/*
	 * Capture the decremented count while still holding the lock, so
	 * the destruction decision doesn't race with a concurrent attach.
	 */
	LOCK(&stats->lock);
	references = --stats->references;
	UNLOCK(&stats->lock);

	if (references == 0) {
		isc_mem_put(stats->mctx, stats->copiedcounters,
			    sizeof(isc_uint64_t) * stats->ncounters);
		isc_mem_put(stats->mctx, stats->counters,
			    sizeof(isc_stat_t) * stats->ncounters);
		DESTROYLOCK(&stats->lock);
#ifdef ISC_RWLOCK_USEATOMIC
		isc_rwlock_destroy(&stats->counterlock);
#endif
		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
	}
}
int
isc_stats_ncounters(isc_stats_t *stats) {
	REQUIRE(ISC_STATS_VALID(stats));

	return (stats->ncounters);
}
static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we are "writing" a counter field.  The write access
	 * itself is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overflows again before the higher field is incremented.  It
	 * doesn't matter, however, because we don't read the value until
	 * copy_counters() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
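	/*
	 * Mirror of the carry in incrementcounter(): if the lower field
	 * was 0 before the decrement, it has wrapped to 0xffffffff, so
	 * borrow by decrementing the higher field as well.
	 */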
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo,
			       -1);
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
static void
copy_counters(isc_stats_t *stats) {
	int i;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We acquire the "write" (exclusive) lock before "reading" the
	 * statistics counters, so that no increment or decrement can run
	 * while the snapshot is taken.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

#if ISC_STATS_USEMULTIFIELDS
	for (i = 0; i < stats->ncounters; i++) {
		stats->copiedcounters[i] =
			(isc_uint64_t)(stats->counters[i].hi) << 32 |
			stats->counters[i].lo;
	}
#else
	UNUSED(i);
	memcpy(stats->copiedcounters, stats->counters,
	       stats->ncounters * sizeof(isc_stat_t));
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}
isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);

	return (create_stats(mctx, ncounters, statsp));
}

void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
}

void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
}

void
isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
	       void *arg, unsigned int options)
{
	int i;

	REQUIRE(ISC_STATS_VALID(stats));

	copy_counters(stats);

	for (i = 0; i < stats->ncounters; i++) {
		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
		    stats->copiedcounters[i] == 0)
			continue;
		dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);
	}
}
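For orientation, here is a minimal usage sketch of this API from a caller's point of view. It is not part of the file above: the counter names and the dump callback are hypothetical, and it assumes the declarations from <isc/stats.h> and <isc/mem.h> in the same source tree.

#include <stdio.h>

#include <isc/mem.h>
#include <isc/result.h>
#include <isc/stats.h>

/* Hypothetical counter indices for this sketch. */
#define CNT_QUERIES	0
#define CNT_ERRORS	1
#define CNT_MAX		2

/* Matches the isc_stats_dumper_t callback signature. */
static void
print_counter(isc_statscounter_t counter, isc_uint64_t value, void *arg) {
	(void)arg;	/* unused in this sketch */
	printf("counter %d = %llu\n", (int)counter,
	       (unsigned long long)value);
}

int
main(void) {
	isc_mem_t *mctx = NULL;
	isc_stats_t *stats = NULL;

	if (isc_mem_create(0, 0, &mctx) != ISC_R_SUCCESS)
		return (1);
	if (isc_stats_create(mctx, &stats, CNT_MAX) != ISC_R_SUCCESS) {
		isc_mem_destroy(&mctx);
		return (1);
	}

	isc_stats_increment(stats, CNT_QUERIES);
	isc_stats_increment(stats, CNT_QUERIES);
	isc_stats_increment(stats, CNT_ERRORS);
	isc_stats_decrement(stats, CNT_ERRORS);

	/*
	 * Without ISC_STATSDUMP_VERBOSE, counters whose snapshot value is
	 * zero are skipped, so only CNT_QUERIES (value 2) is printed.
	 */
	isc_stats_dump(stats, print_counter, NULL, 0);

	isc_stats_detach(&stats);
	isc_mem_destroy(&mctx);
	return (0);
}

Note the deliberate inversion of rwlock roles in the file: the many increment/decrement threads share the "read" side, because each individual update is already atomic, while the dump path takes the "write" side solely to obtain a consistent 64-bit snapshot in multifields mode.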