/include/linux/u64_stats_sync.h

Source: https://github.com/airy09/android_kernel_sony_apq8064

#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64bit network statistics on both 32bit and 64bit
 * hosts, we provide a synchronization point, which is a noop on 64bit or
 * UP kernels.
 *
 * Key points:
 * 1) Use a seqcount on SMP 32bit, with low overhead.
 * 2) The whole thing is a noop on 64bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update
 *    could be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex but a spinlock,
 *    spin_lock_bh() or local_bh_disable():
 *    3.1) The write side must not sleep.
 *    3.2) The write side must not allow preemption.
 *    3.3) If applicable, interrupts must be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    values are consistent with each other (remember point 1: this is a
 *    noop on 64bit arches anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads. But if they have to fetch many values, it is better not to
 *    allow preemption/interrupts, to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For softirq uses, readers can use the u64_stats_fetch_begin_bh() and
 *    u64_stats_fetch_retry_bh() helpers.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per cpu data is used [in a non-preemptible context]):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   stats->bytes64 += len;   // non atomic operation
 *   stats->packets64++;      // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 *   u64 tbytes, tpackets;
 *   unsigned int start;
 *
 *   do {
 *           start = u64_stats_fetch_begin(&stats->syncp);
 *           tbytes = stats->bytes64;     // non atomic operation
 *           tpackets = stats->packets64; // non atomic operation
 *   } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for an example of use with per_cpu containers,
 * in BH disabled context.
 */

#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        seqcount_t seq;
#endif
};
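
/*
 * Illustrative sketch (not part of the original header): a stats structure
 * embedding u64_stats_sync, as a driver might define it. The names
 * pcpu_lstats, bytes64 and packets64 are hypothetical, in the spirit of
 * drivers/net/loopback.c.
 *
 *   struct pcpu_lstats {
 *           u64 bytes64;
 *           u64 packets64;
 *           struct u64_stats_sync syncp;
 *   };
 */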

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        write_seqcount_end(&syncp->seq);
#endif
}
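
/*
 * Illustrative sketch (not part of the original header): a complete write
 * path, using a spinlock for the mutual exclusion required by point 3 above.
 * The stats structure and its lock field are hypothetical; note the unlock
 * that the usage template above elides.
 *
 *   spin_lock_bh(&stats->lock);
 *   u64_stats_update_begin(&stats->syncp);
 *   stats->bytes64 += len;
 *   stats->packets64++;
 *   u64_stats_update_end(&stats->syncp);
 *   spin_unlock_bh(&stats->lock);
 */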

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
        preempt_disable();
#endif
        return 0;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                         unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
        preempt_enable();
#endif
        return false;
#endif
}
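
/*
 * Illustrative sketch (not part of the original header): if the counters can
 * be updated from hard interrupt context, the reader should block interrupts
 * around the fetch loop, per point 6 above. Names are hypothetical.
 *
 *   unsigned long flags;
 *   unsigned int start;
 *   u64 tbytes;
 *
 *   local_irq_save(flags);
 *   do {
 *           start = u64_stats_fetch_begin(&stats->syncp);
 *           tbytes = stats->bytes64;
 *   } while (u64_stats_fetch_retry(&stats->syncp, start));
 *   local_irq_restore(flags);
 */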

/*
 * In case softirq handlers can update u64 counters, readers can use the
 * following helpers:
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable BH.
 * - 64bit arches have no problem atomically reading u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_begin(&syncp->seq);
#else
#if BITS_PER_LONG==32
        local_bh_disable();
#endif
        return 0;
#endif
}

static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
                                            unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_retry(&syncp->seq, start);
#else
#if BITS_PER_LONG==32
        local_bh_enable();
#endif
        return false;
#endif
}
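
/*
 * Illustrative sketch (not part of the original header): aggregating per_cpu
 * counters updated from softirq context, in the spirit of
 * drivers/net/loopback.c. The pcpu_lstats structure is the hypothetical one
 * sketched above; lstats is an assumed per_cpu pointer.
 *
 *   u64 bytes = 0, packets = 0;
 *   int i;
 *
 *   for_each_possible_cpu(i) {
 *           const struct pcpu_lstats *lb_stats = per_cpu_ptr(lstats, i);
 *           unsigned int start;
 *           u64 tbytes, tpackets;
 *
 *           do {
 *                   start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
 *                   tbytes = lb_stats->bytes64;
 *                   tpackets = lb_stats->packets64;
 *           } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
 *           bytes += tbytes;
 *           packets += tpackets;
 *   }
 */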

#endif /* _LINUX_U64_STATS_SYNC_H */