
/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go

https://gitlab.com/akomba/ether-bot-wallet
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package cache

import (
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)
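
// int32o is a reference-counted test value: acquire and Release panic if the
// count ever leaves the expected 0/1 range, flagging a double set or a double
// release by the cache.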
type int32o int32

func (o *int32o) acquire() {
	if atomic.AddInt32((*int32)(o), 1) != 1 {
		panic("BUG: invalid ref")
	}
}

func (o *int32o) Release() {
	if atomic.AddInt32((*int32)(o), -1) != 0 {
		panic("BUG: invalid ref")
	}
}
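
// releaserFunc wraps a value together with a finalizer that runs when the
// cache releases the entry.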
type releaserFunc struct {
	fn    func()
	value Value
}

func (r releaserFunc) Release() {
	if r.fn != nil {
		r.fn()
	}
}
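
// set is a test helper that inserts (ns, key) -> value with the given charge,
// optionally attaching relf as a release finalizer, and returns the handle.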
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
	return c.Get(ns, key, func() (int, Value) {
		if relf != nil {
			return charge, releaserFunc{relf, value}
		} else {
			return charge, value
		}
	})
}
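
// TestCacheMap hammers the cache from many goroutines: per-namespace setters
// pin random entries and stash the handles, a releaser goroutine drops the
// stashed handles, and another goroutine churns a separate namespace. At the
// end every object's reference count must be back to zero.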
func TestCacheMap(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	nsx := []struct {
		nobjects, nhandles, concurrent, repeat int
	}{
		{10000, 400, 50, 3},
		{100000, 1000, 100, 10},
	}

	var (
		objects [][]int32o
		handles [][]unsafe.Pointer
	)
	for _, x := range nsx {
		objects = append(objects, make([]int32o, x.nobjects))
		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
	}

	c := NewCache(nil)

	wg := new(sync.WaitGroup)
	var done int32

	for ns, x := range nsx {
		for i := 0; i < x.concurrent; i++ {
			wg.Add(1)
			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
				defer wg.Done()
				r := rand.New(rand.NewSource(time.Now().UnixNano()))

				for j := len(objects) * repeat; j >= 0; j-- {
					key := uint64(r.Intn(len(objects)))
					h := c.Get(uint64(ns), key, func() (int, Value) {
						o := &objects[key]
						o.acquire()
						return 1, o
					})
					if v := h.Value().(*int32o); v != &objects[key] {
						t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
					}
					if objects[key] != 1 {
						t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
					}
					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
						h.Release()
					}
				}
			}(ns, i, x.repeat, objects[ns], handles[ns])
		}

		go func(handles []unsafe.Pointer) {
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			for atomic.LoadInt32(&done) == 0 {
				i := r.Intn(len(handles))
				h := (*Handle)(atomic.LoadPointer(&handles[i]))
				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
					h.Release()
				}
				time.Sleep(time.Millisecond)
			}
		}(handles[ns])
	}

	go func() {
		handles := make([]*Handle, 100000)
		for atomic.LoadInt32(&done) == 0 {
			for i := range handles {
				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
					return 1, 1
				})
			}
			for _, h := range handles {
				h.Release()
			}
		}
	}()

	wg.Wait()

	atomic.StoreInt32(&done, 1)

	for _, handles0 := range handles {
		for i := range handles0 {
			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
				h.Release()
			}
		}
	}

	for ns, objects0 := range objects {
		for i, o := range objects0 {
			if o != 0 {
				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
			}
		}
	}
}
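
// TestCacheMap_NodesAndSize checks that Nodes and Size track the number of
// entries and their total charge.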
func TestCacheMap_NodesAndSize(t *testing.T) {
	c := NewCache(nil)
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
	set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 2, nil)
	set(c, 1, 1, 3, 3, nil)
	set(c, 2, 1, 4, 1, nil)
	if c.Nodes() != 4 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
	}
	if c.Size() != 7 {
		t.Errorf("invalid size counter: want=%d got=%d", 7, c.Size())
	}
}
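
// TestLRUCache_Capacity checks Capacity, Nodes and Size around a SetCapacity
// call that forces evictions.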
func TestLRUCache_Capacity(t *testing.T) {
	c := NewCache(NewLRU(10))
	if c.Capacity() != 10 {
		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
	}
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 2, nil).Release()
	set(c, 1, 1, 3, 3, nil).Release()
	set(c, 2, 1, 4, 1, nil).Release()
	set(c, 2, 2, 5, 1, nil).Release()
	set(c, 2, 3, 6, 1, nil).Release()
	set(c, 2, 4, 7, 1, nil).Release()
	set(c, 2, 5, 8, 1, nil).Release()
	if c.Nodes() != 7 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
	}
	if c.Size() != 10 {
		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
	}
	c.SetCapacity(9)
	if c.Capacity() != 9 {
		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
	}
	if c.Nodes() != 6 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
	}
	if c.Size() != 8 {
		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
	}
}
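
// TestCacheMap_NilValue checks that a setter returning a nil value yields a
// nil handle and leaves the cache empty.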
func TestCacheMap_NilValue(t *testing.T) {
	c := NewCache(NewLRU(10))
	h := c.Get(0, 0, func() (size int, value Value) {
		return 1, nil
	})
	if h != nil {
		t.Error("cache handle is non-nil")
	}
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
}
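
// TestLRUCache_GetLatency runs slow setters and fast getters concurrently and
// fails if the average Get latency exceeds a third of the setter delay.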
func TestLRUCache_GetLatency(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	const (
		concurrentSet = 30
		concurrentGet = 3
		duration      = 3 * time.Second
		delay         = 3 * time.Millisecond
		maxkey        = 100000
	)

	var (
		set, getHit, getAll        int32
		getMaxLatency, getDuration int64
	)

	c := NewCache(NewLRU(5000))
	wg := &sync.WaitGroup{}
	until := time.Now().Add(duration)
	for i := 0; i < concurrentSet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for time.Now().Before(until) {
				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
					time.Sleep(delay)
					atomic.AddInt32(&set, 1)
					return 1, 1
				}).Release()
			}
		}(i)
	}
	for i := 0; i < concurrentGet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for {
				mark := time.Now()
				if mark.Before(until) {
					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
					latency := int64(time.Now().Sub(mark))
					m := atomic.LoadInt64(&getMaxLatency)
					if latency > m {
						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
					}
					atomic.AddInt64(&getDuration, latency)
					if h != nil {
						atomic.AddInt32(&getHit, 1)
						h.Release()
					}
					atomic.AddInt32(&getAll, 1)
				} else {
					break
				}
			}
		}(i)
	}

	wg.Wait()
	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)

	if getAvglatency > delay/3 {
		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
	}
}
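
// TestLRUCache_HitMiss inserts entries one by one, verifying hits and misses
// after each insert and each delete, and that all set finalizers run.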
func TestLRUCache_HitMiss(t *testing.T) {
	cases := []struct {
		key   uint64
		value string
	}{
		{1, "vvvvvvvvv"},
		{100, "v1"},
		{0, "v2"},
		{12346, "v3"},
		{777, "v4"},
		{999, "v5"},
		{7654, "v6"},
		{2, "v7"},
		{3, "v8"},
		{9, "v9"},
	}

	setfin := 0
	c := NewCache(NewLRU(1000))
	for i, x := range cases {
		set(c, 0, x.key, x.value, len(x.value), func() {
			setfin++
		}).Release()
		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j <= i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	for i, x := range cases {
		finalizerOk := false
		c.Delete(0, x.key, func() {
			finalizerOk = true
		})
		if !finalizerOk {
			t.Errorf("case %d delete finalizer not executed", i)
		}
		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j > i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	if setfin != len(cases) {
		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
	}
}
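
// TestLRUCache_Eviction checks LRU eviction order and that an entry pinned by
// an outstanding handle (o1) is not evicted.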
func TestLRUCache_Eviction(t *testing.T) {
	c := NewCache(NewLRU(12))
	o1 := set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	set(c, 0, 5, 5, 1, nil).Release()
	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
		h.Release()
	}
	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9

	for _, key := range []uint64{9, 2, 5, 1} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release()
	for _, key := range []uint64{1, 2, 5} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	for _, key := range []uint64{3, 4, 9} {
		h := c.Get(0, key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}
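
// TestLRUCache_Evict exercises Evict, EvictNS and EvictAll across several
// namespaces.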
func TestLRUCache_Evict(t *testing.T) {
	c := NewCache(NewLRU(6))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 1, 1, 4, 1, nil).Release()
	set(c, 1, 2, 5, 1, nil).Release()
	set(c, 2, 1, 6, 1, nil).Release()
	set(c, 2, 2, 7, 1, nil).Release()

	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				h.Release()
			} else {
				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
			}
		}
	}

	if ok := c.Evict(0, 1); !ok {
		t.Error("first Cache.Evict on #0.1 return false")
	}
	if ok := c.Evict(0, 1); ok {
		t.Error("second Cache.Evict on #0.1 return true")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
	}

	c.EvictNS(1)
	if h := c.Get(1, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
	}
	if h := c.Get(1, 2, nil); h != nil {
		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
	}

	c.EvictAll()
	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
			}
		}
	}
}
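
// TestLRUCache_Delete checks Delete semantics, including deleting an entry
// that is still referenced by a live handle, and counts finalizer calls.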
func TestLRUCache_Delete(t *testing.T) {
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()

	if ok := c.Delete(0, 1, delFunc); !ok {
		t.Error("Cache.Delete on #1 return false")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
	}
	if ok := c.Delete(0, 1, delFunc); ok {
		t.Error("Cache.Delete on #1 return true")
	}

	h2 := c.Get(0, 2, nil)
	if h2 == nil {
		t.Error("Cache.Get on #2 return nil")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(1) Cache.Delete on #2 return false")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(2) Cache.Delete on #2 return false")
	}

	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	c.Get(0, 2, nil).Release()

	for key := 2; key <= 4; key++ {
		if h := c.Get(0, uint64(key), nil); h != nil {
			h.Release()
		} else {
			t.Errorf("Cache.Get on #%d return nil", key)
		}
	}

	h2.Release()
	if h := c.Get(0, 2, nil); h != nil {
		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
	}

	if delFuncCalled != 4 {
		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
	}
}
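
// TestLRUCache_Close checks that after Close every release finalizer has run
// (three entries) and the pending delete finalizer has run once.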
func TestLRUCache_Close(t *testing.T) {
	relFuncCalled := 0
	relFunc := func() {
		relFuncCalled++
	}
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, relFunc).Release()
	set(c, 0, 2, 2, 1, relFunc).Release()

	h3 := set(c, 0, 3, 3, 1, relFunc)
	if h3 == nil {
		t.Error("Cache.Get on #3 return nil")
	}
	if ok := c.Delete(0, 3, delFunc); !ok {
		t.Error("Cache.Delete on #3 return false")
	}

	c.Close()

	if relFuncCalled != 3 {
		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
	}
	if delFuncCalled != 1 {
		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
	}
}