PageRenderTime 423ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 0ms

/vendor/github.com/camlistore/camlistore/third_party/github.com/syndtr/goleveldb/leveldb/db_test.go

https://gitlab.com/github-cloud-corporation/coreos-baremetal
Go | 2194 lines | 1810 code | 309 blank | 75 comment | 404 complexity | fe122ae7bd8456072514fa30fb91390a MD5 | raw file
  1. // Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
  2. // All rights reserved.
  3. //
  4. // Use of this source code is governed by a BSD-style license that can be
  5. // found in the LICENSE file.
  6. package leveldb
  7. import (
  8. "bytes"
  9. "container/list"
  10. crand "crypto/rand"
  11. "encoding/binary"
  12. "fmt"
  13. "math/rand"
  14. "os"
  15. "path/filepath"
  16. "runtime"
  17. "strings"
  18. "sync"
  19. "sync/atomic"
  20. "testing"
  21. "time"
  22. "unsafe"
  23. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/comparer"
  24. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/errors"
  25. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/filter"
  26. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/iterator"
  27. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/opt"
  28. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/storage"
  29. "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/util"
  30. )
  31. func tkey(i int) []byte {
  32. return []byte(fmt.Sprintf("%016d", i))
  33. }
// tval returns an n-byte pseudo-random test value. The value is
// deterministic for a given seed, so tests can regenerate and compare
// the same payload later.
func tval(seed, n int) []byte {
	r := rand.New(rand.NewSource(int64(seed)))
	return randomString(r, n)
}
// dbHarness bundles a DB under test with its backing test storage and
// the options used to open it, so tests can open/close/reopen the same
// database and issue reads/writes with shared options.
type dbHarness struct {
	t    *testing.T
	stor *testStorage      // in-memory storage with failure injection
	db   *DB               // currently open database, nil after close()
	o    *opt.Options      // options used for every (re)open
	ro   *opt.ReadOptions  // nil means default read options
	wo   *opt.WriteOptions // nil means default write options
}
// newDbHarnessWopt creates a harness and opens a fresh DB with the
// given options; it fails the test if the open fails.
func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness {
	h := new(dbHarness)
	h.init(t, o)
	return h
}
// newDbHarness creates a harness with default options.
func newDbHarness(t *testing.T) *dbHarness {
	return newDbHarnessWopt(t, &opt.Options{})
}
// init wires up the harness fields, creates the test storage and opens
// the DB. On open failure it closes the storage and aborts the test.
func (h *dbHarness) init(t *testing.T, o *opt.Options) {
	h.t = t
	h.stor = newTestStorage(t)
	h.o = o
	h.ro = nil
	h.wo = nil
	if err := h.openDB0(); err != nil {
		// So that it will come after fatal message.
		defer h.stor.Close()
		h.t.Fatal("Open (init): got error: ", err)
	}
}
// openDB0 opens the DB with the harness options and returns the error
// instead of failing the test, for callers that expect failure.
func (h *dbHarness) openDB0() (err error) {
	h.t.Log("opening DB")
	h.db, err = Open(h.stor, h.o)
	return
}
// openDB opens the DB and fails the test on error.
func (h *dbHarness) openDB() {
	if err := h.openDB0(); err != nil {
		h.t.Fatal("Open: got error: ", err)
	}
}
// closeDB0 closes the DB and returns the error to the caller.
func (h *dbHarness) closeDB0() error {
	h.t.Log("closing DB")
	return h.db.Close()
}
// closeDB closes the DB, reports any close error, verifies that the
// storage has no dangling open files, and runs a GC cycle to flush
// finalizers before the storage is reused.
func (h *dbHarness) closeDB() {
	if err := h.closeDB0(); err != nil {
		h.t.Error("Close: got error: ", err)
	}
	h.stor.CloseCheck()
	runtime.GC()
}
// reopenDB closes and reopens the DB on the same storage, exercising
// the journal/manifest recovery path.
func (h *dbHarness) reopenDB() {
	h.closeDB()
	h.openDB()
}
// close tears the harness down completely: closes the DB (ignoring the
// close error, unlike closeDB) and the storage, then nils both so any
// later use panics loudly.
func (h *dbHarness) close() {
	h.closeDB0()
	h.db = nil
	h.stor.Close()
	h.stor = nil
	runtime.GC()
}
// openAssert attempts a second Open on the harness storage and asserts
// the outcome: want=true expects success, want=false expects failure
// (e.g. because the DB is already open and locked).
func (h *dbHarness) openAssert(want bool) {
	db, err := Open(h.stor, h.o)
	if err != nil {
		if want {
			h.t.Error("Open: assert: got error: ", err)
		} else {
			h.t.Log("Open: assert: got error (expected): ", err)
		}
	} else {
		if !want {
			h.t.Error("Open: assert: expect error")
		}
		db.Close()
	}
}
// write applies a batch with the harness write options, reporting any
// error as a test failure.
func (h *dbHarness) write(batch *Batch) {
	if err := h.db.Write(batch, h.wo); err != nil {
		h.t.Error("Write: got error: ", err)
	}
}
// put stores key=value, reporting any error as a test failure.
func (h *dbHarness) put(key, value string) {
	if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil {
		h.t.Error("Put: got error: ", err)
	}
}
// putMulti writes the boundary keys low/hi and forces a memtable
// compaction, n times — producing n tables that all span [low,hi].
func (h *dbHarness) putMulti(n int, low, hi string) {
	for i := 0; i < n; i++ {
		h.put(low, "begin")
		h.put(hi, "end")
		h.compactMem()
	}
}
// maxNextLevelOverlappingBytes scans every table on levels 1..N-2 and
// computes the largest total size of next-level tables overlapping a
// single table; it fails the test if that maximum exceeds want.
func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
	t := h.t
	db := h.db
	var (
		maxOverlaps uint64
		maxLevel    int
	)
	v := db.s.version()
	// Skip level 0 (tables there may overlap each other) and the last
	// level (it has no next level).
	for i, tt := range v.tables[1 : len(v.tables)-1] {
		level := i + 1
		next := v.tables[level+1]
		for _, t := range tt {
			r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
			sum := r.size()
			if sum > maxOverlaps {
				maxOverlaps = sum
				maxLevel = level
			}
		}
	}
	v.release()
	if maxOverlaps > want {
		t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel)
	} else {
		t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want)
	}
}
// delete removes key, reporting any error as a test failure.
func (h *dbHarness) delete(key string) {
	t := h.t
	db := h.db
	err := db.Delete([]byte(key), h.wo)
	if err != nil {
		t.Error("Delete: got error: ", err)
	}
}
// assertNumKeys iterates the whole DB and fails the test unless the
// number of live keys equals want.
func (h *dbHarness) assertNumKeys(want int) {
	iter := h.db.NewIterator(nil, h.ro)
	defer iter.Release()
	got := 0
	for iter.Next() {
		got++
	}
	if err := iter.Error(); err != nil {
		h.t.Error("assertNumKeys: ", err)
	}
	if want != got {
		h.t.Errorf("assertNumKeys: want=%d got=%d", want, got)
	}
}
// getr reads key from the given Reader (a DB or a Snapshot) and checks
// the found/not-found outcome against expectFound. It returns whether
// the key was found along with the value read.
func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) {
	t := h.t
	v, err := db.Get([]byte(key), h.ro)
	switch err {
	case ErrNotFound:
		if expectFound {
			t.Errorf("Get: key '%s' not found, want found", key)
		}
	case nil:
		found = true
		if !expectFound {
			t.Errorf("Get: key '%s' found, want not found", key)
		}
	default:
		// Any other error is unconditionally a failure.
		t.Error("Get: got error: ", err)
	}
	return
}
// get is getr against the harness DB itself.
func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) {
	return h.getr(h.db, key, expectFound)
}
// getValr asserts that reading key from the given Reader succeeds and
// yields exactly value.
func (h *dbHarness) getValr(db Reader, key, value string) {
	t := h.t
	found, r := h.getr(db, key, true)
	if !found {
		return
	}
	rval := string(r)
	if rval != value {
		t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value)
	}
}
// getVal is getValr against the harness DB itself.
func (h *dbHarness) getVal(key, value string) {
	h.getValr(h.db, key, value)
}
// allEntriesFor asserts the complete internal history of a user key —
// every value and deletion marker still physically present across all
// levels, newest first — against want, formatted like "[ v2, DEL, v1 ]".
func (h *dbHarness) allEntriesFor(key, want string) {
	t := h.t
	db := h.db
	s := db.s
	// Seek with the maximum sequence number so the iterator lands on
	// the newest internal entry for this user key.
	ikey := newIkey([]byte(key), kMaxSeq, ktVal)
	iter := db.newRawIterator(nil, nil)
	if !iter.Seek(ikey) && iter.Error() != nil {
		t.Error("AllEntries: error during seek, err: ", iter.Error())
		return
	}
	res := "[ "
	first := true
	for iter.Valid() {
		if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil {
			if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
				// Walked past the requested user key; stop.
				break
			}
			if !first {
				res += ", "
			}
			first = false
			switch kt {
			case ktVal:
				res += string(iter.Value())
			case ktDel:
				res += "DEL"
			}
		} else {
			// Unparseable internal keys are reported, not skipped.
			if !first {
				res += ", "
			}
			first = false
			res += "CORRUPTED"
		}
		iter.Next()
	}
	if !first {
		res += " "
	}
	res += "]"
	if res != want {
		t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want)
	}
}
  258. // Return a string that contains all key,value pairs in order,
  259. // formatted like "(k1->v1)(k2->v2)".
  260. func (h *dbHarness) getKeyVal(want string) {
  261. t := h.t
  262. db := h.db
  263. s, err := db.GetSnapshot()
  264. if err != nil {
  265. t.Fatal("GetSnapshot: got error: ", err)
  266. }
  267. res := ""
  268. iter := s.NewIterator(nil, nil)
  269. for iter.Next() {
  270. res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value()))
  271. }
  272. iter.Release()
  273. if res != want {
  274. t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want)
  275. }
  276. s.Release()
  277. }
// waitCompaction blocks until the table-compaction goroutine is idle.
func (h *dbHarness) waitCompaction() {
	t := h.t
	db := h.db
	if err := db.compSendIdle(db.tcompCmdC); err != nil {
		t.Error("compaction error: ", err)
	}
}
// waitMemCompaction blocks until the memtable-compaction goroutine is
// idle.
func (h *dbHarness) waitMemCompaction() {
	t := h.t
	db := h.db
	if err := db.compSendIdle(db.mcompCmdC); err != nil {
		t.Error("compaction error: ", err)
	}
}
// compactMem forces the current memtable to be frozen and flushed to a
// table file, waiting for the flush to finish. It holds the write lock
// for the duration so no concurrent writer can rotate the memtable.
func (h *dbHarness) compactMem() {
	t := h.t
	db := h.db
	t.Log("starting memdb compaction")
	// Acquire the DB write lock; released by the deferred receive.
	db.writeLockC <- struct{}{}
	defer func() {
		<-db.writeLockC
	}()
	if _, err := db.rotateMem(0); err != nil {
		t.Error("compaction error: ", err)
	}
	if err := db.compSendIdle(db.mcompCmdC); err != nil {
		t.Error("compaction error: ", err)
	}
	// A successful flush must have produced at least one table.
	if h.totalTables() == 0 {
		t.Error("zero tables after mem compaction")
	}
	t.Log("memdb compaction done")
}
// compactRangeAtErr requests a table compaction of [min,max] at the
// given level ("" means unbounded on that side) and asserts whether it
// should fail (wanterr=true) or succeed.
func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
	t := h.t
	db := h.db
	var _min, _max []byte
	if min != "" {
		_min = []byte(min)
	}
	if max != "" {
		_max = []byte(max)
	}
	t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
	if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
		if wanterr {
			t.Log("CompactRangeAt: got error (expected): ", err)
		} else {
			t.Error("CompactRangeAt: got error: ", err)
		}
	} else if wanterr {
		t.Error("CompactRangeAt: expect error")
	}
	t.Log("table range compaction done")
}
// compactRangeAt is compactRangeAtErr expecting success.
func (h *dbHarness) compactRangeAt(level int, min, max string) {
	h.compactRangeAtErr(level, min, max, false)
}
// compactRange runs a full DB range compaction over [min,max];
// empty strings leave the corresponding bound open.
func (h *dbHarness) compactRange(min, max string) {
	t := h.t
	db := h.db
	t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
	var r util.Range
	if min != "" {
		r.Start = []byte(min)
	}
	if max != "" {
		r.Limit = []byte(max)
	}
	if err := db.CompactRange(r); err != nil {
		t.Error("CompactRange: got error: ", err)
	}
	t.Log("DB range compaction done")
}
  352. func (h *dbHarness) sizeOf(start, limit string) uint64 {
  353. sz, err := h.db.SizeOf([]util.Range{
  354. {[]byte(start), []byte(limit)},
  355. })
  356. if err != nil {
  357. h.t.Error("SizeOf: got error: ", err)
  358. }
  359. return sz.Sum()
  360. }
// sizeAssert fails the test unless the approximate size of
// [start,limit) falls within [low,hi].
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
	sz := h.sizeOf(start, limit)
	if sz < low || sz > hi {
		h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
			shorten(start), shorten(limit), low, hi, sz)
	}
}
// getSnapshot takes a snapshot, aborting the test on failure. The
// caller is responsible for releasing it.
func (h *dbHarness) getSnapshot() (s *Snapshot) {
	s, err := h.db.GetSnapshot()
	if err != nil {
		h.t.Fatal("GetSnapshot: got error: ", err)
	}
	return
}
// tablesPerLevel asserts the per-level table counts as a comma-joined
// string (e.g. "1,0,1"), with trailing empty levels trimmed off.
func (h *dbHarness) tablesPerLevel(want string) {
	res := ""
	nz := 0
	v := h.db.s.version()
	for level, tt := range v.tables {
		if level > 0 {
			res += ","
		}
		res += fmt.Sprint(len(tt))
		if len(tt) > 0 {
			// Remember the end of the last non-empty level so the
			// result can be truncated past it.
			nz = len(res)
		}
	}
	v.release()
	res = res[:nz]
	if res != want {
		h.t.Errorf("invalid tables len, want=%s, got=%s", want, res)
	}
}
// totalTables returns the number of table files across all levels of
// the current version.
func (h *dbHarness) totalTables() (n int) {
	v := h.db.s.version()
	for _, tt := range v.tables {
		n += len(tt)
	}
	v.release()
	return
}
// keyValue is the minimal iterator surface needed by testKeyVal:
// anything exposing the current key and value (e.g. iterator.Iterator).
type keyValue interface {
	Key() []byte
	Value() []byte
}
  406. func testKeyVal(t *testing.T, kv keyValue, want string) {
  407. res := string(kv.Key()) + "->" + string(kv.Value())
  408. if res != want {
  409. t.Errorf("invalid key/value, want=%q, got=%q", want, res)
  410. }
  411. }
  412. func numKey(num int) string {
  413. return fmt.Sprintf("key%06d", num)
  414. }
// Shared bloom-filter policy (10 bits per key) used by truno's
// filter-enabled round. NOTE(review): underscore naming is
// un-idiomatic Go, but renaming would touch other blocks in this file.
var _bloom_filter = filter.NewBloomFilter(10)
// truno runs f against four harness configurations in sequence: the
// caller's options unchanged, with a bloom filter added, with
// compression disabled, and the base options again followed by an
// immediate close/reopen. Each round runs in its own closure so the
// deferred harness close fires per round, not at function exit.
func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) {
	for i := 0; i < 4; i++ {
		func() {
			switch i {
			case 0:
			case 1:
				// Copy the caller's options before mutating so the
				// original struct is never modified.
				if o == nil {
					o = &opt.Options{Filter: _bloom_filter}
				} else {
					old := o
					o = &opt.Options{}
					*o = *old
					o.Filter = _bloom_filter
				}
			case 2:
				if o == nil {
					o = &opt.Options{Compression: opt.NoCompression}
				} else {
					old := o
					o = &opt.Options{}
					*o = *old
					o.Compression = opt.NoCompression
				}
			}
			h := newDbHarnessWopt(t, o)
			defer h.close()
			switch i {
			case 3:
				h.reopenDB()
			}
			f(h)
		}()
	}
}
// trun is truno with default (nil) options.
func trun(t *testing.T, f func(h *dbHarness)) {
	truno(t, nil, f)
}
  453. func testAligned(t *testing.T, name string, offset uintptr) {
  454. if offset%8 != 0 {
  455. t.Errorf("field %s offset is not 64-bit aligned", name)
  456. }
  457. }
// Test_FieldsAligned verifies that all struct fields manipulated with
// sync/atomic sit at 64-bit-aligned offsets (required on 32-bit archs).
func Test_FieldsAligned(t *testing.T) {
	p1 := new(DB)
	testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
	p2 := new(session)
	testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
	testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
	testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
	testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
}
// TestDB_Locking checks that a second Open on the same storage fails
// while the DB is open, and succeeds once it is closed.
func TestDB_Locking(t *testing.T) {
	h := newDbHarness(t)
	defer h.stor.Close()
	h.openAssert(false)
	h.closeDB()
	h.openAssert(true)
}
// TestDB_Empty checks that a missing key stays missing across a reopen.
func TestDB_Empty(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.get("foo", false)
		h.reopenDB()
		h.get("foo", false)
	})
}
// TestDB_ReadWrite exercises basic put/get including overwrites and
// persistence across a reopen.
func TestDB_ReadWrite(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.getVal("foo", "v1")
		h.put("bar", "v2")
		h.put("foo", "v3")
		h.getVal("foo", "v3")
		h.getVal("bar", "v2")
		h.reopenDB()
		h.getVal("foo", "v3")
		h.getVal("bar", "v2")
	})
}
// TestDB_PutDeleteGet checks that a deleted key stays deleted, both
// immediately and after a reopen.
func TestDB_PutDeleteGet(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.getVal("foo", "v1")
		h.put("foo", "v2")
		h.getVal("foo", "v2")
		h.delete("foo")
		h.get("foo", false)
		h.reopenDB()
		h.get("foo", false)
	})
}
// TestDB_EmptyBatch checks that writing an empty batch succeeds and
// leaves the DB unchanged.
func TestDB_EmptyBatch(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	h.get("foo", false)
	err := h.db.Write(new(Batch), h.wo)
	if err != nil {
		t.Error("writing empty batch yield error: ", err)
	}
	h.get("foo", false)
}
// TestDB_GetFromFrozen verifies that reads are served from the frozen
// (immutable) memtable while its flush to disk is blocked.
func TestDB_GetFromFrozen(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
	defer h.close()
	h.put("foo", "v1")
	h.getVal("foo", "v1")
	h.stor.DelaySync(storage.TypeTable)     // Block sync calls
	h.put("k1", strings.Repeat("x", 100000)) // Fill memtable
	h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction
	// Poll briefly for the memtable to be frozen; the flush itself is
	// stalled by the delayed sync above.
	for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ {
		time.Sleep(10 * time.Microsecond)
	}
	if h.db.getFrozenMem() == nil {
		h.stor.ReleaseSync(storage.TypeTable)
		t.Fatal("No frozen mem")
	}
	h.getVal("foo", "v1")
	h.stor.ReleaseSync(storage.TypeTable) // Release sync calls
	h.reopenDB()
	h.getVal("foo", "v1")
	h.get("k1", true)
	h.get("k2", true)
}
// TestDB_GetFromTable checks reads served from an on-disk table after
// a forced memtable compaction.
func TestDB_GetFromTable(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.compactMem()
		h.getVal("foo", "v1")
	})
}
// TestDB_GetSnapshot checks that a snapshot keeps observing the values
// current at its creation, even after later writes and a memtable
// compaction. The long key exercises non-inline key storage too.
func TestDB_GetSnapshot(t *testing.T) {
	trun(t, func(h *dbHarness) {
		bar := strings.Repeat("b", 200)
		h.put("foo", "v1")
		h.put(bar, "v1")
		snap, err := h.db.GetSnapshot()
		if err != nil {
			t.Fatal("GetSnapshot: got error: ", err)
		}
		h.put("foo", "v2")
		h.put(bar, "v2")
		h.getVal("foo", "v2")
		h.getVal(bar, "v2")
		h.getValr(snap, "foo", "v1")
		h.getValr(snap, bar, "v1")
		h.compactMem()
		h.getVal("foo", "v2")
		h.getVal(bar, "v2")
		h.getValr(snap, "foo", "v1")
		h.getValr(snap, bar, "v1")
		snap.Release()
		h.reopenDB()
		h.getVal("foo", "v2")
		h.getVal(bar, "v2")
	})
}
// TestDB_GetLevel0Ordering checks that with multiple overlapping
// level-0 tables, reads return the newest value (level-0 tables must
// be searched newest-first).
func TestDB_GetLevel0Ordering(t *testing.T) {
	trun(t, func(h *dbHarness) {
		for i := 0; i < 4; i++ {
			h.put("bar", fmt.Sprintf("b%d", i))
			h.put("foo", fmt.Sprintf("v%d", i))
			h.compactMem()
		}
		h.getVal("foo", "v3")
		h.getVal("bar", "b3")
		// The scenario only means anything if several level-0 tables
		// actually accumulated.
		v := h.db.s.version()
		t0len := v.tLen(0)
		v.release()
		if t0len < 2 {
			t.Errorf("level-0 tables is less than 2, got %d", t0len)
		}
		h.reopenDB()
		h.getVal("foo", "v3")
		h.getVal("bar", "b3")
	})
}
// TestDB_GetOrderedByLevels checks that a newer value in a shallower
// level shadows an older value pushed to a deeper level by compaction.
func TestDB_GetOrderedByLevels(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.compactMem()
		h.compactRange("a", "z")
		h.getVal("foo", "v1")
		h.put("foo", "v2")
		h.compactMem()
		h.getVal("foo", "v2")
	})
}
// TestDB_GetPicksCorrectFile checks that Get locates the right table
// when a non-level-0 level holds several disjoint files.
func TestDB_GetPicksCorrectFile(t *testing.T) {
	trun(t, func(h *dbHarness) {
		// Arrange to have multiple files in a non-level-0 level.
		h.put("a", "va")
		h.compactMem()
		h.compactRange("a", "b")
		h.put("x", "vx")
		h.compactMem()
		h.compactRange("x", "y")
		h.put("f", "vf")
		h.compactMem()
		h.compactRange("f", "g")
		h.getVal("a", "va")
		h.getVal("f", "vf")
		h.getVal("x", "vx")
		h.compactRange("", "")
		h.getVal("a", "va")
		h.getVal("f", "vf")
		h.getVal("x", "vx")
	})
}
// TestDB_GetEncountersEmptyLevel regression-tests seek-triggered
// compaction when level 1 is empty between populated levels 0 and 2.
func TestDB_GetEncountersEmptyLevel(t *testing.T) {
	trun(t, func(h *dbHarness) {
		// Arrange for the following to happen:
		// * sstable A in level 0
		// * nothing in level 1
		// * sstable B in level 2
		// Then do enough Get() calls to arrange for an automatic compaction
		// of sstable A. A bug would cause the compaction to be marked as
		// occurring at level 1 (instead of the correct level 0).
		// Step 1: First place sstables in levels 0 and 2
		for i := 0; ; i++ {
			if i >= 100 {
				t.Fatal("could not fill levels-0 and level-2")
			}
			v := h.db.s.version()
			if v.tLen(0) > 0 && v.tLen(2) > 0 {
				v.release()
				break
			}
			v.release()
			h.put("a", "begin")
			h.put("z", "end")
			h.compactMem()
			h.getVal("a", "begin")
			h.getVal("z", "end")
		}
		// Step 2: clear level 1 if necessary.
		h.compactRangeAt(1, "", "")
		h.tablesPerLevel("1,0,1")
		h.getVal("a", "begin")
		h.getVal("z", "end")
		// Step 3: read a bunch of times
		for i := 0; i < 200; i++ {
			h.get("missing", false)
		}
		// Step 4: Wait for compaction to finish
		h.waitCompaction()
		v := h.db.s.version()
		if v.tLen(0) > 0 {
			t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
		}
		v.release()
		h.getVal("a", "begin")
		h.getVal("z", "end")
	})
}
// TestDB_IterMultiWithDelete checks that iterators skip deleted keys
// in both directions, before and after a memtable compaction.
func TestDB_IterMultiWithDelete(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("a", "va")
		h.put("b", "vb")
		h.put("c", "vc")
		h.delete("b")
		h.get("b", false)
		iter := h.db.NewIterator(nil, nil)
		iter.Seek([]byte("c"))
		testKeyVal(t, iter, "c->vc")
		// Prev must skip the deleted "b" and land on "a".
		iter.Prev()
		testKeyVal(t, iter, "a->va")
		iter.Release()
		h.compactMem()
		iter = h.db.NewIterator(nil, nil)
		iter.Seek([]byte("c"))
		testKeyVal(t, iter, "c->vc")
		iter.Prev()
		testKeyVal(t, iter, "a->va")
		iter.Release()
	})
}
// TestDB_IteratorPinsRef checks that an iterator keeps observing the
// DB state at its creation even after heavy writes force compactions.
func TestDB_IteratorPinsRef(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	h.put("foo", "hello")
	// Get iterator that will yield the current contents of the DB.
	iter := h.db.NewIterator(nil, nil)
	// Write to force compactions
	h.put("foo", "newvalue1")
	for i := 0; i < 100; i++ {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
	}
	h.put("foo", "newvalue2")
	// The iterator must still see only the original single entry.
	iter.First()
	testKeyVal(t, iter, "foo->hello")
	if iter.Next() {
		t.Errorf("expect eof")
	}
	iter.Release()
}
// TestDB_Recover checks journal recovery across repeated reopens, with
// writes interleaved between them.
func TestDB_Recover(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.put("baz", "v5")
		h.reopenDB()
		h.getVal("foo", "v1")
		h.getVal("foo", "v1")
		h.getVal("baz", "v5")
		h.put("bar", "v2")
		h.put("foo", "v3")
		h.reopenDB()
		h.getVal("foo", "v3")
		h.put("foo", "v4")
		h.getVal("foo", "v4")
		h.getVal("bar", "v2")
		h.getVal("baz", "v5")
	})
}
// TestDB_RecoverWithEmptyJournal checks that recovery copes with a
// journal file containing no records (produced by the back-to-back
// reopen with no intervening writes).
func TestDB_RecoverWithEmptyJournal(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.put("foo", "v2")
		h.reopenDB()
		h.reopenDB()
		h.put("foo", "v3")
		h.reopenDB()
		h.getVal("foo", "v3")
	})
}
// TestDB_RecoverDuringMemtableCompaction checks that data written
// while a memtable flush is stalled (sync delayed) survives a reopen —
// i.e. it is recovered from the journal.
func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
	truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
		h.stor.DelaySync(storage.TypeTable)
		h.put("big1", strings.Repeat("x", 10000000))
		h.put("big2", strings.Repeat("y", 1000))
		h.put("bar", "v2")
		h.stor.ReleaseSync(storage.TypeTable)
		h.reopenDB()
		h.getVal("bar", "v2")
		h.getVal("big1", strings.Repeat("x", 10000000))
		h.getVal("big2", strings.Repeat("y", 1000))
	})
}
  752. func TestDB_MinorCompactionsHappen(t *testing.T) {
  753. h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
  754. defer h.close()
  755. n := 500
  756. key := func(i int) string {
  757. return fmt.Sprintf("key%06d", i)
  758. }
  759. for i := 0; i < n; i++ {
  760. h.put(key(i), key(i)+strings.Repeat("v", 1000))
  761. }
  762. for i := 0; i < n; i++ {
  763. h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
  764. }
  765. h.reopenDB()
  766. for i := 0; i < n; i++ {
  767. h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
  768. }
  769. }
// TestDB_RecoverWithLargeJournal checks that replaying a journal much
// larger than the write buffer flushes multiple tables mid-recovery.
func TestDB_RecoverWithLargeJournal(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	h.put("big1", strings.Repeat("1", 200000))
	h.put("big2", strings.Repeat("2", 200000))
	h.put("small3", strings.Repeat("3", 10))
	h.put("small4", strings.Repeat("4", 10))
	h.tablesPerLevel("")
	// Make sure that if we re-open with a small write buffer size that
	// we flush table files in the middle of a large journal file.
	h.o.WriteBuffer = 100000
	h.reopenDB()
	h.getVal("big1", strings.Repeat("1", 200000))
	h.getVal("big2", strings.Repeat("2", 200000))
	h.getVal("small3", strings.Repeat("3", 10))
	h.getVal("small4", strings.Repeat("4", 10))
	v := h.db.s.version()
	if v.tLen(0) <= 1 {
		t.Errorf("tables-0 less than one")
	}
	v.release()
}
// TestDB_CompactionsGenerateMultipleFiles checks that compacting a
// large level-0 spills into several level-1 files rather than one
// oversized table.
func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		WriteBuffer: 10000000,
		Compression: opt.NoCompression,
	})
	defer h.close()
	v := h.db.s.version()
	if v.tLen(0) > 0 {
		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
	}
	v.release()
	n := 80
	// Write 8MB (80 values, each 100K)
	for i := 0; i < n; i++ {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
	}
	// Reopening moves updates to level-0
	h.reopenDB()
	h.compactRangeAt(0, "", "")
	v = h.db.s.version()
	if v.tLen(0) > 0 {
		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
	}
	if v.tLen(1) <= 1 {
		t.Errorf("level-1 tables less than 1, got %d", v.tLen(1))
	}
	v.release()
	for i := 0; i < n; i++ {
		h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
	}
}
// TestDB_RepeatedWritesToSameKey checks that rewriting one key never
// accumulates more tables than levels + the L0 pause trigger, i.e.
// compaction keeps up with overwrites.
func TestDB_RepeatedWritesToSameKey(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
	defer h.close()
	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
	// Each value is twice the write buffer, so every put forces a flush.
	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
	for i := 0; i < 5*maxTables; i++ {
		h.put("key", value)
		n := h.totalTables()
		if n > maxTables {
			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
		}
	}
}
// TestDB_RepeatedWritesToSameKeyAfterReopen is the same bound check as
// TestDB_RepeatedWritesToSameKey, but starting from a reopened DB.
func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
	defer h.close()
	h.reopenDB()
	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
	for i := 0; i < 5*maxTables; i++ {
		h.put("key", value)
		n := h.totalTables()
		if n > maxTables {
			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
		}
	}
}
// TestDB_SparseMerge checks that a sparse update over a wide key range
// does not force compactions that merge huge amounts of next-level
// data in one shot (bounded by maxNextLevelOverlappingBytes).
func TestDB_SparseMerge(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
	defer h.close()
	h.putMulti(h.o.GetNumLevel(), "A", "Z")
	// Suppose there is:
	// small amount of data with prefix A
	// large amount of data with prefix B
	// small amount of data with prefix C
	// and that recent updates have made small changes to all three prefixes.
	// Check that we do not do a compaction that merges all of B in one shot.
	h.put("A", "va")
	value := strings.Repeat("x", 1000)
	for i := 0; i < 100000; i++ {
		h.put(fmt.Sprintf("B%010d", i), value)
	}
	h.put("C", "vc")
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.waitCompaction()
	// Make sparse update
	h.put("A", "va2")
	h.put("B100", "bvalue2")
	h.put("C", "vc2")
	h.compactMem()
	h.waitCompaction()
	h.maxNextLevelOverlappingBytes(20 * 1048576)
	h.compactRangeAt(0, "", "")
	h.waitCompaction()
	h.maxNextLevelOverlappingBytes(20 * 1048576)
	h.compactRangeAt(1, "", "")
	h.waitCompaction()
	h.maxNextLevelOverlappingBytes(20 * 1048576)
}
// TestDB_SizeOf checks SizeOf estimates: zero for an empty DB and for
// memtable-only data, and within [s1,s2] per 100K value once data is
// on disk, across reopens and incremental range compactions.
func TestDB_SizeOf(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		Compression: opt.NoCompression,
		WriteBuffer: 10000000,
	})
	defer h.close()
	h.sizeAssert("", "xyz", 0, 0)
	h.reopenDB()
	h.sizeAssert("", "xyz", 0, 0)
	// Write 8MB (80 values, each 100K)
	n := 80
	s1 := 100000 // lower bound per value (raw size)
	s2 := 105000 // upper bound per value (size plus per-entry overhead)
	for i := 0; i < n; i++ {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10))
	}
	// 0 because SizeOf() does not account for memtable space
	h.sizeAssert("", numKey(50), 0, 0)
	for r := 0; r < 3; r++ {
		h.reopenDB()
		for cs := 0; cs < n; cs += 10 {
			for i := 0; i < n; i += 10 {
				h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i))
				h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1)))
				h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10))
			}
			h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50))
			h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50))
			h.compactRangeAt(0, numKey(cs), numKey(cs+9))
		}
		v := h.db.s.version()
		if v.tLen(0) != 0 {
			t.Errorf("level-0 tables was not zero, got %d", v.tLen(0))
		}
		if v.tLen(1) == 0 {
			t.Error("level-1 tables was zero")
		}
		v.release()
	}
}
// TestDB_SizeOf_MixOfSmallAndLarge checks SizeOf with values of mixed
// sizes: each prefix range's estimate must match the cumulative sum of
// preceding value sizes, within a small per-key tolerance.
func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
	defer h.close()
	sizes := []uint64{
		10000,
		10000,
		100000,
		10000,
		100000,
		10000,
		300000,
		10000,
	}
	for i, n := range sizes {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10))
	}
	for r := 0; r < 3; r++ {
		h.reopenDB()
		var x uint64 // cumulative size of keys before numKey(i)
		for i, n := range sizes {
			y := x
			if i > 0 {
				y += 1000 // allow ~1KB overhead per preceding key
			}
			h.sizeAssert("", numKey(i), x, y)
			x += n
		}
		h.sizeAssert(numKey(3), numKey(5), 110000, 111000)
		h.compactRangeAt(0, "", "")
	}
}
// TestDB_Snapshot checks that multiple live snapshots each see their
// own point-in-time value and that releasing them in arbitrary order
// leaves the remaining snapshots intact.
func TestDB_Snapshot(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		s1 := h.getSnapshot()
		h.put("foo", "v2")
		s2 := h.getSnapshot()
		h.put("foo", "v3")
		s3 := h.getSnapshot()
		h.put("foo", "v4")
		h.getValr(s1, "foo", "v1")
		h.getValr(s2, "foo", "v2")
		h.getValr(s3, "foo", "v3")
		h.getVal("foo", "v4")
		s3.Release()
		h.getValr(s1, "foo", "v1")
		h.getValr(s2, "foo", "v2")
		h.getVal("foo", "v4")
		s1.Release()
		h.getValr(s2, "foo", "v2")
		h.getVal("foo", "v4")
		s2.Release()
		h.getVal("foo", "v4")
	})
}
// TestDB_SnapshotList unit-tests the snapshot bookkeeping directly:
// minSeq() must always report the sequence number of the oldest
// still-held snapshot element as elements are acquired and released.
func TestDB_SnapshotList(t *testing.T) {
	db := &DB{snapsList: list.New()}
	e0a := db.acquireSnapshot() // seq 0
	e0b := db.acquireSnapshot() // seq 0 (duplicate of the same seq)
	db.seq = 1
	e1 := db.acquireSnapshot() // seq 1
	db.seq = 2
	e2 := db.acquireSnapshot() // seq 2
	if db.minSeq() != 0 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e0a)
	// e0b still pins seq 0.
	if db.minSeq() != 0 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e2)
	if db.minSeq() != 0 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e0b)
	// Last seq-0 holder gone; oldest is now e1.
	if db.minSeq() != 1 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	e2 = db.acquireSnapshot()
	if db.minSeq() != 1 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e1)
	if db.minSeq() != 2 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e2)
	if db.minSeq() != 2 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
}
// TestDB_HiddenValuesAreRemoved checks that compaction drops deletion
// markers (and eventually shadowed values) once it reaches the base
// level for the key, but not before.
func TestDB_HiddenValuesAreRemoved(t *testing.T) {
	trun(t, func(h *dbHarness) {
		s := h.db.s
		h.put("foo", "v1")
		h.compactMem()
		m := h.o.GetMaxMemCompationLevel()
		v := s.version()
		num := v.tLen(m)
		v.release()
		if num != 1 {
			t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
		}
		// Place a table at level last-1 to prevent merging with preceding mutation
		h.put("a", "begin")
		h.put("z", "end")
		h.compactMem()
		v = s.version()
		if v.tLen(m) != 1 {
			t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
		}
		if v.tLen(m-1) != 1 {
			t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
		}
		v.release()
		h.delete("foo")
		h.put("foo", "v2")
		h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
		h.compactMem()
		h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
		h.compactRangeAt(m-2, "", "z")
		// DEL eliminated, but v1 remains because we aren't compacting that level
		// (DEL can be eliminated because v2 hides v1).
		h.allEntriesFor("foo", "[ v2, v1 ]")
		h.compactRangeAt(m-1, "", "")
		// Merging last-1 w/ last, so we are the base level for "foo", so
		// DEL is removed. (as is v1).
		h.allEntriesFor("foo", "[ v2 ]")
	})
}
// TestDB_DeletionMarkers2 verifies that a deletion marker survives
// compaction while a deeper level still overlaps the key, and is only
// dropped (together with the deleted value) when merged into the base
// level for that key.
func TestDB_DeletionMarkers2(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	s := h.db.s

	h.put("foo", "v1")
	h.compactMem()
	// m is the level a fresh memtable compaction lands on.
	m := h.o.GetMaxMemCompationLevel()
	v := s.version()
	num := v.tLen(m)
	v.release()
	if num != 1 {
		t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
	}

	// Place a table at level last-1 to prevent merging with preceding mutation
	h.put("a", "begin")
	h.put("z", "end")
	h.compactMem()
	v = s.version()
	if v.tLen(m) != 1 {
		t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
	}
	if v.tLen(m-1) != 1 {
		t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
	}
	v.release()

	h.delete("foo")
	h.allEntriesFor("foo", "[ DEL, v1 ]")
	h.compactMem() // Moves to level last-2
	h.allEntriesFor("foo", "[ DEL, v1 ]")
	h.compactRangeAt(m-2, "", "")
	// DEL kept: "last" file overlaps
	h.allEntriesFor("foo", "[ DEL, v1 ]")
	h.compactRangeAt(m-1, "", "")
	// Merging last-1 w/ last, so we are the base level for "foo", so
	// DEL is removed. (as is v1).
	h.allEntriesFor("foo", "[ ]")
}
  1090. func TestDB_CompactionTableOpenError(t *testing.T) {
  1091. h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
  1092. defer h.close()
  1093. im := 10
  1094. jm := 10
  1095. for r := 0; r < 2; r++ {
  1096. for i := 0; i < im; i++ {
  1097. for j := 0; j < jm; j++ {
  1098. h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
  1099. }
  1100. h.compactMem()
  1101. }
  1102. }
  1103. if n := h.totalTables(); n != im*2 {
  1104. t.Errorf("total tables is %d, want %d", n, im)
  1105. }
  1106. h.stor.SetEmuErr(storage.TypeTable, tsOpOpen)
  1107. go h.db.CompactRange(util.Range{})
  1108. if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
  1109. t.Log("compaction error: ", err)
  1110. }
  1111. h.closeDB0()
  1112. h.openDB()
  1113. h.stor.SetEmuErr(0, tsOpOpen)
  1114. for i := 0; i < im; i++ {
  1115. for j := 0; j < jm; j++ {
  1116. h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
  1117. }
  1118. }
  1119. }
// TestDB_OverlapInLevel0 checks that memtable compaction detects overlap
// with existing level-0 files and does not push a deletion marker to a
// deeper level where it would fail to shadow the older value.
func TestDB_OverlapInLevel0(t *testing.T) {
	trun(t, func(h *dbHarness) {
		if h.o.GetMaxMemCompationLevel() != 2 {
			t.Fatal("fix test to reflect the config")
		}

		// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
		h.put("100", "v100")
		h.put("999", "v999")
		h.compactMem()
		h.delete("100")
		h.delete("999")
		h.compactMem()
		h.tablesPerLevel("0,1,1")

		// Make files spanning the following ranges in level-0:
		// files[0] 200 .. 900
		// files[1] 300 .. 500
		// Note that files are sorted by min key.
		h.put("300", "v300")
		h.put("500", "v500")
		h.compactMem()
		h.put("200", "v200")
		h.put("600", "v600")
		h.put("900", "v900")
		h.compactMem()
		h.tablesPerLevel("2,1,1")

		// Compact away the placeholder files we created initially
		h.compactRangeAt(1, "", "")
		h.compactRangeAt(2, "", "")
		h.tablesPerLevel("2")

		// Do a memtable compaction. Before bug-fix, the compaction would
		// not detect the overlap with level-0 files and would incorrectly place
		// the deletion in a deeper level.
		h.delete("600")
		h.compactMem()
		h.tablesPerLevel("3")
		h.get("600", false)
	})
}
// TestDB_L0_CompactionBug_Issue44_a reproduces LevelDB issue 44: a
// particular sequence of reopens, deletes and puts must not resurrect a
// deleted key, either before or after background compaction runs.
func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.reopenDB()
	h.put("b", "v")
	h.reopenDB()
	h.delete("b")
	h.delete("a")
	h.reopenDB()
	h.delete("a")
	h.reopenDB()
	h.put("a", "v")
	h.reopenDB()
	h.reopenDB()
	// Only "a" should be visible, both before and after compaction.
	h.getKeyVal("(a->v)")
	h.waitCompaction()
	h.getKeyVal("(a->v)")
}
// TestDB_L0_CompactionBug_Issue44_b is the second reproduction of
// LevelDB issue 44, mixing empty-key writes, deletes of absent keys and
// reopens; the surviving contents must be exactly the empty key and "c".
func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.reopenDB()
	h.put("", "")
	h.reopenDB()
	h.delete("e")
	h.put("", "")
	h.reopenDB()
	h.put("c", "cv")
	h.reopenDB()
	h.put("", "")
	h.reopenDB()
	h.put("", "")
	h.waitCompaction()
	h.reopenDB()
	h.put("d", "dv")
	h.reopenDB()
	h.put("", "")
	h.reopenDB()
	h.delete("d")
	h.delete("b")
	h.reopenDB()
	// Expected survivors: the empty key and "c", before and after compaction.
	h.getKeyVal("(->)(c->cv)")
	h.waitCompaction()
	h.getKeyVal("(->)(c->cv)")
}
  1203. func TestDB_SingleEntryMemCompaction(t *testing.T) {
  1204. trun(t, func(h *dbHarness) {
  1205. for i := 0; i < 10; i++ {
  1206. h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
  1207. h.compactMem()
  1208. h.put("key", strings.Repeat("v", opt.DefaultBlockSize))
  1209. h.compactMem()
  1210. h.put("k", "v")
  1211. h.compactMem()
  1212. h.put("", "")
  1213. h.compactMem()
  1214. h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2))
  1215. h.compactMem()
  1216. }
  1217. })
  1218. }
// TestDB_ManifestWriteError checks that a failed manifest write (i==0)
// or manifest sync (i==1) during a table compaction does not lose data:
// after clearing the emulated error and reopening, the key is readable.
func TestDB_ManifestWriteError(t *testing.T) {
	for i := 0; i < 2; i++ {
		func() {
			h := newDbHarness(t)
			defer h.close()

			h.put("foo", "bar")
			h.getVal("foo", "bar")

			// Mem compaction (will succeed)
			h.compactMem()
			h.getVal("foo", "bar")
			v := h.db.s.version()
			if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 {
				t.Errorf("invalid total tables, want=1 got=%d", n)
			}
			v.release()

			// Arm a manifest write or sync failure depending on the round.
			if i == 0 {
				h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite)
			} else {
				h.stor.SetEmuErr(storage.TypeManifest, tsOpSync)
			}

			// Merging compaction (will fail)
			h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true)

			h.db.Close()

			// Disarm both emulated errors before reopening.
			h.stor.SetEmuErr(0, tsOpWrite)
			h.stor.SetEmuErr(0, tsOpSync)

			// Should not lose data
			h.openDB()
			h.getVal("foo", "bar")
		}()
	}
}
  1250. func assertErr(t *testing.T, err error, wanterr bool) {
  1251. if err != nil {
  1252. if wanterr {
  1253. t.Log("AssertErr: got error (expected): ", err)
  1254. } else {
  1255. t.Error("AssertErr: got error: ", err)
  1256. }
  1257. } else if wanterr {
  1258. t.Error("AssertErr: expect error")
  1259. }
  1260. }
// TestDB_ClosedIsClosed verifies that every public operation on a closed
// DB — and on iterators and snapshots obtained from it before the close —
// fails cleanly instead of crashing or returning stale data.
func TestDB_ClosedIsClosed(t *testing.T) {
	h := newDbHarness(t)
	db := h.db

	var iter, iter2 iterator.Iterator
	var snap *Snapshot
	// Work with the DB inside this closure; its deferred h.close()
	// closes the DB before the post-close assertions below run.
	func() {
		defer h.close()

		h.put("k", "v")
		h.getVal("k", "v")

		iter = db.NewIterator(nil, h.ro)
		iter.Seek([]byte("k"))
		testKeyVal(t, iter, "k->v")

		var err error
		snap, err = db.GetSnapshot()
		if err != nil {
			t.Fatal("GetSnapshot: got error: ", err)
		}

		h.getValr(snap, "k", "v")

		iter2 = snap.NewIterator(nil, h.ro)
		iter2.Seek([]byte("k"))
		testKeyVal(t, iter2, "k->v")

		h.put("foo", "v2")
		h.delete("foo")

		// closing DB
		iter.Release()
		iter2.Release()
	}()

	// From here on the DB is closed: all operations must error.
	assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true)
	_, err := db.Get([]byte("k"), h.ro)
	assertErr(t, err, true)

	// A released iterator is invalid but carries no error of its own...
	if iter.Valid() {
		t.Errorf("iter.Valid should false")
	}
	assertErr(t, iter.Error(), false)
	testKeyVal(t, iter, "->")
	// ...until it is used again after the close.
	if iter.Seek([]byte("k")) {
		t.Errorf("iter.Seek should false")
	}
	assertErr(t, iter.Error(), true)

	assertErr(t, iter2.Error(), false)

	_, err = snap.Get([]byte("k"), h.ro)
	assertErr(t, err, true)

	_, err = db.GetSnapshot()
	assertErr(t, err, true)

	iter3 := db.NewIterator(nil, h.ro)
	assertErr(t, iter3.Error(), true)

	iter3 = snap.NewIterator(nil, h.ro)
	assertErr(t, iter3.Error(), true)

	assertErr(t, db.Delete([]byte("k"), h.wo), true)

	_, err = db.GetProperty("leveldb.stats")
	assertErr(t, err, true)

	_, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}})
	assertErr(t, err, true)

	assertErr(t, db.CompactRange(util.Range{}), true)

	assertErr(t, db.Close(), true)
}
  1317. type numberComparer struct{}
  1318. func (numberComparer) num(x []byte) (n int) {
  1319. fmt.Sscan(string(x[1:len(x)-1]), &n)
  1320. return
  1321. }
  1322. func (numberComparer) Name() string {
  1323. return "test.NumberComparer"
  1324. }
  1325. func (p numberComparer) Compare(a, b []byte) int {
  1326. return p.num(a) - p.num(b)
  1327. }
  1328. func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
  1329. func (numberComparer) Successor(dst, b []byte) []byte { return nil }
// TestDB_CustomComparer exercises a DB configured with numberComparer,
// which orders bracketed numbers by numeric value; equivalent spellings
// such as "[20]" and "[0x14]" must resolve to the same key.
func TestDB_CustomComparer(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		Comparer:    numberComparer{},
		WriteBuffer: 1000,
	})
	defer h.close()

	h.put("[10]", "ten")
	h.put("[0x14]", "twenty")
	for i := 0; i < 2; i++ {
		// Round 0 reads from memdb, round 1 (after compaction) from tables.
		h.getVal("[10]", "ten")
		h.getVal("[0xa]", "ten")
		h.getVal("[20]", "twenty")
		h.getVal("[0x14]", "twenty")
		h.get("[15]", false)
		h.get("[0xf]", false)
		h.compactMem()
		h.compactRange("[0]", "[9999]")
	}

	// Bulk writes to exercise the comparer across table boundaries.
	for n := 0; n < 2; n++ {
		for i := 0; i < 100; i++ {
			v := fmt.Sprintf("[%d]", i*10)
			h.put(v, v)
		}
		h.compactMem()
		h.compactRange("[0]", "[1000000]")
	}
}
// TestDB_ManualCompaction checks CompactRange behavior against the table
// layout: ranges that miss all files leave the layout untouched, ranges
// that overlap push tables down to the bottom level.
func TestDB_ManualCompaction(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	if h.o.GetMaxMemCompationLevel() != 2 {
		t.Fatal("fix test to reflect the config")
	}

	h.putMulti(3, "p", "q")
	h.tablesPerLevel("1,1,1")

	// Compaction range falls before files
	h.compactRange("", "c")
	h.tablesPerLevel("1,1,1")

	// Compaction range falls after files
	h.compactRange("r", "z")
	h.tablesPerLevel("1,1,1")

	// Compaction range overlaps files
	h.compactRange("p1", "p9")
	h.tablesPerLevel("0,0,1")

	// Populate a different range
	h.putMulti(3, "c", "e")
	h.tablesPerLevel("1,1,2")

	// Compact just the new range
	h.compactRange("b", "f")
	h.tablesPerLevel("0,0,2")

	// Compact all
	h.putMulti(1, "a", "z")
	h.tablesPerLevel("0,1,2")
	h.compactRange("", "")
	h.tablesPerLevel("0,0,1")
}
// TestDB_BloomFilter checks that the bloom filter keeps sstable read I/O
// low: present-key lookups should need roughly one table read each, and
// missing-key lookups should almost never touch the tables.
func TestDB_BloomFilter(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		// Disable the block cache so every lookup is visible to the
		// storage read counter.
		DisableBlockCache: true,
		Filter:            filter.NewBloomFilter(10),
	})
	defer h.close()

	key := func(i int) string {
		return fmt.Sprintf("key%06d", i)
	}

	const n = 10000

	// Populate multiple layers
	for i := 0; i < n; i++ {
		h.put(key(i), key(i))
	}
	h.compactMem()
	h.compactRange("a", "z")
	for i := 0; i < n; i += 100 {
		h.put(key(i), key(i))
	}
	h.compactMem()

	// Prevent auto compactions triggered by seeks
	h.stor.DelaySync(storage.TypeTable)

	// Lookup present keys. Should rarely read from small sstable.
	h.stor.SetReadCounter(storage.TypeTable)
	for i := 0; i < n; i++ {
		h.getVal(key(i), key(i))
	}
	cnt := int(h.stor.ReadCounter())
	t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
	// Allow at most 2% extra reads over the ideal one-read-per-key.
	if min, max := n, n+2*n/100; cnt < min || cnt > max {
		t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
	}

	// Lookup missing keys. Should rarely read from either sstable.
	h.stor.ResetReadCounter()
	for i := 0; i < n; i++ {
		h.get(key(i)+".missing", false)
	}
	cnt = int(h.stor.ReadCounter())
	t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
	// The filter's false-positive rate should keep this under 3%.
	if max := 3 * n / 100; cnt > max {
		t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
	}

	h.stor.ReleaseSync(storage.TypeTable)
}
// TestDB_Concurrent runs n goroutines that randomly put or get keys for
// a fixed duration; values encode key, writer id and per-writer op count
// so reads can validate that whatever they observe was actually written.
func TestDB_Concurrent(t *testing.T) {
	const n, secs, maxkey = 4, 2, 1000

	runtime.GOMAXPROCS(n)
	trun(t, func(h *dbHarness) {
		var closeWg sync.WaitGroup
		var stop uint32
		var cnt [n]uint32

		for i := 0; i < n; i++ {
			closeWg.Add(1)
			go func(i int) {
				var put, get, found uint
				defer func() {
					t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d",
						i, cnt[i], put, get, found, get-found)
					closeWg.Done()
				}()

				// Seed per goroutine so runs are reproducible per worker.
				rnd := rand.New(rand.NewSource(int64(1000 + i)))
				for atomic.LoadUint32(&stop) == 0 {
					x := cnt[i]

					k := rnd.Intn(maxkey)
					kstr := fmt.Sprintf("%016d", k)

					if (rnd.Int() % 2) > 0 {
						put++
						// Value format: "<key>.<writer>.<op-count>" padded to 1000 chars.
						h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x))
					} else {
						get++
						v, err := h.db.Get([]byte(kstr), h.ro)
						if err == nil {
							found++
							rk, ri, rx := 0, -1, uint32(0)
							fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx)
							if rk != k {
								t.Errorf("invalid key want=%d got=%d", k, rk)
							}
							if ri < 0 || ri >= n {
								t.Error("invalid goroutine number: ", ri)
							} else {
								// The op count in a stored value can never
								// exceed that writer's current counter.
								tx := atomic.LoadUint32(&(cnt[ri]))
								if rx > tx {
									t.Errorf("invalid seq number, %d > %d ", rx, tx)
								}
							}
						} else if err != ErrNotFound {
							t.Error("Get: got error: ", err)
							return
						}
					}
					atomic.AddUint32(&cnt[i], 1)
				}
			}(i)
		}

		time.Sleep(secs * time.Second)
		atomic.StoreUint32(&stop, 1)
		closeWg.Wait()
	})

	runtime.GOMAXPROCS(1)
}
// TestDB_Concurrent2 runs ascending and descending writers concurrently
// with n2 short-lived full iterators, checking each iterator observes
// strictly increasing keys and self-consistent key/value pairs.
func TestDB_Concurrent2(t *testing.T) {
	const n, n2 = 4, 4000

	runtime.GOMAXPROCS(n*2 + 2)
	truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) {
		var closeWg sync.WaitGroup
		var stop uint32

		// Ascending writers: k counts up from 0.
		for i := 0; i < n; i++ {
			closeWg.Add(1)
			go func(i int) {
				for k := 0; atomic.LoadUint32(&stop) == 0; k++ {
					h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
				}
				closeWg.Done()
			}(i)
		}

		// Descending writers: k counts down from 1000000.
		// NOTE(review): the `k < 0 ||` clause would keep this loop running
		// forever if k ever went negative; since k starts at 1000000 and
		// the loop is meant to exit via `stop`, this looks like a typo for
		// dead code — confirm intent before changing.
		for i := 0; i < n; i++ {
			closeWg.Add(1)
			go func(i int) {
				for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- {
					h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
				}
				closeWg.Done()
			}(i)
		}

		cmp := comparer.DefaultComparer
		for i := 0; i < n2; i++ {
			closeWg.Add(1)
			go func(i int) {
				it := h.db.NewIterator(nil, nil)
				var pk []byte
				for it.Next() {
					kk := it.Key()
					// Keys must come out in strictly increasing order.
					if cmp.Compare(kk, pk) <= 0 {
						t.Errorf("iter %d: %q is successor of %q", i, pk, kk)
					}
					pk = append(pk[:0], kk...)
					var k, vk, vi int
					if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil {
						t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err)
					} else if n < 1 {
						t.Errorf("iter %d: Cannot parse key %q", i, it.Key())
					}
					if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil {
						t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err)
					} else if n < 2 {
						t.Errorf("iter %d: Cannot parse value %q", i, it.Value())
					}

					// Value must encode the same key number as the key itself.
					if vk != k {
						t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk)
					}
				}
				if err := it.Error(); err != nil {
					t.Errorf("iter %d: Got error: %v", i, err)
				}
				it.Release()
				closeWg.Done()
			}(i)
		}

		atomic.StoreUint32(&stop, 1)
		closeWg.Wait()
	})

	runtime.GOMAXPROCS(1)
}
  1550. func TestDB_CreateReopenDbOnFile(t *testing.T) {
  1551. dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
  1552. if err := os.RemoveAll(dbpath); err != nil {
  1553. t.Fatal("cannot remove old db: ", err)
  1554. }
  1555. defer os.RemoveAll(dbpath)
  1556. for i := 0; i < 3; i++ {
  1557. stor, err := storage.OpenFile(dbpath)
  1558. if err != nil {
  1559. t.Fatalf("(%d) cannot open storage: %s", i, err)
  1560. }
  1561. db, err := Open(stor, nil)
  1562. if err != nil {
  1563. t.Fatalf("(%d) cannot open db: %s", i, err)
  1564. }
  1565. if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
  1566. t.Fatalf("(%d) cannot write to db: %s", i, err)
  1567. }
  1568. if err := db.Close(); err != nil {
  1569. t.Fatalf("(%d) cannot close db: %s", i, err)
  1570. }
  1571. if err := stor.Close(); err != nil {
  1572. t.Fatalf("(%d) cannot close storage: %s", i, err)
  1573. }
  1574. }
  1575. }
  1576. func TestDB_CreateReopenDbOnFile2(t *testing.T) {
  1577. dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
  1578. if err := os.RemoveAll(dbpath); err != nil {
  1579. t.Fatal("cannot remove old db: ", err)
  1580. }
  1581. defer os.RemoveAll(dbpath)
  1582. for i := 0; i < 3; i++ {
  1583. db, err := OpenFile(dbpath, nil)
  1584. if err != nil {
  1585. t.Fatalf("(%d) cannot open db: %s", i, err)
  1586. }
  1587. if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
  1588. t.Fatalf("(%d) cannot write to db: %s", i, err)
  1589. }
  1590. if err := db.Close(); err != nil {
  1591. t.Fatalf("(%d) cannot close db: %s", i, err)
  1592. }
  1593. }
  1594. }
// TestDB_DeletionMarkersOnMemdb checks that a deletion marker still held
// in the memdb hides a value that already resides in a table, both for
// point gets and for iteration.
func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.put("foo", "v1")
	h.compactMem()
	h.delete("foo")
	h.get("foo", false)
	h.getKeyVal("")
}
  1604. func TestDB_LeveldbIssue178(t *testing.T) {
  1605. nKeys := (opt.DefaultCompactionTableSize / 30) * 5
  1606. key1 := func(i int) string {
  1607. return fmt.Sprintf("my_key_%d", i)
  1608. }
  1609. key2 := func(i int) string {
  1610. return fmt.Sprintf("my_key_%d_xxx", i)
  1611. }
  1612. // Disable compression since it affects the creation of layers and the
  1613. // code below is trying to test against a very specific scenario.
  1614. h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
  1615. defer h.close()
  1616. // Create first key range.
  1617. batch := new(Batch)
  1618. for i := 0; i < nKeys; i++ {
  1619. batch.Put([]byte(key1(i)), []byte("value for range 1 key"))
  1620. }
  1621. h.write(batch)
  1622. // Create second key range.
  1623. batch.Reset()
  1624. for i := 0; i < nKeys; i++ {
  1625. batch.Put([]byte(key2(i)), []byte("value for range 2 key"))
  1626. }
  1627. h.write(batch)
  1628. // Delete second key range.
  1629. batch.Reset()
  1630. for i := 0; i < nKeys; i++ {
  1631. batch.Delete([]byte(key2(i)))
  1632. }
  1633. h.write(batch)
  1634. h.waitMemCompaction()
  1635. // Run manual compaction.
  1636. h.compactRange(key1(0), key1(nKeys-1))
  1637. // Checking the keys.
  1638. h.assertNumKeys(nKeys)
  1639. }
  1640. func TestDB_LeveldbIssue200(t *testing.T) {
  1641. h := newDbHarness(t)
  1642. defer h.close()
  1643. h.put("1", "b")
  1644. h.put("2", "c")
  1645. h.put("3", "d")
  1646. h.put("4", "e")
  1647. h.put("5", "f")
  1648. iter := h.db.NewIterator(nil, h.ro)
  1649. // Add an element that should not be reflected in the iterator.
  1650. h.put("25", "cd")
  1651. iter.Seek([]byte("5"))
  1652. assertBytes(t, []byte("5"), iter.Key())
  1653. iter.Prev()
  1654. assertBytes(t, []byte("4"), iter.Key())
  1655. iter.Prev()
  1656. assertBytes(t, []byte("3"), iter.Key())
  1657. iter.Next()
  1658. assertBytes(t, []byte("4"), iter.Key())
  1659. iter.Next()
  1660. assertBytes(t, []byte("5"), iter.Key())
  1661. }
// TestDB_GoleveldbIssue74 stress-tests snapshot consistency: a writer
// goroutine batch-writes n KEY/PTR pairs, verifies them through a
// snapshot while batching their deletion, and a concurrent reader checks
// that every snapshot sees either one complete generation or nothing.
func TestDB_GoleveldbIssue74(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		WriteBuffer: 1 * opt.MiB,
	})
	defer h.close()

	const n, dur = 10000, 5 * time.Second

	runtime.GOMAXPROCS(runtime.NumCPU())

	until := time.Now().Add(dur)
	wg := new(sync.WaitGroup)
	wg.Add(2)
	var done uint32
	// NOTE(review): both goroutines call t.Fatalf, which the testing
	// package documents as only safe from the test's own goroutine —
	// confirm whether this is acceptable for this test's vintage.
	go func() {
		var i int
		defer func() {
			t.Logf("WRITER DONE #%d", i)
			atomic.StoreUint32(&done, 1)
			wg.Done()
		}()

		b := new(Batch)
		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
			iv := fmt.Sprintf("VAL%010d", i)
			// Write generation i: KEYk -> KEYk+iv and PTRk -> KEYk.
			for k := 0; k < n; k++ {
				key := fmt.Sprintf("KEY%06d", k)
				b.Put([]byte(key), []byte(key+iv))
				b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
			}
			h.write(b)

			// Verify the generation through a snapshot while queuing its
			// deletion in the (reset) batch.
			b.Reset()
			snap := h.getSnapshot()
			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
			var k int
			for ; iter.Next(); k++ {
				ptrKey := iter.Key()
				key := iter.Value()

				if _, err := snap.Get(ptrKey, nil); err != nil {
					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
				}
				if value, err := snap.Get(key, nil); err != nil {
					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
				} else if string(value) != string(key)+iv {
					t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
				}

				b.Delete(key)
				b.Delete(ptrKey)
			}
			h.write(b)
			iter.Release()
			snap.Release()
			if k != n {
				t.Fatalf("#%d %d != %d", i, k, n)
			}
		}
	}()
	go func() {
		var i int
		defer func() {
			t.Logf("READER DONE #%d", i)
			atomic.StoreUint32(&done, 1)
			wg.Done()
		}()
		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
			snap := h.getSnapshot()
			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
			var prevValue string
			var k int
			for ; iter.Next(); k++ {
				ptrKey := iter.Key()
				key := iter.Value()

				if _, err := snap.Get(ptrKey, nil); err != nil {
					t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
				}

				// Every value in one snapshot must carry the same
				// generation suffix (iv) as the first one observed.
				if value, err := snap.Get(key, nil); err != nil {
					t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
				} else if prevValue != "" && string(value) != string(key)+prevValue {
					t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
				} else {
					prevValue = string(value[len(key):])
				}
			}
			iter.Release()
			snap.Release()
			// Either a full generation (n PTR keys) or an empty DB.
			if k > 0 && k != n {
				t.Fatalf("#%d %d != %d", i, k, n)
			}
		}
	}()

	wg.Wait()
}
  1750. func TestDB_GetProperties(t *testing.T) {
  1751. h := newDbHarness(t)
  1752. defer h.close()
  1753. _, err := h.db.GetProperty("leveldb.num-files-at-level")
  1754. if err == nil {
  1755. t.Error("GetProperty() failed to detect missing level")
  1756. }
  1757. _, err = h.db.GetProperty("leveldb.num-files-at-level0")
  1758. if err != nil {
  1759. t.Error("got unexpected error", err)
  1760. }
  1761. _, err = h.db.GetProperty("leveldb.num-files-at-level0x")
  1762. if err == nil {
  1763. t.Error("GetProperty() failed to detect invalid level")
  1764. }
  1765. }
  1766. func TestDB_GoleveldbIssue72and83(t *testing.T) {
  1767. h := newDbHarnessWopt(t, &opt.Options{
  1768. WriteBuffer: 1 * opt.MiB,
  1769. OpenFilesCacheCapacity: 3,
  1770. })
  1771. defer h.close()
  1772. const n, wn, dur = 10000, 100, 30 * time.Second
  1773. runtime.GOMAXPROCS(runtime.NumCPU())
  1774. randomData := func(prefix byte, i int) []byte {
  1775. data := make([]byte, 1+4+32+64+32)
  1776. _, err := crand.Reader.Read(data[1 : len(data)-8])
  1777. if err != nil {
  1778. panic(err)
  1779. }
  1780. data[0] = prefix
  1781. binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
  1782. binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
  1783. return data
  1784. }
  1785. keys := make([][]byte, n)
  1786. for i := range keys {
  1787. keys[i] = randomData(1, 0)
  1788. }
  1789. until := time.Now().Add(dur)
  1790. wg := new(sync.WaitGroup)
  1791. wg.Add(3)
  1792. var done uint32
  1793. go func() {
  1794. i := 0
  1795. defer func() {
  1796. t.Logf("WRITER DONE #%d", i)
  1797. wg.Done()
  1798. }()
  1799. b := new(Batch)
  1800. for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
  1801. b.Reset()
  1802. for _, k1 := range keys {
  1803. k2 := randomData(2, i)
  1804. b.Put(k2, randomData(42, i))
  1805. b.Put(k1, k2)
  1806. }
  1807. if err := h.db.Write(b, h.wo); err != nil {
  1808. atomic.StoreUint32(&done, 1)
  1809. t.Fatalf("WRITER #%d db.Write: %v", i, err)
  1810. }
  1811. }
  1812. }()
  1813. go func() {
  1814. var i int
  1815. defer func() {
  1816. t.Logf("READER0 DONE #%d", i)
  1817. atomic.StoreUint32(&done, 1)
  1818. wg.Done()
  1819. }()
  1820. for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
  1821. snap := h.getSnapshot()
  1822. seq := snap.elem.seq
  1823. if seq == 0 {
  1824. snap.Release()
  1825. continue
  1826. }
  1827. iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
  1828. writei := int(seq/(n*2) - 1)
  1829. var k int
  1830. for ; iter.Next(); k++ {
  1831. k1 := iter.Key()
  1832. k2 := iter.Value()
  1833. k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
  1834. k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
  1835. if k1checksum0 != k1checksum1 {
  1836. t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0)
  1837. }
  1838. k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
  1839. k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
  1840. if k2checksum0 != k2checksum1 {
  1841. t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1)
  1842. }
  1843. kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
  1844. if writei != kwritei {
  1845. t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
  1846. }
  1847. if _, err := snap.Get(k2, nil); err != nil {
  1848. t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
  1849. }
  1850. }
  1851. if err := iter.Error(); err != nil {
  1852. t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
  1853. }
  1854. iter.Release()
  1855. snap.Release()
  1856. if k > 0 && k != n {
  1857. t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
  1858. }
  1859. }
  1860. }()
  1861. go func() {
  1862. var i int
  1863. defer func() {
  1864. t.Logf("READER1 DONE #%d", i)
  1865. atomic.StoreUint32(&done, 1)
  1866. wg.Done()
  1867. }()
  1868. for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
  1869. iter := h.db.NewIterator(nil, nil)
  1870. seq := iter.(*dbIter).seq
  1871. if seq == 0 {
  1872. iter.Release()
  1873. continue
  1874. }
  1875. writei := int(seq/(n*2) - 1)
  1876. var k int
  1877. for ok := iter.Last(); ok; ok = iter.Prev() {
  1878. k++
  1879. }
  1880. if err := iter.Error(); err != nil {
  1881. t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
  1882. }
  1883. iter.Release()
  1884. if m := (writei+1)*n + n; k != m {
  1885. t.Fatalf("READER1 #%d W#%d short read, got=%d want=