PageRenderTime 63ms CodeModel.GetById 25ms RepoModel.GetById 0ms app.codeStats 1ms

/leveldb/db_test.go

https://bitbucket.org/cloudwallet/goleveldb
Go | 2926 lines | 2450 code | 387 blank | 89 comment | 564 complexity | dca3d6303df2098d4ec9263616753f2c MD5 | raw file
Possible License(s): BSD-3-Clause

Large files are truncated, but you can click here to view the full file

  1. // Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
  2. // All rights reserved.
  3. //
  4. // Use of this source code is governed by a BSD-style license that can be
  5. // found in the LICENSE file.
  6. package leveldb
  7. import (
  8. "bytes"
  9. "container/list"
  10. crand "crypto/rand"
  11. "encoding/binary"
  12. "fmt"
  13. "math/rand"
  14. "os"
  15. "path/filepath"
  16. "runtime"
  17. "strings"
  18. "sync"
  19. "sync/atomic"
  20. "testing"
  21. "time"
  22. "unsafe"
  23. "github.com/onsi/gomega"
  24. "bitbucket.org/cloudwallet/goleveldb/leveldb/comparer"
  25. "bitbucket.org/cloudwallet/goleveldb/leveldb/errors"
  26. "bitbucket.org/cloudwallet/goleveldb/leveldb/filter"
  27. "bitbucket.org/cloudwallet/goleveldb/leveldb/iterator"
  28. "bitbucket.org/cloudwallet/goleveldb/leveldb/opt"
  29. "bitbucket.org/cloudwallet/goleveldb/leveldb/storage"
  30. "bitbucket.org/cloudwallet/goleveldb/leveldb/testutil"
  31. "bitbucket.org/cloudwallet/goleveldb/leveldb/util"
  32. )
  33. func tkey(i int) []byte {
  34. return []byte(fmt.Sprintf("%016d", i))
  35. }
  36. func tval(seed, n int) []byte {
  37. r := rand.New(rand.NewSource(int64(seed)))
  38. return randomString(r, n)
  39. }
  40. func testingLogger(t *testing.T) func(log string) {
  41. return func(log string) {
  42. t.Log(log)
  43. }
  44. }
  45. func testingPreserveOnFailed(t *testing.T) func() (preserve bool, err error) {
  46. return func() (preserve bool, err error) {
  47. preserve = t.Failed()
  48. return
  49. }
  50. }
// dbHarness bundles a DB under test with its backing test storage and the
// options used to open it and to read/write during the test.
type dbHarness struct {
	t    *testing.T
	stor *testutil.Storage // in-memory test storage backing the DB
	db   *DB
	o    *opt.Options      // options the DB was (re)opened with
	ro   *opt.ReadOptions  // read options; nil means defaults (set in init)
	wo   *opt.WriteOptions // write options; nil means defaults (set in init)
}
  59. func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness {
  60. h := new(dbHarness)
  61. h.init(t, o)
  62. return h
  63. }
  64. func newDbHarness(t *testing.T) *dbHarness {
  65. return newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true})
  66. }
// init creates the in-memory test storage, wires the test logging and
// preserve-on-failure hooks, and opens the DB. A failed open aborts the test.
func (h *dbHarness) init(t *testing.T, o *opt.Options) {
	gomega.RegisterTestingT(t)
	h.t = t
	h.stor = testutil.NewStorage()
	h.stor.OnLog(testingLogger(t))
	h.stor.OnClose(testingPreserveOnFailed(t))
	h.o = o
	h.ro = nil
	h.wo = nil
	if err := h.openDB0(); err != nil {
		// So that it will come after fatal message.
		defer h.stor.Close()
		h.t.Fatal("Open (init): got error: ", err)
	}
}
  82. func (h *dbHarness) openDB0() (err error) {
  83. h.t.Log("opening DB")
  84. h.db, err = Open(h.stor, h.o)
  85. return
  86. }
  87. func (h *dbHarness) openDB() {
  88. if err := h.openDB0(); err != nil {
  89. h.t.Fatal("Open: got error: ", err)
  90. }
  91. }
  92. func (h *dbHarness) closeDB0() error {
  93. h.t.Log("closing DB")
  94. return h.db.Close()
  95. }
  96. func (h *dbHarness) closeDB() {
  97. if h.db != nil {
  98. if err := h.closeDB0(); err != nil {
  99. h.t.Error("Close: got error: ", err)
  100. }
  101. }
  102. h.stor.CloseCheck()
  103. runtime.GC()
  104. }
  105. func (h *dbHarness) reopenDB() {
  106. if h.db != nil {
  107. h.closeDB()
  108. }
  109. h.openDB()
  110. }
  111. func (h *dbHarness) close() {
  112. if h.db != nil {
  113. h.closeDB0()
  114. h.db = nil
  115. }
  116. h.stor.Close()
  117. h.stor = nil
  118. runtime.GC()
  119. }
  120. func (h *dbHarness) openAssert(want bool) {
  121. db, err := Open(h.stor, h.o)
  122. if err != nil {
  123. if want {
  124. h.t.Error("Open: assert: got error: ", err)
  125. } else {
  126. h.t.Log("Open: assert: got error (expected): ", err)
  127. }
  128. } else {
  129. if !want {
  130. h.t.Error("Open: assert: expect error")
  131. }
  132. db.Close()
  133. }
  134. }
  135. func (h *dbHarness) write(batch *Batch) {
  136. if err := h.db.Write(batch, h.wo); err != nil {
  137. h.t.Error("Write: got error: ", err)
  138. }
  139. }
  140. func (h *dbHarness) put(key, value string) {
  141. if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil {
  142. h.t.Error("Put: got error: ", err)
  143. }
  144. }
  145. func (h *dbHarness) putMulti(n int, low, hi string) {
  146. for i := 0; i < n; i++ {
  147. h.put(low, "begin")
  148. h.put(hi, "end")
  149. h.compactMem()
  150. }
  151. }
// maxNextLevelOverlappingBytes scans every table in levels 1..last-1,
// measures how many bytes of the next level each table's key range overlaps,
// and fails the test if the worst case exceeds want.
func (h *dbHarness) maxNextLevelOverlappingBytes(want int64) {
	t := h.t
	db := h.db
	var (
		maxOverlaps int64 // largest per-table overlap seen so far
		maxLevel    int   // level that produced maxOverlaps
	)
	v := db.s.version()
	if len(v.levels) > 2 {
		// Skip level 0 (tables may overlap each other) and the last level
		// (it has no next level to overlap).
		for i, tt := range v.levels[1 : len(v.levels)-1] {
			level := i + 1
			next := v.levels[level+1]
			for _, t := range tt {
				r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
				sum := r.size()
				if sum > maxOverlaps {
					maxOverlaps = sum
					maxLevel = level
				}
			}
		}
	}
	v.release()
	if maxOverlaps > want {
		t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel)
	} else {
		t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want)
	}
}
  181. func (h *dbHarness) delete(key string) {
  182. t := h.t
  183. db := h.db
  184. err := db.Delete([]byte(key), h.wo)
  185. if err != nil {
  186. t.Error("Delete: got error: ", err)
  187. }
  188. }
  189. func (h *dbHarness) assertNumKeys(want int) {
  190. iter := h.db.NewIterator(nil, h.ro)
  191. defer iter.Release()
  192. got := 0
  193. for iter.Next() {
  194. got++
  195. }
  196. if err := iter.Error(); err != nil {
  197. h.t.Error("assertNumKeys: ", err)
  198. }
  199. if want != got {
  200. h.t.Errorf("assertNumKeys: want=%d got=%d", want, got)
  201. }
  202. }
  203. func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) {
  204. t := h.t
  205. v, err := db.Get([]byte(key), h.ro)
  206. switch err {
  207. case ErrNotFound:
  208. if expectFound {
  209. t.Errorf("Get: key '%s' not found, want found", key)
  210. }
  211. case nil:
  212. found = true
  213. if !expectFound {
  214. t.Errorf("Get: key '%s' found, want not found", key)
  215. }
  216. default:
  217. t.Error("Get: got error: ", err)
  218. }
  219. return
  220. }
  221. func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) {
  222. return h.getr(h.db, key, expectFound)
  223. }
  224. func (h *dbHarness) getValr(db Reader, key, value string) {
  225. t := h.t
  226. found, r := h.getr(db, key, true)
  227. if !found {
  228. return
  229. }
  230. rval := string(r)
  231. if rval != value {
  232. t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value)
  233. }
  234. }
  235. func (h *dbHarness) getVal(key, value string) {
  236. h.getValr(h.db, key, value)
  237. }
// allEntriesFor walks the raw (merged) iterator and collects every internal
// entry for key — live values, deletion markers ("DEL") and corrupted keys
// ("CORRUPTED") — newest first, then compares the rendering against want,
// e.g. "[ v2, DEL, v1 ]".
func (h *dbHarness) allEntriesFor(key, want string) {
	t := h.t
	db := h.db
	s := db.s
	// Seek with max sequence so every version of the user key is at or
	// after the seek position.
	ikey := makeInternalKey(nil, []byte(key), keyMaxSeq, keyTypeVal)
	iter := db.newRawIterator(nil, nil, nil, nil)
	if !iter.Seek(ikey) && iter.Error() != nil {
		t.Error("AllEntries: error during seek, err: ", iter.Error())
		return
	}
	res := "[ "
	first := true
	for iter.Valid() {
		if ukey, _, kt, kerr := parseInternalKey(iter.Key()); kerr == nil {
			// Stop once the iterator moves past the requested user key.
			if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
				break
			}
			if !first {
				res += ", "
			}
			first = false
			switch kt {
			case keyTypeVal:
				res += string(iter.Value())
			case keyTypeDel:
				res += "DEL"
			}
		} else {
			// Unparseable internal key: record it rather than aborting.
			if !first {
				res += ", "
			}
			first = false
			res += "CORRUPTED"
		}
		iter.Next()
	}
	if !first {
		res += " "
	}
	res += "]"
	if res != want {
		t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want)
	}
}
// Return a string that contains all key,value pairs in order,
// formatted like "(k1->v1)(k2->v2)".
//
// getKeyVal reads through a fresh snapshot so the comparison is stable even
// if concurrent compaction is running, and fails the test on mismatch.
func (h *dbHarness) getKeyVal(want string) {
	t := h.t
	db := h.db
	s, err := db.GetSnapshot()
	if err != nil {
		t.Fatal("GetSnapshot: got error: ", err)
	}
	res := ""
	iter := s.NewIterator(nil, nil)
	for iter.Next() {
		res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value()))
	}
	// Release the iterator before the snapshot it was created from.
	iter.Release()
	if res != want {
		t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want)
	}
	s.Release()
}
  302. func (h *dbHarness) waitCompaction() {
  303. t := h.t
  304. db := h.db
  305. if err := db.compTriggerWait(db.tcompCmdC); err != nil {
  306. t.Error("compaction error: ", err)
  307. }
  308. }
  309. func (h *dbHarness) waitMemCompaction() {
  310. t := h.t
  311. db := h.db
  312. if err := db.compTriggerWait(db.mcompCmdC); err != nil {
  313. t.Error("compaction error: ", err)
  314. }
  315. }
// compactMem forces the current memdb to be frozen and flushed to a table,
// then asserts that at least one table now exists.
func (h *dbHarness) compactMem() {
	t := h.t
	db := h.db
	t.Log("starting memdb compaction")
	// Acquire the DB write lock so rotateMem can run without racing writers;
	// released via the deferred receive.
	db.writeLockC <- struct{}{}
	defer func() {
		<-db.writeLockC
	}()
	if _, err := db.rotateMem(0, true); err != nil {
		t.Error("compaction error: ", err)
	}
	if h.totalTables() == 0 {
		t.Error("zero tables after mem compaction")
	}
	t.Log("memdb compaction done")
}
  332. func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
  333. t := h.t
  334. db := h.db
  335. var _min, _max []byte
  336. if min != "" {
  337. _min = []byte(min)
  338. }
  339. if max != "" {
  340. _max = []byte(max)
  341. }
  342. t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
  343. if err := db.compTriggerRange(db.tcompCmdC, level, _min, _max); err != nil {
  344. if wanterr {
  345. t.Log("CompactRangeAt: got error (expected): ", err)
  346. } else {
  347. t.Error("CompactRangeAt: got error: ", err)
  348. }
  349. } else if wanterr {
  350. t.Error("CompactRangeAt: expect error")
  351. }
  352. t.Log("table range compaction done")
  353. }
  354. func (h *dbHarness) compactRangeAt(level int, min, max string) {
  355. h.compactRangeAtErr(level, min, max, false)
  356. }
  357. func (h *dbHarness) compactRange(min, max string) {
  358. t := h.t
  359. db := h.db
  360. t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
  361. var r util.Range
  362. if min != "" {
  363. r.Start = []byte(min)
  364. }
  365. if max != "" {
  366. r.Limit = []byte(max)
  367. }
  368. if err := db.CompactRange(r); err != nil {
  369. t.Error("CompactRange: got error: ", err)
  370. }
  371. t.Log("DB range compaction done")
  372. }
  373. func (h *dbHarness) sizeOf(start, limit string) int64 {
  374. sz, err := h.db.SizeOf([]util.Range{
  375. {[]byte(start), []byte(limit)},
  376. })
  377. if err != nil {
  378. h.t.Error("SizeOf: got error: ", err)
  379. }
  380. return sz.Sum()
  381. }
  382. func (h *dbHarness) sizeAssert(start, limit string, low, hi int64) {
  383. sz := h.sizeOf(start, limit)
  384. if sz < low || sz > hi {
  385. h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
  386. shorten(start), shorten(limit), low, hi, sz)
  387. }
  388. }
  389. func (h *dbHarness) getSnapshot() (s *Snapshot) {
  390. s, err := h.db.GetSnapshot()
  391. if err != nil {
  392. h.t.Fatal("GetSnapshot: got error: ", err)
  393. }
  394. return
  395. }
// getTablesPerLevel renders the per-level table counts as a comma-separated
// string with trailing empty levels trimmed, e.g. "1,0,1" (not "1,0,1,0,0").
func (h *dbHarness) getTablesPerLevel() string {
	res := ""
	nz := 0 // length of res up to and including the last non-empty level
	v := h.db.s.version()
	for level, tables := range v.levels {
		if level > 0 {
			res += ","
		}
		res += fmt.Sprint(len(tables))
		if len(tables) > 0 {
			nz = len(res)
		}
	}
	v.release()
	// Truncate at the last non-zero count, dropping the ",0,0..." tail.
	return res[:nz]
}
  412. func (h *dbHarness) tablesPerLevel(want string) {
  413. res := h.getTablesPerLevel()
  414. if res != want {
  415. h.t.Errorf("invalid tables len, want=%s, got=%s", want, res)
  416. }
  417. }
  418. func (h *dbHarness) totalTables() (n int) {
  419. v := h.db.s.version()
  420. for _, tables := range v.levels {
  421. n += len(tables)
  422. }
  423. v.release()
  424. return
  425. }
// keyValue is the minimal subset of iterator behavior that testKeyVal needs.
type keyValue interface {
	Key() []byte
	Value() []byte
}
  430. func testKeyVal(t *testing.T, kv keyValue, want string) {
  431. res := string(kv.Key()) + "->" + string(kv.Value())
  432. if res != want {
  433. t.Errorf("invalid key/value, want=%q, got=%q", want, res)
  434. }
  435. }
  436. func numKey(num int) string {
  437. return fmt.Sprintf("key%06d", num)
  438. }
// testingBloomFilter is the shared 10-bits-per-key bloom filter that truno
// installs for its filter option permutation.
var testingBloomFilter = filter.NewBloomFilter(10)
// truno runs f four times under option permutations derived from o:
// iteration 0 uses o as given, 1 adds a bloom filter, 2 disables compression,
// 3 reuses the previous options but reopens the DB before running f.
// NOTE(review): o is reassigned across iterations, so later permutations
// build on the previous one rather than on the caller's original options.
func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) {
	for i := 0; i < 4; i++ {
		// Inner func so each iteration's deferred h.close() runs promptly.
		func() {
			switch i {
			case 0:
			case 1:
				if o == nil {
					o = &opt.Options{
						DisableLargeBatchTransaction: true,
						Filter:                       testingBloomFilter,
					}
				} else {
					// Copy the options before mutating so the caller's
					// struct is left untouched.
					old := o
					o = &opt.Options{}
					*o = *old
					o.Filter = testingBloomFilter
				}
			case 2:
				if o == nil {
					o = &opt.Options{
						DisableLargeBatchTransaction: true,
						Compression:                  opt.NoCompression,
					}
				} else {
					old := o
					o = &opt.Options{}
					*o = *old
					o.Compression = opt.NoCompression
				}
			}
			h := newDbHarnessWopt(t, o)
			defer h.close()
			switch i {
			case 3:
				h.reopenDB()
			}
			f(h)
		}()
	}
}
  480. func trun(t *testing.T, f func(h *dbHarness)) {
  481. truno(t, nil, f)
  482. }
  483. func testAligned(t *testing.T, name string, offset uintptr) {
  484. if offset%8 != 0 {
  485. t.Errorf("field %s offset is not 64-bit aligned", name)
  486. }
  487. }
// Test_FieldsAligned verifies that the 64-bit fields accessed atomically in
// DB and session sit at 8-byte-aligned offsets (required by sync/atomic on
// 32-bit platforms).
func Test_FieldsAligned(t *testing.T) {
	p1 := new(DB)
	testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
	p2 := new(session)
	testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
	testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
	testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
	testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
}
// TestDB_Locking verifies the storage lock: a second Open on the same
// storage must fail while the DB is open and succeed after it is closed.
func TestDB_Locking(t *testing.T) {
	h := newDbHarness(t)
	defer h.stor.Close()
	h.openAssert(false)
	h.closeDB()
	h.openAssert(true)
}
// TestDB_Empty checks that a missing key stays missing across a reopen.
func TestDB_Empty(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.get("foo", false)
		h.reopenDB()
		h.get("foo", false)
	})
}
// TestDB_ReadWrite exercises basic put/get including overwrite, and checks
// the values survive a reopen.
func TestDB_ReadWrite(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.getVal("foo", "v1")
		h.put("bar", "v2")
		h.put("foo", "v3") // overwrite
		h.getVal("foo", "v3")
		h.getVal("bar", "v2")
		h.reopenDB()
		h.getVal("foo", "v3")
		h.getVal("bar", "v2")
	})
}
// TestDB_PutDeleteGet checks put/overwrite/delete sequencing and that the
// deletion is still effective after a reopen.
func TestDB_PutDeleteGet(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.getVal("foo", "v1")
		h.put("foo", "v2")
		h.getVal("foo", "v2")
		h.delete("foo")
		h.get("foo", false)
		h.reopenDB()
		h.get("foo", false)
	})
}
// TestDB_EmptyBatch ensures writing an empty batch succeeds and is a no-op.
func TestDB_EmptyBatch(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	h.get("foo", false)
	err := h.db.Write(new(Batch), h.wo)
	if err != nil {
		t.Error("writing empty batch yield error: ", err)
	}
	h.get("foo", false)
}
// TestDB_GetFromFrozen stalls table syncs so the memtable gets frozen but
// cannot be flushed, then verifies reads are still served from the frozen
// memtable and everything is recoverable after release and reopen.
func TestDB_GetFromFrozen(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		WriteBuffer:                  100100,
	})
	defer h.close()
	h.put("foo", "v1")
	h.getVal("foo", "v1")
	h.stor.Stall(testutil.ModeSync, storage.TypeTable) // Block sync calls
	h.put("k1", strings.Repeat("x", 100000))           // Fill memtable
	h.put("k2", strings.Repeat("y", 100000))           // Trigger compaction
	// Poll (bounded) until the memtable has been frozen.
	for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ {
		time.Sleep(10 * time.Microsecond)
	}
	if h.db.getFrozenMem() == nil {
		h.stor.Release(testutil.ModeSync, storage.TypeTable)
		t.Fatal("No frozen mem")
	}
	h.getVal("foo", "v1")
	h.stor.Release(testutil.ModeSync, storage.TypeTable) // Release sync calls
	h.reopenDB()
	h.getVal("foo", "v1")
	h.get("k1", true)
	h.get("k2", true)
}
// TestDB_GetFromTable verifies a value flushed from memtable into an
// sstable remains readable.
func TestDB_GetFromTable(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.compactMem()
		h.getVal("foo", "v1")
	})
}
// TestDB_GetSnapshot checks that a snapshot keeps serving the values that
// existed when it was taken, even after overwrites and a memdb compaction.
// The long "bar" key exercises keys larger than a block handle.
func TestDB_GetSnapshot(t *testing.T) {
	trun(t, func(h *dbHarness) {
		bar := strings.Repeat("b", 200)
		h.put("foo", "v1")
		h.put(bar, "v1")
		snap, err := h.db.GetSnapshot()
		if err != nil {
			t.Fatal("GetSnapshot: got error: ", err)
		}
		h.put("foo", "v2")
		h.put(bar, "v2")
		h.getVal("foo", "v2")
		h.getVal(bar, "v2")
		// Snapshot still sees the old values.
		h.getValr(snap, "foo", "v1")
		h.getValr(snap, bar, "v1")
		h.compactMem()
		h.getVal("foo", "v2")
		h.getVal(bar, "v2")
		h.getValr(snap, "foo", "v1")
		h.getValr(snap, bar, "v1")
		snap.Release()
		h.reopenDB()
		h.getVal("foo", "v2")
		h.getVal(bar, "v2")
	})
}
// TestDB_GetLevel0Ordering builds multiple overlapping level-0 tables and
// verifies Get returns the newest value, including after a reopen.
func TestDB_GetLevel0Ordering(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.db.memdbMaxLevel = 2
		// Each round writes the same keys then flushes, creating a new
		// level-0 table per round.
		for i := 0; i < 4; i++ {
			h.put("bar", fmt.Sprintf("b%d", i))
			h.put("foo", fmt.Sprintf("v%d", i))
			h.compactMem()
		}
		h.getVal("foo", "v3")
		h.getVal("bar", "b3")
		v := h.db.s.version()
		t0len := v.tLen(0)
		v.release()
		if t0len < 2 {
			t.Errorf("level-0 tables is less than 2, got %d", t0len)
		}
		h.reopenDB()
		h.getVal("foo", "v3")
		h.getVal("bar", "b3")
	})
}
// TestDB_GetOrderedByLevels verifies that a newer value in a shallower level
// shadows an older value pushed to a deeper level by range compaction.
func TestDB_GetOrderedByLevels(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.compactMem()
		h.compactRange("a", "z") // push v1 to a deeper level
		h.getVal("foo", "v1")
		h.put("foo", "v2")
		h.compactMem()
		h.getVal("foo", "v2")
	})
}
// TestDB_GetPicksCorrectFile verifies Get consults the right table when a
// non-level-0 level holds several disjoint files.
func TestDB_GetPicksCorrectFile(t *testing.T) {
	trun(t, func(h *dbHarness) {
		// Arrange to have multiple files in a non-level-0 level.
		h.put("a", "va")
		h.compactMem()
		h.compactRange("a", "b")
		h.put("x", "vx")
		h.compactMem()
		h.compactRange("x", "y")
		h.put("f", "vf")
		h.compactMem()
		h.compactRange("f", "g")
		h.getVal("a", "va")
		h.getVal("f", "vf")
		h.getVal("x", "vx")
		h.compactRange("", "") // full compaction; reads must still succeed
		h.getVal("a", "va")
		h.getVal("f", "vf")
		h.getVal("x", "vx")
	})
}
// TestDB_GetEncountersEmptyLevel regression-tests read-triggered compaction
// level selection when a level between two populated levels is empty.
func TestDB_GetEncountersEmptyLevel(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.db.memdbMaxLevel = 2
		// Arrange for the following to happen:
		// * sstable A in level 0
		// * nothing in level 1
		// * sstable B in level 2
		// Then do enough Get() calls to arrange for an automatic compaction
		// of sstable A. A bug would cause the compaction to be marked as
		// occurring at level 1 (instead of the correct level 0).
		// Step 1: First place sstables in levels 0 and 2
		for i := 0; ; i++ {
			if i >= 100 {
				t.Fatal("could not fill levels-0 and level-2")
			}
			v := h.db.s.version()
			if v.tLen(0) > 0 && v.tLen(2) > 0 {
				v.release()
				break
			}
			v.release()
			h.put("a", "begin")
			h.put("z", "end")
			h.compactMem()
			h.getVal("a", "begin")
			h.getVal("z", "end")
		}
		// Step 2: clear level 1 if necessary.
		h.compactRangeAt(1, "", "")
		h.tablesPerLevel("1,0,1")
		h.getVal("a", "begin")
		h.getVal("z", "end")
		// Step 3: read a bunch of times
		for i := 0; i < 200; i++ {
			h.get("missing", false)
		}
		// Step 4: Wait for compaction to finish
		h.waitCompaction()
		v := h.db.s.version()
		if v.tLen(0) > 0 {
			t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
		}
		v.release()
		h.getVal("a", "begin")
		h.getVal("z", "end")
	})
}
// TestDB_IterMultiWithDelete verifies the iterator skips a deleted key in
// both directions, before and after the memtable is flushed to a table.
func TestDB_IterMultiWithDelete(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("a", "va")
		h.put("b", "vb")
		h.put("c", "vc")
		h.delete("b")
		h.get("b", false)
		iter := h.db.NewIterator(nil, nil)
		iter.Seek([]byte("c"))
		testKeyVal(t, iter, "c->vc")
		iter.Prev() // must skip over deleted "b"
		testKeyVal(t, iter, "a->va")
		iter.Release()
		h.compactMem()
		// Repeat after flushing to a table.
		iter = h.db.NewIterator(nil, nil)
		iter.Seek([]byte("c"))
		testKeyVal(t, iter, "c->vc")
		iter.Prev()
		testKeyVal(t, iter, "a->va")
		iter.Release()
	})
}
// TestDB_IteratorPinsRef verifies a live iterator keeps seeing the DB state
// from its creation time even after heavy subsequent writes and compactions.
func TestDB_IteratorPinsRef(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	h.put("foo", "hello")
	// Get iterator that will yield the current contents of the DB.
	iter := h.db.NewIterator(nil, nil)
	// Write to force compactions
	h.put("foo", "newvalue1")
	for i := 0; i < 100; i++ {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
	}
	h.put("foo", "newvalue2")
	iter.First()
	testKeyVal(t, iter, "foo->hello")
	if iter.Next() {
		t.Errorf("expect eof")
	}
	iter.Release()
}
// TestDB_Recover verifies journal recovery across repeated reopens,
// including writes made after a recovery.
func TestDB_Recover(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.put("baz", "v5")
		h.reopenDB()
		h.getVal("foo", "v1")
		h.getVal("foo", "v1")
		h.getVal("baz", "v5")
		h.put("bar", "v2")
		h.put("foo", "v3")
		h.reopenDB()
		h.getVal("foo", "v3")
		h.put("foo", "v4")
		h.getVal("foo", "v4")
		h.getVal("bar", "v2")
		h.getVal("baz", "v5")
	})
}
// TestDB_RecoverWithEmptyJournal checks recovery when a reopen leaves an
// empty journal (two back-to-back reopens with no writes between them).
func TestDB_RecoverWithEmptyJournal(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		h.put("foo", "v2")
		h.reopenDB()
		h.reopenDB()
		h.put("foo", "v3")
		h.reopenDB()
		h.getVal("foo", "v3")
	})
}
// TestDB_RecoverDuringMemtableCompaction reopens the DB while a memtable
// flush is stalled on table sync, verifying nothing is lost on recovery.
func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
	truno(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 1000000}, func(h *dbHarness) {
		h.stor.Stall(testutil.ModeSync, storage.TypeTable)
		h.put("big1", strings.Repeat("x", 10000000)) // exceeds the write buffer
		h.put("big2", strings.Repeat("y", 1000))
		h.put("bar", "v2")
		h.stor.Release(testutil.ModeSync, storage.TypeTable)
		h.reopenDB()
		h.getVal("bar", "v2")
		h.getVal("big1", strings.Repeat("x", 10000000))
		h.getVal("big2", strings.Repeat("y", 1000))
	})
}
// TestDB_MinorCompactionsHappen writes enough data with a tiny write buffer
// to force minor compactions, then verifies every value before and after a
// reopen.
func TestDB_MinorCompactionsHappen(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 10000})
	defer h.close()
	n := 500
	key := func(i int) string {
		return fmt.Sprintf("key%06d", i)
	}
	for i := 0; i < n; i++ {
		h.put(key(i), key(i)+strings.Repeat("v", 1000))
	}
	for i := 0; i < n; i++ {
		h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
	}
	h.reopenDB()
	for i := 0; i < n; i++ {
		h.getVal(key(i), key(i)+strings.Repeat("v", 1000))
	}
}
// TestDB_RecoverWithLargeJournal writes a journal much larger than the
// reopen-time write buffer and verifies recovery flushes multiple level-0
// tables mid-journal.
func TestDB_RecoverWithLargeJournal(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	h.put("big1", strings.Repeat("1", 200000))
	h.put("big2", strings.Repeat("2", 200000))
	h.put("small3", strings.Repeat("3", 10))
	h.put("small4", strings.Repeat("4", 10))
	h.tablesPerLevel("") // everything still in the memtable
	// Make sure that if we re-open with a small write buffer size that
	// we flush table files in the middle of a large journal file.
	h.o.WriteBuffer = 100000
	h.reopenDB()
	h.getVal("big1", strings.Repeat("1", 200000))
	h.getVal("big2", strings.Repeat("2", 200000))
	h.getVal("small3", strings.Repeat("3", 10))
	h.getVal("small4", strings.Repeat("4", 10))
	v := h.db.s.version()
	if v.tLen(0) <= 1 {
		t.Errorf("tables-0 less than one")
	}
	v.release()
}
// TestDB_CompactionsGenerateMultipleFiles verifies compacting a large
// level-0 table splits the output into multiple level-1 files.
func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		WriteBuffer:                  10000000,
		Compression:                  opt.NoCompression,
	})
	defer h.close()
	v := h.db.s.version()
	if v.tLen(0) > 0 {
		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
	}
	v.release()
	n := 80
	// Write 8MB (80 values, each 100K)
	for i := 0; i < n; i++ {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
	}
	// Reopening moves updates to level-0
	h.reopenDB()
	h.compactRangeAt(0, "", "")
	v = h.db.s.version()
	if v.tLen(0) > 0 {
		t.Errorf("level-0 tables more than 0, got %d", v.tLen(0))
	}
	if v.tLen(1) <= 1 {
		t.Errorf("level-1 tables less than 1, got %d", v.tLen(1))
	}
	v.release()
	for i := 0; i < n; i++ {
		h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10))
	}
}
// TestDB_RepeatedWritesToSameKey repeatedly overwrites one key with a value
// twice the write buffer and asserts the table count stays bounded (i.e.
// compaction keeps up and obsolete versions are collapsed).
func TestDB_RepeatedWritesToSameKey(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 100000})
	defer h.close()
	maxTables := h.o.GetWriteL0PauseTrigger() + 7
	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
	for i := 0; i < 5*maxTables; i++ {
		h.put("key", value)
		n := h.totalTables()
		if n > maxTables {
			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
		}
	}
}
// TestDB_RepeatedWritesToSameKeyAfterReopen is the same bounded-table-count
// check as TestDB_RepeatedWritesToSameKey, but run on a reopened DB.
func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		WriteBuffer:                  100000,
	})
	defer h.close()
	h.reopenDB()
	maxTables := h.o.GetWriteL0PauseTrigger() + 7
	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
	for i := 0; i < 5*maxTables; i++ {
		h.put("key", value)
		n := h.totalTables()
		if n > maxTables {
			t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i)
		}
	}
}
// TestDB_SparseMerge checks that sparse updates across a mostly-unchanged
// key space do not trigger compactions that merge huge contiguous ranges;
// the next-level overlap must stay bounded after each compaction step.
func TestDB_SparseMerge(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, Compression: opt.NoCompression})
	defer h.close()
	h.putMulti(7, "A", "Z")
	// Suppose there is:
	// small amount of data with prefix A
	// large amount of data with prefix B
	// small amount of data with prefix C
	// and that recent updates have made small changes to all three prefixes.
	// Check that we do not do a compaction that merges all of B in one shot.
	h.put("A", "va")
	value := strings.Repeat("x", 1000)
	for i := 0; i < 100000; i++ {
		h.put(fmt.Sprintf("B%010d", i), value)
	}
	h.put("C", "vc")
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.waitCompaction()
	// Make sparse update
	h.put("A", "va2")
	h.put("B100", "bvalue2")
	h.put("C", "vc2")
	h.compactMem()
	h.waitCompaction()
	h.maxNextLevelOverlappingBytes(20 * 1048576)
	h.compactRangeAt(0, "", "")
	h.waitCompaction()
	h.maxNextLevelOverlappingBytes(20 * 1048576)
	h.compactRangeAt(1, "", "")
	h.waitCompaction()
	h.maxNextLevelOverlappingBytes(20 * 1048576)
}
// TestDB_SizeOf verifies SizeOf approximations: zero for an empty DB and
// for memtable-only data, and proportional sizes (between s1 and s2 per
// entry) once data is in tables, stable across reopens and compactions.
func TestDB_SizeOf(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		Compression:                  opt.NoCompression,
		WriteBuffer:                  10000000,
	})
	defer h.close()
	h.sizeAssert("", "xyz", 0, 0)
	h.reopenDB()
	h.sizeAssert("", "xyz", 0, 0)
	// Write 8MB (80 values, each 100K)
	n := 80
	s1 := 100000 // lower bound per entry
	s2 := 105000 // upper bound per entry (allows table overhead)
	for i := 0; i < n; i++ {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10))
	}
	// 0 because SizeOf() does not account for memtable space
	h.sizeAssert("", numKey(50), 0, 0)
	for r := 0; r < 3; r++ {
		h.reopenDB()
		for cs := 0; cs < n; cs += 10 {
			for i := 0; i < n; i += 10 {
				h.sizeAssert("", numKey(i), int64(s1*i), int64(s2*i))
				h.sizeAssert("", numKey(i)+".suffix", int64(s1*(i+1)), int64(s2*(i+1)))
				h.sizeAssert(numKey(i), numKey(i+10), int64(s1*10), int64(s2*10))
			}
			h.sizeAssert("", numKey(50), int64(s1*50), int64(s2*50))
			h.sizeAssert("", numKey(50)+".suffix", int64(s1*50), int64(s2*50))
			h.compactRangeAt(0, numKey(cs), numKey(cs+9))
		}
		v := h.db.s.version()
		if v.tLen(0) != 0 {
			t.Errorf("level-0 tables was not zero, got %d", v.tLen(0))
		}
		if v.tLen(1) == 0 {
			t.Error("level-1 tables was zero")
		}
		v.release()
	}
}
// TestDB_SizeOf_MixOfSmallAndLarge checks SizeOf with entries of widely
// varying sizes: cumulative prefix sizes must track the running sum within
// a small per-entry tolerance.
func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		Compression:                  opt.NoCompression,
	})
	defer h.close()
	sizes := []int64{
		10000,
		10000,
		100000,
		10000,
		100000,
		10000,
		300000,
		10000,
	}
	for i, n := range sizes {
		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10))
	}
	for r := 0; r < 3; r++ {
		h.reopenDB()
		var x int64 // running sum of sizes before entry i
		for i, n := range sizes {
			y := x
			if i > 0 {
				y += 1000 // slack for table overhead
			}
			h.sizeAssert("", numKey(i), x, y)
			x += n
		}
		h.sizeAssert(numKey(3), numKey(5), 110000, 111000)
		h.compactRangeAt(0, "", "")
	}
}
// TestDB_Snapshot takes three snapshots interleaved with overwrites and
// verifies each continues to see its own version until released, in an
// out-of-order release sequence (s3, s1, s2).
func TestDB_Snapshot(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.put("foo", "v1")
		s1 := h.getSnapshot()
		h.put("foo", "v2")
		s2 := h.getSnapshot()
		h.put("foo", "v3")
		s3 := h.getSnapshot()
		h.put("foo", "v4")
		h.getValr(s1, "foo", "v1")
		h.getValr(s2, "foo", "v2")
		h.getValr(s3, "foo", "v3")
		h.getVal("foo", "v4")
		s3.Release()
		h.getValr(s1, "foo", "v1")
		h.getValr(s2, "foo", "v2")
		h.getVal("foo", "v4")
		s1.Release()
		h.getValr(s2, "foo", "v2")
		h.getVal("foo", "v4")
		s2.Release()
		h.getVal("foo", "v4")
	})
}
// TestDB_SnapshotList unit-tests the snapshot bookkeeping directly: minSeq
// must always equal the sequence number of the oldest live snapshot.
func TestDB_SnapshotList(t *testing.T) {
	db := &DB{snapsList: list.New()}
	e0a := db.acquireSnapshot() // seq 0
	e0b := db.acquireSnapshot() // seq 0
	db.seq = 1
	e1 := db.acquireSnapshot() // seq 1
	db.seq = 2
	e2 := db.acquireSnapshot() // seq 2
	if db.minSeq() != 0 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e0a)
	// e0b still holds seq 0.
	if db.minSeq() != 0 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e2)
	if db.minSeq() != 0 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e0b)
	// Oldest live snapshot is now e1 at seq 1.
	if db.minSeq() != 1 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	e2 = db.acquireSnapshot() // re-acquire at seq 2
	if db.minSeq() != 1 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e1)
	if db.minSeq() != 2 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
	db.releaseSnapshot(e2)
	if db.minSeq() != 2 {
		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
	}
}
// TestDB_HiddenValuesAreRemoved checks that a merging compaction into the
// base level for a key drops both the deletion marker and the value it
// hides, while a compaction at a higher level may only drop the marker.
func TestDB_HiddenValuesAreRemoved(t *testing.T) {
	trun(t, func(h *dbHarness) {
		s := h.db.s

		// Force memdb flushes to land on level m.
		m := 2
		h.db.memdbMaxLevel = m

		h.put("foo", "v1")
		h.compactMem()
		v := s.version()
		num := v.tLen(m)
		v.release()
		if num != 1 {
			t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
		}

		// Place a table at level last-1 to prevent merging with preceding mutation
		h.put("a", "begin")
		h.put("z", "end")
		h.compactMem()
		v = s.version()
		if v.tLen(m) != 1 {
			t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
		}
		if v.tLen(m-1) != 1 {
			t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
		}
		v.release()

		// Delete then overwrite "foo": all three entries (newest first)
		// remain visible until compaction prunes them.
		h.delete("foo")
		h.put("foo", "v2")
		h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
		h.compactMem()
		h.allEntriesFor("foo", "[ v2, DEL, v1 ]")
		h.compactRangeAt(m-2, "", "z")
		// DEL eliminated, but v1 remains because we aren't compacting that level
		// (DEL can be eliminated because v2 hides v1).
		h.allEntriesFor("foo", "[ v2, v1 ]")
		h.compactRangeAt(m-1, "", "")
		// Merging last-1 w/ last, so we are the base level for "foo", so
		// DEL is removed. (as is v1).
		h.allEntriesFor("foo", "[ v2 ]")
	})
}
// TestDB_DeletionMarkers2 mirrors TestDB_HiddenValuesAreRemoved but never
// rewrites "foo": the deletion marker must survive compactions that still
// overlap a lower table and disappear only once merged into the base level.
func TestDB_DeletionMarkers2(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()
	s := h.db.s

	// Force memdb flushes to land on level m.
	m := 2
	h.db.memdbMaxLevel = m

	h.put("foo", "v1")
	h.compactMem()
	v := s.version()
	num := v.tLen(m)
	v.release()
	if num != 1 {
		t.Errorf("invalid level-%d len, want=1 got=%d", m, num)
	}

	// Place a table at level last-1 to prevent merging with preceding mutation
	h.put("a", "begin")
	h.put("z", "end")
	h.compactMem()
	v = s.version()
	if v.tLen(m) != 1 {
		t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m))
	}
	if v.tLen(m-1) != 1 {
		t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1))
	}
	v.release()

	h.delete("foo")
	h.allEntriesFor("foo", "[ DEL, v1 ]")
	h.compactMem() // Moves to level last-2
	h.allEntriesFor("foo", "[ DEL, v1 ]")
	h.compactRangeAt(m-2, "", "")
	// DEL kept: "last" file overlaps
	h.allEntriesFor("foo", "[ DEL, v1 ]")
	h.compactRangeAt(m-1, "", "")
	// Merging last-1 w/ last, so we are the base level for "foo", so
	// DEL is removed. (as is v1).
	h.allEntriesFor("foo", "[ ]")
}
// TestDB_CompactionTableOpenError verifies that table-open failures during
// compaction do not lose data: after reopening the DB and clearing the
// injected error, every previously written key must still be readable.
func TestDB_CompactionTableOpenError(t *testing.T) {
	// OpenFilesCacheCapacity: -1 disables the open-files cache so each
	// table access goes through a storage open, letting the injected
	// open error take effect.
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		OpenFilesCacheCapacity:       -1,
	})
	defer h.close()

	h.db.memdbMaxLevel = 2

	im := 10
	jm := 10
	// Two rounds of im memdb flushes, im*jm keys per round.
	for r := 0; r < 2; r++ {
		for i := 0; i < im; i++ {
			for j := 0; j < jm; j++ {
				h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
			}
			h.compactMem()
		}
	}

	if n := h.totalTables(); n != im*2 {
		t.Errorf("total tables is %d, want %d", n, im*2)
	}

	// Inject an error on every table open, then trigger a compaction that
	// is expected to fail.
	h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, errors.New("open error during table compaction"))
	go h.db.CompactRange(util.Range{})
	if err := h.db.compTriggerWait(h.db.tcompCmdC); err != nil {
		t.Log("compaction error: ", err)
	}

	// Reopen the DB, clear the injected error, and read everything back;
	// the failed compaction must not have lost any key.
	h.closeDB0()
	h.openDB()
	h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, nil)

	for i := 0; i < im; i++ {
		for j := 0; j < jm; j++ {
			h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j))
		}
	}
}
// TestDB_OverlapInLevel0 reproduces an upstream LevelDB bug: a memtable
// compaction must detect overlap with existing level-0 tables so a
// deletion is not placed in a deeper level than the values it deletes.
func TestDB_OverlapInLevel0(t *testing.T) {
	trun(t, func(h *dbHarness) {
		h.db.memdbMaxLevel = 2

		// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
		h.put("100", "v100")
		h.put("999", "v999")
		h.compactMem()
		h.delete("100")
		h.delete("999")
		h.compactMem()
		h.tablesPerLevel("0,1,1")

		// Make files spanning the following ranges in level-0:
		//   files[0]  200 .. 900
		//   files[1]  300 .. 500
		// Note that files are sorted by min key.
		h.put("300", "v300")
		h.put("500", "v500")
		h.compactMem()
		h.put("200", "v200")
		h.put("600", "v600")
		h.put("900", "v900")
		h.compactMem()
		h.tablesPerLevel("2,1,1")

		// Compact away the placeholder files we created initially
		h.compactRangeAt(1, "", "")
		h.compactRangeAt(2, "", "")
		h.tablesPerLevel("2")

		// Do a memtable compaction. Before bug-fix, the compaction would
		// not detect the overlap with level-0 files and would incorrectly place
		// the deletion in a deeper level.
		h.delete("600")
		h.compactMem()
		h.tablesPerLevel("3")
		h.get("600", false)
	})
}
// TestDB_L0_CompactionBug_Issue44_a ports LevelDB issue #44 (part a):
// deletes and re-puts of the same key interleaved with reopens must leave
// the final put visible, both before and after background compaction.
func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.reopenDB()
	h.put("b", "v")
	h.reopenDB()
	h.delete("b")
	h.delete("a")
	h.reopenDB()
	h.delete("a")
	h.reopenDB()
	h.put("a", "v")
	h.reopenDB()
	h.reopenDB()
	// "a" must be visible now, and still after compaction runs.
	h.getKeyVal("(a->v)")
	h.waitCompaction()
	h.getKeyVal("(a->v)")
}
// TestDB_L0_CompactionBug_Issue44_b ports LevelDB issue #44 (part b):
// a denser mix of empty-key puts, deletes and reopens; the surviving
// entries must be identical before and after compaction.
func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.reopenDB()
	h.put("", "")
	h.reopenDB()
	h.delete("e")
	h.put("", "")
	h.reopenDB()
	h.put("c", "cv")
	h.reopenDB()
	h.put("", "")
	h.reopenDB()
	h.put("", "")
	h.waitCompaction()
	h.reopenDB()
	h.put("d", "dv")
	h.reopenDB()
	h.put("", "")
	h.reopenDB()
	h.delete("d")
	h.delete("b")
	h.reopenDB()
	// Only the empty key and "c" survive, before and after compaction.
	h.getKeyVal("(->)(c->cv)")
	h.waitCompaction()
	h.getKeyVal("(->)(c->cv)")
}
  1250. func TestDB_SingleEntryMemCompaction(t *testing.T) {
  1251. trun(t, func(h *dbHarness) {
  1252. for i := 0; i < 10; i++ {
  1253. h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
  1254. h.compactMem()
  1255. h.put("key", strings.Repeat("v", opt.DefaultBlockSize))
  1256. h.compactMem()
  1257. h.put("k", "v")
  1258. h.compactMem()
  1259. h.put("", "")
  1260. h.compactMem()
  1261. h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2))
  1262. h.compactMem()
  1263. }
  1264. })
  1265. }
// TestDB_ManifestWriteError checks crash-safety of MANIFEST updates: a
// write failure (pass i=0) or sync failure (pass i=1) while recording a
// compaction must not lose already-committed data after the DB reopens.
func TestDB_ManifestWriteError(t *testing.T) {
	for i := 0; i < 2; i++ {
		func() {
			h := newDbHarness(t)
			defer h.close()

			h.put("foo", "bar")
			h.getVal("foo", "bar")

			// Mem compaction (will succeed)
			h.compactMem()
			h.getVal("foo", "bar")
			v := h.db.s.version()
			if n := v.tLen(0); n != 1 {
				t.Errorf("invalid total tables, want=1 got=%d", n)
			}
			v.release()

			// Inject a manifest failure: write error on the first pass,
			// sync error on the second.
			if i == 0 {
				h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, errors.New("manifest write error"))
			} else {
				h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, errors.New("manifest sync error"))
			}

			// Merging compaction (will fail)
			h.compactRangeAtErr(0, "", "", true)

			h.db.Close()
			h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, nil)
			h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, nil)

			// Should not lose data
			h.openDB()
			h.getVal("foo", "bar")
		}()
	}
}
  1297. func assertErr(t *testing.T, err error, wanterr bool) {
  1298. if err != nil {
  1299. if wanterr {
  1300. t.Log("AssertErr: got error (expected): ", err)
  1301. } else {
  1302. t.Error("AssertErr: got error: ", err)
  1303. }
  1304. } else if wanterr {
  1305. t.Error("AssertErr: expect error")
  1306. }
  1307. }
  1308. func TestDB_ClosedIsClosed(t *testing.T) {
  1309. h := newDbHarness(t)
  1310. db := h.db
  1311. var iter, iter2 iterator.Iterator
  1312. var snap *Snapshot
  1313. func() {
  1314. defer h.close()
  1315. h.put("k", "v")
  1316. h.getVal("k", "v")
  1317. iter = db.NewIterator(nil, h.ro)
  1318. iter.Seek([]byte("k"))
  1319. testKeyVal(t, iter, "k->v")
  1320. var err error
  1321. snap, err = db.GetSnapshot()
  1322. if err != nil {
  1323. t.Fatal("GetSnapshot: got error: ", err)
  1324. }
  1325. h.getValr(snap, "k", "v")
  1326. iter2 = snap.NewIterator(nil, h.ro)
  1327. iter2.Seek([]byte("k"))
  1328. testKeyVal(t, iter2, "k->v")
  1329. h.put("foo", "v2")
  1330. h.delete("foo")
  1331. // closing DB
  1332. iter.Release()
  1333. iter2.Release()
  1334. }()
  1335. assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true)
  1336. _, err := db.Get([]byte("k"), h.ro)
  1337. assertErr(t, err, true)
  1338. if iter.Valid() {
  1339. t.Errorf("iter.Valid should false")
  1340. }
  1341. assertErr(t, iter.Error(), false)
  1342. testKeyVal(t, iter, "->")
  1343. if iter.Seek([]byte("k")) {
  1344. t.Errorf("iter.Seek should false")
  1345. }
  1346. assertErr(t, iter.Error(), true)
  1347. assertErr(t, iter2.Error(), false)
  1348. _, err = snap.Get([]byte("k"), h.ro)
  1349. assertErr(t, err, true)
  1350. _, err = db.GetSnapshot()
  1351. assertErr(t, err, true)
  1352. iter3 := db.NewIterator(nil, h.ro)
  1353. assertErr(t, iter3.Error(), true)
  1354. iter3 = snap.NewIterator(nil, h.ro)
  1355. assertErr(t, iter3.Error(), true)
  1356. assertErr(t, db.Delete([]byte("k"), h.wo), true)
  1357. _, err = db.GetProperty("leveldb.stats")
  1358. assertErr(t, err, true)
  1359. _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}})
  1360. assertErr(t, err, true)
  1361. assertErr(t, db.CompactRange(util.Range{}), true)
  1362. assertErr(t, db.Close(), true)
  1363. }
  1364. type numberComparer struct{}
  1365. func (numberComparer) num(x []byte) (n int) {
  1366. fmt.Sscan(string(x[1:len(x)-1]), &n)
  1367. return
  1368. }
  1369. func (numberComparer) Name() string {
  1370. return "test.NumberComparer"
  1371. }
  1372. func (p numberComparer) Compare(a, b []byte) int {
  1373. return p.num(a) - p.num(b)
  1374. }
  1375. func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
  1376. func (numberComparer) Successor(dst, b []byte) []byte { return nil }
// TestDB_CustomComparer runs basic operations under numberComparer, which
// treats "[10]" and "[0xa]" as the same key, exercising lookups, memdb
// flushes and range compactions with a non-default ordering.
func TestDB_CustomComparer(t *testing.T) {
	// Small write buffer to force frequent memdb flushes.
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		Comparer:                     numberComparer{},
		WriteBuffer:                  1000,
	})
	defer h.close()

	h.put("[10]", "ten")
	h.put("[0x14]", "twenty")
	for i := 0; i < 2; i++ {
		// Decimal and hex spellings of the same number are one key.
		h.getVal("[10]", "ten")
		h.getVal("[0xa]", "ten")
		h.getVal("[20]", "twenty")
		h.getVal("[0x14]", "twenty")
		h.get("[15]", false)
		h.get("[0xf]", false)
		h.compactMem()
		h.compactRange("[0]", "[9999]")
	}

	for n := 0; n < 2; n++ {
		for i := 0; i < 100; i++ {
			v := fmt.Sprintf("[%d]", i*10)
			h.put(v, v)
		}
		h.compactMem()
		h.compactRange("[0]", "[1000000]")
	}
}
// TestDB_ManualCompaction checks CompactRange against ranges that fall
// before, after, and across the populated key space, asserting the table
// count per level after each step.
func TestDB_ManualCompaction(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.db.memdbMaxLevel = 2

	h.putMulti(3, "p", "q")
	h.tablesPerLevel("1,1,1")

	// Compaction range falls before files
	h.compactRange("", "c")
	h.tablesPerLevel("1,1,1")

	// Compaction range falls after files
	h.compactRange("r", "z")
	h.tablesPerLevel("1,1,1")

	// Compaction range overlaps files
	h.compactRange("p1", "p9")
	h.tablesPerLevel("0,0,1")

	// Populate a different range
	h.putMulti(3, "c", "e")
	h.tablesPerLevel("1,1,2")

	// Compact just the new range
	h.compactRange("b", "f")
	h.tablesPerLevel("0,0,2")

	// Compact all
	h.putMulti(1, "a", "z")
	h.tablesPerLevel("0,1,2")
	h.compactRange("", "")
	h.tablesPerLevel("0,0,1")
}
// TestDB_BloomFilter measures sstable read amplification under a 10-bit
// bloom filter: present keys should cost roughly one table read each and
// missing keys should almost never touch a table.
func TestDB_BloomFilter(t *testing.T) {
	// Block cache disabled so every lookup's table reads hit the storage
	// counters.
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		DisableBlockCache:            true,
		Filter:                       filter.NewBloomFilter(10),
	})
	defer h.close()

	key := func(i int) string {
		return fmt.Sprintf("key%06d", i)
	}

	const n = 10000

	// Populate multiple layers
	for i := 0; i < n; i++ {
		h.put(key(i), key(i))
	}
	h.compactMem()
	h.compactRange("a", "z")
	for i := 0; i < n; i += 100 {
		h.put(key(i), key(i))
	}
	h.compactMem()

	// Prevent auto compactions triggered by seeks
	h.stor.Stall(testutil.ModeSync, storage.TypeTable)

	// Lookup present keys. Should rarely read from small sstable.
	h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable)
	for i := 0; i < n; i++ {
		h.getVal(key(i), key(i))
	}
	cnt, _ := h.stor.Counter(testutil.ModeRead, storage.TypeTable)
	t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
	// Allow up to 2% extra reads over the one-read-per-key ideal.
	if min, max := n, n+2*n/100; cnt < min || cnt > max {
		t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
	}

	// Lookup missing keys. Should rarely read from either sstable.
	h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable)
	for i := 0; i < n; i++ {
		h.get(key(i)+".missing", false)
	}
	cnt, _ = h.stor.Counter(testutil.ModeRead, storage.TypeTable)
	t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
	// The filter should reject nearly all missing keys: at most 3% reads.
	if max := 3 * n / 100; cnt > max {
		t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
	}

	h.stor.Release(testutil.ModeSync, storage.TypeTable)
}
// TestDB_Concurrent runs n goroutines doing a random mix of puts and gets
// for a fixed duration. Values encode "key.goroutine.counter"; a value
// read back must match its key, and its counter must never run ahead of
// the writing goroutine's published progress in cnt.
func TestDB_Concurrent(t *testing.T) {
	const n, secs, maxkey = 4, 6, 1000
	h := newDbHarness(t)
	defer h.close()

	runtime.GOMAXPROCS(runtime.NumCPU())

	var (
		closeWg sync.WaitGroup
		stop    uint32
		cnt     [n]uint32
	)

	for i := 0; i < n; i++ {
		closeWg.Add(1)
		go func(i int) {
			var put, get, found uint
			defer func() {
				t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d",
					i, cnt[i], put, get, found, get-found)
				closeWg.Done()
			}()

			rnd := rand.New(rand.NewSource(int64(1000 + i)))
			for atomic.LoadUint32(&stop) == 0 {
				// Own counter; only this goroutine writes cnt[i].
				x := cnt[i]

				k := rnd.Intn(maxkey)
				kstr := fmt.Sprintf("%016d", k)

				if (rnd.Int() % 2) > 0 {
					put++
					h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x))
				} else {
					get++
					v, err := h.db.Get([]byte(kstr), h.ro)
					if err == nil {
						found++
						rk, ri, rx := 0, -1, uint32(0)
						fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx)
						if rk != k {
							t.Errorf("invalid key want=%d got=%d", k, rk)
						}
						if ri < 0 || ri >= n {
							t.Error("invalid goroutine number: ", ri)
						} else {
							// The writer's counter at write time can not
							// exceed its current published value.
							tx := atomic.LoadUint32(&(cnt[ri]))
							if rx > tx {
								t.Errorf("invalid seq number, %d > %d ", rx, tx)
							}
						}
					} else if err != ErrNotFound {
						t.Error("Get: got error: ", err)
						return
					}
				}
				atomic.AddUint32(&cnt[i], 1)
			}
		}(i)
	}

	time.Sleep(secs * time.Second)
	atomic.StoreUint32(&stop, 1)
	closeWg.Wait()
}
  1535. func TestDB_ConcurrentIterator(t *testing.T) {
  1536. const n, n2 = 4, 1000
  1537. h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 30})
  1538. defer h.close()
  1539. runtime.GOMAXPROCS(runtime.NumCPU())
  1540. var (
  1541. closeWg sync.WaitGroup
  1542. stop uint32
  1543. )
  1544. for i := 0; i < n; i++ {
  1545. closeWg.Add(1)
  1546. go func(i int) {
  1547. for k := 0; atomic.LoadUint32(&stop) == 0; k++ {
  1548. h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
  1549. }
  1550. closeWg.Done()
  1551. }(i)
  1552. }
  1553. for i := 0; i < n; i++ {
  1554. closeWg.Add(1)
  1555. go func(i int) {
  1556. for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- {
  1557. h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10))
  1558. }
  1559. closeWg.Done()
  1560. }(i)
  1561. }
  1562. cmp := comparer.DefaultComparer
  1563. for i := 0; i < n2; i++ {
  1564. closeWg.Add(1)
  1565. go func(i int) {
  1566. it := h.db.NewIterator(nil, nil)
  1567. var pk []byte
  1568. for it.Next() {
  1569. kk := it.Key()
  1570. if cmp.Compare(kk, pk) <= 0 {
  1571. t.Errorf("iter %d: %q is successor of %q", i, pk, kk)
  1572. }
  1573. pk = append(pk[:0], kk...)
  1574. var k, vk, vi int
  1575. if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil {
  1576. t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err)
  1577. } else if n < 1 {
  1578. t.Errorf("iter %d: Cannot parse key %q", i, it.Key())
  1579. }
  1580. if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil {
  1581. t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err)
  1582. } else if n < 2 {
  1583. t.Errorf("iter %d: Cannot parse value %q", i, it.Value())
  1584. }
  1585. if vk != k {
  1586. t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk)
  1587. }
  1588. }
  1589. if err := it.Error(); err != nil {
  1590. t.Errorf("iter %d: Got error: %v", i, err)
  1591. }
  1592. it.Release()
  1593. closeWg.Done()
  1594. }(i)
  1595. }
  1596. atomic.StoreUint32(&stop, 1)
  1597. closeWg.Wait()
  1598. }
// TestDB_ConcurrentWrite mixes single-key Put goroutines with batch-write
// goroutines, checking read-your-write visibility immediately after every
// put and after every batch commit.
func TestDB_ConcurrentWrite(t *testing.T) {
	const n, bk, niter = 10, 3, 10000
	h := newDbHarness(t)
	defer h.close()

	runtime.GOMAXPROCS(runtime.NumCPU())

	var wg sync.WaitGroup

	// Single-key writers.
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for k := 0; k < niter; k++ {
				kstr := fmt.Sprintf("put-%d.%d", i, k)
				vstr := fmt.Sprintf("v%d", k)
				h.put(kstr, vstr)
				// Key should immediately available after put returns.
				h.getVal(kstr, vstr)
			}
		}(i)
	}

	// Batch writers; each goroutine owns its own batch instance.
	for i := 0; i < n; i++ {
		wg.Add(1)
		batch := &Batch{}
		go func(i int) {
			defer wg.Done()
			for k := 0; k < niter; k++ {
				batch.Reset()
				for j := 0; j < bk; j++ {
					batch.Put([]byte(fmt.Sprintf("batch-%d.%d.%d", i, k, j)), []byte(fmt.Sprintf("v%d", k)))
				}
				h.write(batch)
				// Key should immediately available after put returns.
				for j := 0; j < bk; j++ {
					h.getVal(fmt.Sprintf("batch-%d.%d.%d", i, k, j), fmt.Sprintf("v%d", k))
				}
			}
		}(i)
	}

	wg.Wait()
}
// TestDB_CreateReopenDbOnFile creates/reopens a DB on the same on-disk
// path three times through an explicitly managed storage.OpenFile handle,
// writing and closing cleanly each time.
func TestDB_CreateReopenDbOnFile(t *testing.T) {
	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
	if err := os.RemoveAll(dbpath); err != nil {
		t.Fatal("cannot remove old db: ", err)
	}
	defer os.RemoveAll(dbpath)

	for i := 0; i < 3; i++ {
		stor, err := storage.OpenFile(dbpath, false)
		if err != nil {
			t.Fatalf("(%d) cannot open storage: %s", i, err)
		}
		db, err := Open(stor, nil)
		if err != nil {
			t.Fatalf("(%d) cannot open db: %s", i, err)
		}
		if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
			t.Fatalf("(%d) cannot write to db: %s", i, err)
		}
		if err := db.Close(); err != nil {
			t.Fatalf("(%d) cannot close db: %s", i, err)
		}
		if err := stor.Close(); err != nil {
			t.Fatalf("(%d) cannot close storage: %s", i, err)
		}
	}
}
// TestDB_CreateReopenDbOnFile2 is the OpenFile-convenience variant of
// TestDB_CreateReopenDbOnFile: open, write and close the same path three
// times without managing the storage handle explicitly.
func TestDB_CreateReopenDbOnFile2(t *testing.T) {
	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
	if err := os.RemoveAll(dbpath); err != nil {
		t.Fatal("cannot remove old db: ", err)
	}
	defer os.RemoveAll(dbpath)

	for i := 0; i < 3; i++ {
		db, err := OpenFile(dbpath, nil)
		if err != nil {
			t.Fatalf("(%d) cannot open db: %s", i, err)
		}
		if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil {
			t.Fatalf("(%d) cannot write to db: %s", i, err)
		}
		if err := db.Close(); err != nil {
			t.Fatalf("(%d) cannot close db: %s", i, err)
		}
	}
}
// TestDB_DeletionMarkersOnMemdb verifies that a delete living only in the
// memdb hides a value that was already flushed to a table.
func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
	h := newDbHarness(t)
	defer h.close()

	h.put("foo", "v1")
	h.compactMem()
	h.delete("foo")
	h.get("foo", false)
	h.getKeyVal("")
}
// TestDB_LeveldbIssue178 ports LevelDB issue #178: keys deleted in a
// second range must not reappear after a manual compaction of the first,
// still-live range.
func TestDB_LeveldbIssue178(t *testing.T) {
	nKeys := (opt.DefaultCompactionTableSize / 30) * 5
	key1 := func(i int) string {
		return fmt.Sprintf("my_key_%d", i)
	}
	key2 := func(i int) string {
		return fmt.Sprintf("my_key_%d_xxx", i)
	}

	// Disable compression since it affects the creation of layers and the
	// code below is trying to test against a very specific scenario.
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		Compression:                  opt.NoCompression,
	})
	defer h.close()

	// Create first key range.
	batch := new(Batch)
	for i := 0; i < nKeys; i++ {
		batch.Put([]byte(key1(i)), []byte("value for range 1 key"))
	}
	h.write(batch)

	// Create second key range.
	batch.Reset()
	for i := 0; i < nKeys; i++ {
		batch.Put([]byte(key2(i)), []byte("value for range 2 key"))
	}
	h.write(batch)

	// Delete second key range.
	batch.Reset()
	for i := 0; i < nKeys; i++ {
		batch.Delete([]byte(key2(i)))
	}
	h.write(batch)
	h.waitMemCompaction()

	// Run manual compaction.
	h.compactRange(key1(0), key1(nKeys-1))

	// Checking the keys.
	h.assertNumKeys(nKeys)
}
  1731. func TestDB_LeveldbIssue200(t *testing.T) {
  1732. h := newDbHarness(t)
  1733. defer h.close()
  1734. h.put("1", "b")
  1735. h.put("2", "c")
  1736. h.put("3", "d")
  1737. h.put("4", "e")
  1738. h.put("5", "f")
  1739. iter := h.db.NewIterator(nil, h.ro)
  1740. // Add an element that should not be reflected in the iterator.
  1741. h.put("25", "cd")
  1742. iter.Seek([]byte("5"))
  1743. assertBytes(t, []byte("5"), iter.Key())
  1744. iter.Prev()
  1745. assertBytes(t, []byte("4"), iter.Key())
  1746. iter.Prev()
  1747. assertBytes(t, []byte("3"), iter.Key())
  1748. iter.Next()
  1749. assertBytes(t, []byte("4"), iter.Key())
  1750. iter.Next()
  1751. assertBytes(t, []byte("5"), iter.Key())
  1752. }
// TestDB_GoleveldbIssue74 runs a writer and a reader concurrently for a
// fixed duration. The writer commits n KEY/PTR pairs per round, verifies
// them through a snapshot, then batch-deletes them; the reader walks PTR
// entries through its own snapshot. Each snapshot must observe either a
// complete round (k == n) or nothing — never a partial batch.
//
// NOTE(review): the goroutines below call t.Fatalf, which the testing
// package only specifies for the test's own goroutine; t.Error plus an
// early return would be the safe form. Left unchanged here.
func TestDB_GoleveldbIssue74(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		DisableLargeBatchTransaction: true,
		WriteBuffer:                  1 * opt.MiB,
	})
	defer h.close()

	const n, dur = 10000, 5 * time.Second

	runtime.GOMAXPROCS(runtime.NumCPU())

	until := time.Now().Add(dur)
	wg := new(sync.WaitGroup)
	wg.Add(2)
	var done uint32

	// Writer: per round i, put KEY%06d -> key+iv and PTR%06d -> key, then
	// verify and delete everything via a snapshot.
	go func() {
		var i int
		defer func() {
			t.Logf("WRITER DONE #%d", i)
			atomic.StoreUint32(&done, 1)
			wg.Done()
		}()

		b := new(Batch)
		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
			iv := fmt.Sprintf("VAL%010d", i)
			for k := 0; k < n; k++ {
				key := fmt.Sprintf("KEY%06d", k)
				b.Put([]byte(key), []byte(key+iv))
				b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
			}
			h.write(b)

			b.Reset()
			snap := h.getSnapshot()
			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
			var k int
			for ; iter.Next(); k++ {
				ptrKey := iter.Key()
				key := iter.Value()

				if _, err := snap.Get(ptrKey, nil); err != nil {
					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
				}
				if value, err := snap.Get(key, nil); err != nil {
					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
				} else if string(value) != string(key)+iv {
					t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
				}

				b.Delete(key)
				b.Delete(ptrKey)
			}
			h.write(b)
			iter.Release()
			snap.Release()
			// The snapshot must have seen the whole round.
			if k != n {
				t.Fatalf("#%d %d != %d", i, k, n)
			}
		}
	}()

	// Reader: snapshots must never expose a partially written round.
	go func() {
		var i int
		defer func() {
			t.Logf("READER DONE #%d", i)
			atomic.StoreUint32(&done, 1)
			wg.Done()
		}()
		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
			snap := h.getSnapshot()
			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
			var prevValue string
			var k int
			for ; iter.Next(); k++ {
				ptrKey := iter.Key()
				key := iter.Value()

				if _, err := snap.Get(ptrKey, nil); err != nil {
					t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
				}

				// All values within a snapshot share one round suffix.
				if value, err := snap.Get(key, nil); err != nil {
					t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
				} else if prevValue != "" && string(value) != string(key)+prevValue {
					t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
				} else {
					prevValue = string(value[len(key):])
				}
			}
			iter.Release()
			snap.Release()
			// Either an empty view or the complete round.
			if k > 0 && k != n {
				t.Fatalf("#%d %d != %d", i, k, n)
			}
		}
	}()

	wg.Wait()
}
  1842. func TestDB_GetProperties(t *testing.T) {
  1843. h := newDbHarness(t)
  1844. defer h.close()
  1845. _, err := h.db.GetProperty("leveldb.num-files-at-level")
  1846. if err == nil {
  1847. t.Error("GetProperty() failed to detect missing level")
  1848. }
  1849. _, err = h.db.GetProperty("leveldb.num-files-at-level0")
  1850. if err != nil {
  1851. t.Error("got unexpected error", err)
  1852. }
  1853. _, err = h.db.GetProperty("leveldb.num-files-at-level0x")
  1854. if err == nil {
  1855. t.Error("GetProperty() failed to detect invalid level")
  1856. }
  1857. }
  1858. func TestDB_GoleveldbIssue72and83(t *testing.T) {
  1859. h := newDbHarnessWopt(t, &opt.Options{
  1860. DisableLargeBatchTransaction: true,
  1861. WriteBuffer: 1 * opt.MiB,
  1862. OpenFilesCacheCapacity: 3,
  1863. })
  1864. defer h.close()
  1865. const n, wn, dur = 10000, 100, 30 * time.Second
  1866. runtime.GOMAXPROCS(runtime.NumCPU())
  1867. randomData := func(prefix byte, i int) []byte {
  1868. data := make([]byte, 1+4+32+64+32)
  1869. _, err := crand.Reader.Read(data[1 : len(data)-8])
  1870. if err != nil {
  1871. panic(err)
  1872. }
  1873. data[0] = prefix
  1874. binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
  1875. binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
  1876. return data
  1877. }
  1878. keys := make([][]byte, n)
  1879. for i := range keys {
  1880. keys[i] = randomData(1, 0)
  1881. }
  1882. until := time.Now().Add(dur)
  1883. wg :=

Large files files are truncated, but you can click here to view the full file