Merge pull request #260 from pingcap/c4pt0r/mvcc-remove-snapshot-interface

kv: Remove engine.Snapshot interface.
This commit is contained in:
dongxu
2015-10-08 13:04:58 +08:00
47 changed files with 162 additions and 4641 deletions
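The commit message is terse, so here is a minimal sketch of the shape of such a change, with hypothetical names (this diff does not show TiDB's actual kv/engine definitions):

package kv // illustrative only, not TiDB's real API

// Before: the engine exposed an explicit snapshot handle for reads.
type Snapshot interface {
	Get(key []byte) ([]byte, error)
	Release()
}

type Engine interface {
	GetSnapshot() (Snapshot, error)
	Put(key, value []byte) error
}

// After: reads go through the engine (or the MVCC layer above it) directly,
// so the separate Snapshot abstraction can be dropped.
type EngineWithoutSnapshot interface {
	Get(key []byte) ([]byte, error)
	Put(key, value []byte) error
}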

Godeps/Godeps.json (generated)

@ -5,6 +5,11 @@
"./..."
],
"Deps": [
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.0-129-g51f99c8",
"Rev": "51f99c862475898df9773747d3accd05a7ca33c1"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",
"Comment": "v1.2-118-g3dd7008",
@ -18,11 +23,6 @@
"ImportPath": "github.com/juju/errors",
"Rev": "4567a5e69fd3130ca0d89f69478e7ac025b67452"
},
{
"ImportPath": "github.com/ngaut/bolt",
"Comment": "v1.0-117-g430b1ff",
"Rev": "430b1ff461fe33eb4655b5116d139c75497ede66"
},
{
"ImportPath": "github.com/ngaut/log",
"Rev": "a0a08fbeee2359177b429b973f61a519e1372bc7"


@ -617,5 +617,9 @@ Below is a list of public, open source projects that use Bolt:
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent and offers a JSON-over-HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
If you are using Bolt in a project please send a pull request to add it to the list.


@ -1,4 +1,4 @@
// +build !windows,!plan9
// +build !windows,!plan9,!solaris
package bolt
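The added !solaris constraint presumably pairs with the new Solaris-specific file that follows: Solaris has no flock(2), so the generic Unix locking code must be excluded there. A sketch of how the two build tags would split the platforms (filenames are assumptions, following boltdb's usual layout):

// bolt_unix.go (assumed name): flock-based locking on most Unixes.
// +build !windows,!plan9,!solaris

// bolt_unix_solaris.go (assumed name): fcntl-based locking for Solaris.
// +build solaris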


@ -0,0 +1,101 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
"golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
func flock(f *os.File, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Whence = 0
if exclusive {
lock.Type = syscall.F_WRLCK
} else {
lock.Type = syscall.F_RDLCK
}
err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(f *os.File) error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
if err := db.file.Sync(); err != nil {
return fmt.Errorf("file sync error: %s", err)
}
}
// Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := unix.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}
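These helpers are unexported, so only package bolt itself can call them; a minimal usage sketch of the acquire/release pattern (error handling abbreviated, timeout value illustrative):

// Sketch only: this would live inside package bolt, e.g. in Open.
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
	return err
}
defer f.Close()
// Take an exclusive lock; fail after one second instead of spinning forever.
if err := flock(f, true, 1*time.Second); err != nil {
	return err
}
defer funlock(f) // deferred last, so it runs before f.Close (LIFO)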


@ -346,7 +346,8 @@ func (b *Bucket) NextSequence() (uint64, error) {
// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
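A usage sketch of the documented contract: read inside the ForEach callback, but stage any mutations and apply them after iteration, since modifying the bucket from within the callback is undefined behavior:

err := db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("widgets"))
	var stale [][]byte
	if err := b.ForEach(func(k, v []byte) error {
		if len(v) == 0 {
			// Copy k: bolt's slices are only valid inside the transaction,
			// and we mutate the bucket after iteration.
			stale = append(stale, append([]byte(nil), k...))
		}
		return nil
	}); err != nil {
		return err
	}
	for _, k := range stale {
		if err := b.Delete(k); err != nil {
			return err
		}
	}
	return nil
})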


@ -344,7 +344,7 @@ func (cmd *DumpCommand) Run(args ...string) error {
for i, pageID := range pageIDs {
// Print a separator.
if i > 0 {
fmt.Fprintln(cmd.Stdout, "===============================================\n")
fmt.Fprintln(cmd.Stdout, "===============================================")
}
// Print page to stdout.
@ -465,7 +465,7 @@ func (cmd *PageCommand) Run(args ...string) error {
for i, pageID := range pageIDs {
// Print a separator.
if i > 0 {
fmt.Fprintln(cmd.Stdout, "===============================================\n")
fmt.Fprintln(cmd.Stdout, "===============================================")
}
// Retrieve page info and page size.
@ -917,7 +917,7 @@ func (cmd *BenchCommand) Run(args ...string) error {
// Write to the database.
var results BenchResults
if err := cmd.runWrites(db, options, &results); err != nil {
return fmt.Errorf("write: ", err)
return fmt.Errorf("write: %v", err)
}
// Read from the database.
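For context on this fix: the old call passed err without a format verb, so fmt appended an %!(EXTRA ...) diagnostic instead of the error text; go vet's printf check flags exactly this pattern. A quick illustration:

err := errors.New("disk full")
fmt.Println(fmt.Errorf("write: ", err))   // write: %!(EXTRA *errors.errorString=disk full)
fmt.Println(fmt.Errorf("write: %v", err)) // write: disk full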


@ -21,9 +21,6 @@ const version = 2
// Represents a marker value to indicate that a file is a Bolt DB.
const magic uint32 = 0xED0CDAED
// Default mmap size
const defaultSize = 500 * 1024 * 1024
// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
@ -194,7 +191,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
}
// Memory map the data file.
if err := db.mmap(defaultSize); err != nil {
if err := db.mmap(0); err != nil {
_ = db.close()
return nil, err
}
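With the hard-coded 500MB default gone, passing 0 presumably lets db.mmap derive the map size from the data file itself. A simplified sketch, from memory, of the doubling strategy boltdb uses (not the verbatim implementation):

// mmapSize returns a map size for the given file size: double from 32KB
// up to 1GB, then grow in 1GB steps.
func mmapSize(size int) int {
	const maxStep = 1 << 30 // 1GB
	for i := uint(15); i <= 30; i++ {
		if size <= 1<<i {
			return 1 << i
		}
	}
	// Past 1GB, round up to the next 1GB boundary.
	return ((size / maxStep) + 1) * maxStep
}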


@ -236,7 +236,8 @@ func (tx *Tx) close() {
var freelistPendingN = tx.db.freelist.pending_count()
var freelistAlloc = tx.db.freelist.size()
// Remove writer lock.
// Remove transaction ref & writer lock.
tx.db.rwtx = nil
tx.db.rwlock.Unlock()
// Merge statistics.
@ -250,7 +251,12 @@ func (tx *Tx) close() {
} else {
tx.db.removeTx(tx)
}
// Clear all references.
tx.db = nil
tx.meta = nil
tx.root = Bucket{tx: tx}
tx.pages = nil
}
// Copy writes the entire database to a writer.
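Clearing tx.db and the page references means a transaction used after close fails fast instead of touching freed pages; bolt's accessors check tx.db and return ErrTxClosed. A behavior sketch (not code from this commit):

tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
tx.Commit()

// The closed transaction holds no references; further use fails cleanly.
_, err := tx.CreateBucket([]byte("gadgets"))
fmt.Println(err == bolt.ErrTxClosed) // true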


@ -1,170 +0,0 @@
package bolt_test
import (
"bytes"
"encoding/binary"
"errors"
"hash/fnv"
"sync"
"testing"
"github.com/boltdb/bolt"
)
func validateBatchBench(b *testing.B, db *TestDB) {
var rollback = errors.New("sentinel error to cause rollback")
validate := func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte("bench"))
h := fnv.New32a()
buf := make([]byte, 4)
for id := uint32(0); id < 1000; id++ {
binary.LittleEndian.PutUint32(buf, id)
h.Reset()
h.Write(buf[:])
k := h.Sum(nil)
v := bucket.Get(k)
if v == nil {
b.Errorf("not found id=%d key=%x", id, k)
continue
}
if g, e := v, []byte("filler"); !bytes.Equal(g, e) {
b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e)
}
if err := bucket.Delete(k); err != nil {
return err
}
}
// should be empty now
c := bucket.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
b.Errorf("unexpected key: %x = %q", k, v)
}
return rollback
}
if err := db.Update(validate); err != nil && err != rollback {
b.Error(err)
}
}
func BenchmarkDBBatchAutomatic(b *testing.B) {
db := NewTestDB()
defer db.Close()
db.MustCreateBucket([]byte("bench"))
b.ResetTimer()
for i := 0; i < b.N; i++ {
start := make(chan struct{})
var wg sync.WaitGroup
for round := 0; round < 1000; round++ {
wg.Add(1)
go func(id uint32) {
defer wg.Done()
<-start
h := fnv.New32a()
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, id)
h.Write(buf[:])
k := h.Sum(nil)
insert := func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("bench"))
return b.Put(k, []byte("filler"))
}
if err := db.Batch(insert); err != nil {
b.Error(err)
return
}
}(uint32(round))
}
close(start)
wg.Wait()
}
b.StopTimer()
validateBatchBench(b, db)
}
func BenchmarkDBBatchSingle(b *testing.B) {
db := NewTestDB()
defer db.Close()
db.MustCreateBucket([]byte("bench"))
b.ResetTimer()
for i := 0; i < b.N; i++ {
start := make(chan struct{})
var wg sync.WaitGroup
for round := 0; round < 1000; round++ {
wg.Add(1)
go func(id uint32) {
defer wg.Done()
<-start
h := fnv.New32a()
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, id)
h.Write(buf[:])
k := h.Sum(nil)
insert := func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("bench"))
return b.Put(k, []byte("filler"))
}
if err := db.Update(insert); err != nil {
b.Error(err)
return
}
}(uint32(round))
}
close(start)
wg.Wait()
}
b.StopTimer()
validateBatchBench(b, db)
}
func BenchmarkDBBatchManual10x100(b *testing.B) {
db := NewTestDB()
defer db.Close()
db.MustCreateBucket([]byte("bench"))
b.ResetTimer()
for i := 0; i < b.N; i++ {
start := make(chan struct{})
var wg sync.WaitGroup
for major := 0; major < 10; major++ {
wg.Add(1)
go func(id uint32) {
defer wg.Done()
<-start
insert100 := func(tx *bolt.Tx) error {
h := fnv.New32a()
buf := make([]byte, 4)
for minor := uint32(0); minor < 100; minor++ {
binary.LittleEndian.PutUint32(buf, uint32(id*100+minor))
h.Reset()
h.Write(buf[:])
k := h.Sum(nil)
b := tx.Bucket([]byte("bench"))
if err := b.Put(k, []byte("filler")); err != nil {
return err
}
}
return nil
}
if err := db.Update(insert100); err != nil {
b.Fatal(err)
}
}(uint32(major))
}
close(start)
wg.Wait()
}
b.StopTimer()
validateBatchBench(b, db)
}


@ -1,148 +0,0 @@
package bolt_test
import (
"encoding/binary"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"github.com/boltdb/bolt"
)
// Set this to see how the counts are actually updated.
const verbose = false
// Counter updates a counter in Bolt for every URL path requested.
type counter struct {
db *bolt.DB
}
func (c counter) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// Communicates the new count from a successful database
// transaction.
var result uint64
increment := func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("hits"))
if err != nil {
return err
}
key := []byte(req.URL.String())
// Decode handles key not found for us.
count := decode(b.Get(key)) + 1
b.Put(key, encode(count))
// All good, communicate new count.
result = count
return nil
}
if err := c.db.Batch(increment); err != nil {
http.Error(rw, err.Error(), 500)
return
}
if verbose {
log.Printf("server: %s: %d", req.URL.String(), result)
}
rw.Header().Set("Content-Type", "application/octet-stream")
fmt.Fprintf(rw, "%d\n", result)
}
func client(id int, base string, paths []string) error {
// Process paths in random order.
rng := rand.New(rand.NewSource(int64(id)))
permutation := rng.Perm(len(paths))
for i := range paths {
path := paths[permutation[i]]
resp, err := http.Get(base + path)
if err != nil {
return err
}
defer resp.Body.Close()
buf, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if verbose {
log.Printf("client: %s: %s", path, buf)
}
}
return nil
}
func ExampleDB_Batch() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Start our web server
count := counter{db}
srv := httptest.NewServer(count)
defer srv.Close()
// Decrease the batch size to make things more interesting.
db.MaxBatchSize = 3
// Get every path multiple times concurrently.
const clients = 10
paths := []string{
"/foo",
"/bar",
"/baz",
"/quux",
"/thud",
"/xyzzy",
}
errors := make(chan error, clients)
for i := 0; i < clients; i++ {
go func(id int) {
errors <- client(id, srv.URL, paths)
}(i)
}
// Check all responses to make sure there's no error.
for i := 0; i < clients; i++ {
if err := <-errors; err != nil {
fmt.Printf("client error: %v", err)
return
}
}
// Check the final result
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("hits"))
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("hits to %s: %d\n", k, decode(v))
}
return nil
})
// Output:
// hits to /bar: 10
// hits to /baz: 10
// hits to /foo: 10
// hits to /quux: 10
// hits to /thud: 10
// hits to /xyzzy: 10
}
// encode marshals a counter.
func encode(n uint64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, n)
return buf
}
// decode unmarshals a counter. Nil buffers are decoded as 0.
func decode(buf []byte) uint64 {
if buf == nil {
return 0
}
return binary.BigEndian.Uint64(buf)
}


@ -1,167 +0,0 @@
package bolt_test
import (
"testing"
"time"
"github.com/boltdb/bolt"
)
// Ensure two functions can perform updates in a single batch.
func TestDB_Batch(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.MustCreateBucket([]byte("widgets"))
// Iterate over multiple updates in separate goroutines.
n := 2
ch := make(chan error)
for i := 0; i < n; i++ {
go func(i int) {
ch <- db.Batch(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
})
}(i)
}
// Check all responses to make sure there's no error.
for i := 0; i < n; i++ {
if err := <-ch; err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
db.MustView(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < n; i++ {
if v := b.Get(u64tob(uint64(i))); v == nil {
t.Errorf("key not found: %d", i)
}
}
return nil
})
}
func TestDB_Batch_Panic(t *testing.T) {
db := NewTestDB()
defer db.Close()
var sentinel int
var bork = &sentinel
var problem interface{}
var err error
// Execute a function inside a batch that panics.
func() {
defer func() {
if p := recover(); p != nil {
problem = p
}
}()
err = db.Batch(func(tx *bolt.Tx) error {
panic(bork)
})
}()
// Verify there is no error.
if g, e := err, error(nil); g != e {
t.Fatalf("wrong error: %v != %v", g, e)
}
// Verify the panic was captured.
if g, e := problem, bork; g != e {
t.Fatalf("wrong error: %v != %v", g, e)
}
}
func TestDB_BatchFull(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.MustCreateBucket([]byte("widgets"))
const size = 3
// buffered so we never leak goroutines
ch := make(chan error, size)
put := func(i int) {
ch <- db.Batch(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
})
}
db.MaxBatchSize = size
// high enough to never trigger here
db.MaxBatchDelay = 1 * time.Hour
go put(1)
go put(2)
// Give the batch a chance to exhibit bugs.
time.Sleep(10 * time.Millisecond)
// not triggered yet
select {
case <-ch:
t.Fatalf("batch triggered too early")
default:
}
go put(3)
// Check all responses to make sure there's no error.
for i := 0; i < size; i++ {
if err := <-ch; err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
db.MustView(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i <= size; i++ {
if v := b.Get(u64tob(uint64(i))); v == nil {
t.Errorf("key not found: %d", i)
}
}
return nil
})
}
func TestDB_BatchTime(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.MustCreateBucket([]byte("widgets"))
const size = 1
// buffered so we never leak goroutines
ch := make(chan error, size)
put := func(i int) {
ch <- db.Batch(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
})
}
db.MaxBatchSize = 1000
db.MaxBatchDelay = 0
go put(1)
// Batch must trigger by time alone.
// Check all responses to make sure there's no error.
for i := 0; i < size; i++ {
if err := <-ch; err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
db.MustView(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i <= size; i++ {
if v := b.Get(u64tob(uint64(i))); v == nil {
t.Errorf("key not found: %d", i)
}
}
return nil
})
}


@ -1,36 +0,0 @@
package bolt_test
import (
"fmt"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// assert fails the test if the condition is false.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
if !condition {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
tb.FailNow()
}
}
// ok fails the test if an err is not nil.
func ok(tb testing.TB, err error) {
if err != nil {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
tb.FailNow()
}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
tb.FailNow()
}
}

File diff suppressed because it is too large.


@ -1,145 +0,0 @@
package main_test
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"testing"
"github.com/boltdb/bolt"
"github.com/boltdb/bolt/cmd/bolt"
)
// Ensure the "info" command can print information about a database.
func TestInfoCommand_Run(t *testing.T) {
db := MustOpen(0666, nil)
db.DB.Close()
defer db.Close()
// Run the info command.
m := NewMain()
if err := m.Run("info", db.Path); err != nil {
t.Fatal(err)
}
}
// Ensure the "stats" command can execute correctly.
func TestStatsCommand_Run(t *testing.T) {
// Skip if the OS page size is not 4KB; the expected stats below assume 4KB pages.
if os.Getpagesize() != 4096 {
t.Skip("system does not use 4KB page size")
}
db := MustOpen(0666, nil)
defer db.Close()
if err := db.Update(func(tx *bolt.Tx) error {
// Create "foo" bucket.
b, err := tx.CreateBucket([]byte("foo"))
if err != nil {
return err
}
for i := 0; i < 10; i++ {
if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
return err
}
}
// Create "bar" bucket.
b, err = tx.CreateBucket([]byte("bar"))
if err != nil {
return err
}
for i := 0; i < 100; i++ {
if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
return err
}
}
// Create "baz" bucket.
b, err = tx.CreateBucket([]byte("baz"))
if err != nil {
return err
}
if err := b.Put([]byte("key"), []byte("value")); err != nil {
return err
}
return nil
}); err != nil {
t.Fatal(err)
}
db.DB.Close()
// Generate expected result.
exp := "Aggregate statistics for 3 buckets\n\n" +
"Page count statistics\n" +
"\tNumber of logical branch pages: 0\n" +
"\tNumber of physical branch overflow pages: 0\n" +
"\tNumber of logical leaf pages: 1\n" +
"\tNumber of physical leaf overflow pages: 0\n" +
"Tree statistics\n" +
"\tNumber of keys/value pairs: 111\n" +
"\tNumber of levels in B+tree: 1\n" +
"Page size utilization\n" +
"\tBytes allocated for physical branch pages: 0\n" +
"\tBytes actually used for branch data: 0 (0%)\n" +
"\tBytes allocated for physical leaf pages: 4096\n" +
"\tBytes actually used for leaf data: 1996 (48%)\n" +
"Bucket statistics\n" +
"\tTotal number of buckets: 3\n" +
"\tTotal number on inlined buckets: 2 (66%)\n" +
"\tBytes used for inlined buckets: 236 (11%)\n"
// Run the command.
m := NewMain()
if err := m.Run("stats", db.Path); err != nil {
t.Fatal(err)
} else if m.Stdout.String() != exp {
t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String())
}
}
// Main represents a test wrapper for main.Main that records output.
type Main struct {
*main.Main
Stdin bytes.Buffer
Stdout bytes.Buffer
Stderr bytes.Buffer
}
// NewMain returns a new instance of Main.
func NewMain() *Main {
m := &Main{Main: main.NewMain()}
m.Main.Stdin = &m.Stdin
m.Main.Stdout = &m.Stdout
m.Main.Stderr = &m.Stderr
return m
}
// MustOpen creates a Bolt database in a temporary location.
func MustOpen(mode os.FileMode, options *bolt.Options) *DB {
// Create temporary path.
f, _ := ioutil.TempFile("", "bolt-")
f.Close()
os.Remove(f.Name())
db, err := bolt.Open(f.Name(), mode, options)
if err != nil {
panic(err.Error())
}
return &DB{DB: db, Path: f.Name()}
}
// DB is a test wrapper for bolt.DB.
type DB struct {
*bolt.DB
Path string
}
// Close closes and removes the database.
func (db *DB) Close() error {
defer os.Remove(db.Path)
return db.DB.Close()
}


@ -1,511 +0,0 @@
package bolt_test
import (
"bytes"
"encoding/binary"
"fmt"
"os"
"sort"
"testing"
"testing/quick"
"github.com/boltdb/bolt"
)
// Ensure that a cursor can return a reference to the bucket that created it.
func TestCursor_Bucket(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
c := b.Cursor()
equals(t, b, c.Bucket())
return nil
})
}
// Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
ok(t, err)
ok(t, b.Put([]byte("foo"), []byte("0001")))
ok(t, b.Put([]byte("bar"), []byte("0002")))
ok(t, b.Put([]byte("baz"), []byte("0003")))
_, err = b.CreateBucket([]byte("bkt"))
ok(t, err)
return nil
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
// Exact match should go to the key.
k, v := c.Seek([]byte("bar"))
equals(t, []byte("bar"), k)
equals(t, []byte("0002"), v)
// Inexact match should go to the next key.
k, v = c.Seek([]byte("bas"))
equals(t, []byte("baz"), k)
equals(t, []byte("0003"), v)
// Low key should go to the first key.
k, v = c.Seek([]byte(""))
equals(t, []byte("bar"), k)
equals(t, []byte("0002"), v)
// High key should return no key.
k, v = c.Seek([]byte("zzz"))
assert(t, k == nil, "")
assert(t, v == nil, "")
// Buckets should return their key but no value.
k, v = c.Seek([]byte("bkt"))
equals(t, []byte("bkt"), k)
assert(t, v == nil, "")
return nil
})
}
func TestCursor_Delete(t *testing.T) {
db := NewTestDB()
defer db.Close()
var count = 1000
// Insert every other key between 0 and $count.
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
for i := 0; i < count; i++ {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(i))
b.Put(k, make([]byte, 100))
}
b.CreateBucket([]byte("sub"))
return nil
})
db.Update(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
bound := make([]byte, 8)
binary.BigEndian.PutUint64(bound, uint64(count/2))
for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
if err := c.Delete(); err != nil {
return err
}
}
c.Seek([]byte("sub"))
err := c.Delete()
equals(t, err, bolt.ErrIncompatibleValue)
return nil
})
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
equals(t, b.Stats().KeyN, count/2+1)
return nil
})
}
// Ensure that a Tx cursor can seek to the appropriate keys when there are a
// large number of keys. This test also checks that seek will always move
// forward to the next key.
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
db := NewTestDB()
defer db.Close()
var count = 10000
// Insert every other key between 0 and $count.
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
for i := 0; i < count; i += 100 {
for j := i; j < i+100; j += 2 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(j))
b.Put(k, make([]byte, 100))
}
}
return nil
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
for i := 0; i < count; i++ {
seek := make([]byte, 8)
binary.BigEndian.PutUint64(seek, uint64(i))
k, _ := c.Seek(seek)
// The last seek is beyond the end of the range so
// it should return nil.
if i == count-1 {
assert(t, k == nil, "")
continue
}
// Otherwise we should seek to the exact key or the next key.
num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
equals(t, uint64(i), num)
} else {
equals(t, uint64(i+1), num)
}
}
return nil
})
}
// Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
assert(t, k == nil, "")
assert(t, v == nil, "")
return nil
})
}
// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
assert(t, k == nil, "")
assert(t, v == nil, "")
return nil
})
}
// Ensure that a Tx cursor can iterate over a single root with a couple elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
equals(t, string(k), "bar")
equals(t, v, []byte{1})
k, v = c.Next()
equals(t, string(k), "baz")
equals(t, v, []byte{})
k, v = c.Next()
equals(t, string(k), "foo")
equals(t, v, []byte{0})
k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")
k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")
tx.Rollback()
}
// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
equals(t, string(k), "foo")
equals(t, v, []byte{0})
k, v = c.Prev()
equals(t, string(k), "baz")
equals(t, v, []byte{})
k, v = c.Prev()
equals(t, string(k), "bar")
equals(t, v, []byte{1})
k, v = c.Prev()
assert(t, k == nil, "")
assert(t, v == nil, "")
k, v = c.Prev()
assert(t, k == nil, "")
assert(t, v == nil, "")
tx.Rollback()
}
// Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, _ := c.First()
equals(t, string(k), "bar")
k, _ = c.Next()
equals(t, string(k), "foo")
k, _ = c.First()
equals(t, string(k), "bar")
k, _ = c.Next()
equals(t, string(k), "foo")
tx.Rollback()
}
// Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) {
f := func(items testdata) bool {
db := NewTestDB()
defer db.Close()
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
ok(t, b.Put(item.Key, item.Value))
}
ok(t, tx.Commit())
// Sort test data.
sort.Sort(items)
// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
equals(t, k, items[index].Key)
equals(t, v, items[index].Value)
index++
}
equals(t, len(items), index)
tx.Rollback()
return true
}
if err := quick.Check(f, qconfig()); err != nil {
t.Error(err)
}
}
// Ensure that a transaction can iterate over all elements in a bucket in reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
f := func(items testdata) bool {
db := NewTestDB()
defer db.Close()
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
ok(t, b.Put(item.Key, item.Value))
}
ok(t, tx.Commit())
// Sort test data.
sort.Sort(revtestdata(items))
// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
equals(t, k, items[index].Key)
equals(t, v, items[index].Value)
index++
}
equals(t, len(items), index)
tx.Rollback()
return true
}
if err := quick.Check(f, qconfig()); err != nil {
t.Error(err)
}
}
// Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
ok(t, err)
_, err = b.CreateBucket([]byte("foo"))
ok(t, err)
_, err = b.CreateBucket([]byte("bar"))
ok(t, err)
_, err = b.CreateBucket([]byte("baz"))
ok(t, err)
return nil
})
db.View(func(tx *bolt.Tx) error {
var names []string
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
names = append(names, string(k))
assert(t, v == nil, "")
}
equals(t, names, []string{"bar", "baz", "foo"})
return nil
})
}
// Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
ok(t, err)
_, err = b.CreateBucket([]byte("foo"))
ok(t, err)
_, err = b.CreateBucket([]byte("bar"))
ok(t, err)
_, err = b.CreateBucket([]byte("baz"))
ok(t, err)
return nil
})
db.View(func(tx *bolt.Tx) error {
var names []string
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() {
names = append(names, string(k))
assert(t, v == nil, "")
}
equals(t, names, []string{"foo", "baz", "bar"})
return nil
})
}
func ExampleCursor() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Start a read-write transaction.
db.Update(func(tx *bolt.Tx) error {
// Create a new bucket.
tx.CreateBucket([]byte("animals"))
// Insert data into a bucket.
b := tx.Bucket([]byte("animals"))
b.Put([]byte("dog"), []byte("fun"))
b.Put([]byte("cat"), []byte("lame"))
b.Put([]byte("liger"), []byte("awesome"))
// Create a cursor for iteration.
c := b.Cursor()
// Iterate over items in sorted key order. This starts from the
// first key/value pair and updates the k/v variables to the
// next key/value on each iteration.
//
// The loop finishes at the end of the cursor when a nil key is returned.
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("A %s is %s.\n", k, v)
}
return nil
})
// Output:
// A cat is lame.
// A dog is fun.
// A liger is awesome.
}
func ExampleCursor_reverse() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Start a read-write transaction.
db.Update(func(tx *bolt.Tx) error {
// Create a new bucket.
tx.CreateBucket([]byte("animals"))
// Insert data into a bucket.
b := tx.Bucket([]byte("animals"))
b.Put([]byte("dog"), []byte("fun"))
b.Put([]byte("cat"), []byte("lame"))
b.Put([]byte("liger"), []byte("awesome"))
// Create a cursor for iteration.
c := b.Cursor()
// Iterate over items in reverse sorted key order. This starts
// from the last key/value pair and updates the k/v variables to
// the previous key/value on each iteration.
//
// The loop finishes at the beginning of the cursor when a nil key
// is returned.
for k, v := c.Last(); k != nil; k, v = c.Prev() {
fmt.Printf("A %s is %s.\n", k, v)
}
return nil
})
// Output:
// A liger is awesome.
// A dog is fun.
// A cat is lame.
}


@ -1,903 +0,0 @@
package bolt_test
import (
"encoding/binary"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/boltdb/bolt"
)
var statsFlag = flag.Bool("stats", false, "show performance stats")
// Ensure that opening a database with a bad path returns an error.
func TestOpen_BadPath(t *testing.T) {
db, err := bolt.Open("", 0666, nil)
assert(t, err != nil, "err: %s", err)
assert(t, db == nil, "")
}
// Ensure that a database can be opened without error.
func TestOpen(t *testing.T) {
path := tempfile()
defer os.Remove(path)
db, err := bolt.Open(path, 0666, nil)
assert(t, db != nil, "")
ok(t, err)
equals(t, db.Path(), path)
ok(t, db.Close())
}
// Ensure that opening an already open database file will timeout.
func TestOpen_Timeout(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("timeout not supported on windows")
}
path := tempfile()
defer os.Remove(path)
// Open a data file.
db0, err := bolt.Open(path, 0666, nil)
assert(t, db0 != nil, "")
ok(t, err)
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
assert(t, db1 == nil, "")
equals(t, bolt.ErrTimeout, err)
assert(t, time.Since(start) > 100*time.Millisecond, "")
db0.Close()
}
// Ensure that opening an already open database file will wait until it's closed.
func TestOpen_Wait(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("timeout not supported on windows")
}
path := tempfile()
defer os.Remove(path)
// Open a data file.
db0, err := bolt.Open(path, 0666, nil)
assert(t, db0 != nil, "")
ok(t, err)
// Close it in just a bit.
time.AfterFunc(100*time.Millisecond, func() { db0.Close() })
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
assert(t, db1 != nil, "")
ok(t, err)
assert(t, time.Since(start) > 100*time.Millisecond, "")
}
// Ensure that opening a database does not increase its size.
// https://github.com/boltdb/bolt/issues/291
func TestOpen_Size(t *testing.T) {
// Open a data file.
db := NewTestDB()
path := db.Path()
defer db.Close()
// Insert until we get above the minimum 4MB size.
ok(t, db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
for i := 0; i < 10000; i++ {
ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)))
}
return nil
}))
// Close database and grab the size.
db.DB.Close()
sz := fileSize(path)
if sz == 0 {
t.Fatalf("unexpected new file size: %d", sz)
}
// Reopen database, update, and check size again.
db0, err := bolt.Open(path, 0666, nil)
ok(t, err)
ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) }))
ok(t, db0.Close())
newSz := fileSize(path)
if newSz == 0 {
t.Fatalf("unexpected new file size: %d", newSz)
}
// Compare the original size with the new size.
if sz != newSz {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
}
// Ensure that opening a database beyond the max step size does not increase its size.
// https://github.com/boltdb/bolt/issues/303
func TestOpen_Size_Large(t *testing.T) {
if testing.Short() {
t.Skip("short mode")
}
// Open a data file.
db := NewTestDB()
path := db.Path()
defer db.Close()
// Insert until we get above the minimum 4MB size.
var index uint64
for i := 0; i < 10000; i++ {
ok(t, db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
for j := 0; j < 1000; j++ {
ok(t, b.Put(u64tob(index), make([]byte, 50)))
index++
}
return nil
}))
}
// Close database and grab the size.
db.DB.Close()
sz := fileSize(path)
if sz == 0 {
t.Fatalf("unexpected new file size: %d", sz)
} else if sz < (1 << 30) {
t.Fatalf("expected larger initial size: %d", sz)
}
// Reopen database, update, and check size again.
db0, err := bolt.Open(path, 0666, nil)
ok(t, err)
ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) }))
ok(t, db0.Close())
newSz := fileSize(path)
if newSz == 0 {
t.Fatalf("unexpected new file size: %d", newSz)
}
// Compare the original size with the new size.
if sz != newSz {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
}
// Ensure that a re-opened database is consistent.
func TestOpen_Check(t *testing.T) {
path := tempfile()
defer os.Remove(path)
db, err := bolt.Open(path, 0666, nil)
ok(t, err)
ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
db.Close()
db, err = bolt.Open(path, 0666, nil)
ok(t, err)
ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
db.Close()
}
// Ensure that the database returns an error if the file handle cannot be opened.
func TestDB_Open_FileError(t *testing.T) {
path := tempfile()
defer os.Remove(path)
_, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil)
assert(t, err.(*os.PathError) != nil, "")
equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path)
equals(t, "open", err.(*os.PathError).Op)
}
// Ensure that write errors to the meta file handler during initialization are returned.
func TestDB_Open_MetaInitWriteError(t *testing.T) {
t.Skip("pending")
}
// Ensure that a database that is too small returns an error.
func TestDB_Open_FileTooSmall(t *testing.T) {
path := tempfile()
defer os.Remove(path)
db, err := bolt.Open(path, 0666, nil)
ok(t, err)
db.Close()
// corrupt the database
ok(t, os.Truncate(path, int64(os.Getpagesize())))
db, err = bolt.Open(path, 0666, nil)
equals(t, errors.New("file size too small"), err)
}
// Ensure that a database can be opened in read-only mode by multiple processes
// and that a database cannot be opened in read-write mode and in read-only
// mode at the same time.
func TestOpen_ReadOnly(t *testing.T) {
bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`)
path := tempfile()
defer os.Remove(path)
// Open in read-write mode.
db, err := bolt.Open(path, 0666, nil)
ok(t, db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket(bucket)
if err != nil {
return err
}
return b.Put(key, value)
}))
assert(t, db != nil, "")
assert(t, !db.IsReadOnly(), "")
ok(t, err)
ok(t, db.Close())
// Open in read-only mode.
db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
ok(t, err)
defer db0.Close()
// Opening in read-write mode should return an error.
_, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100})
assert(t, err != nil, "")
// And again (in read-only mode).
db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
ok(t, err)
defer db1.Close()
// Verify both read-only databases are accessible.
for _, db := range []*bolt.DB{db0, db1} {
// Verify it is indeed in read-only mode.
assert(t, db.IsReadOnly(), "")
// Read-only databases should not allow updates.
assert(t,
bolt.ErrDatabaseReadOnly == db.Update(func(*bolt.Tx) error {
panic(`should never get here`)
}),
"")
// Read-only databases should not allow beginning writable txns.
_, err = db.Begin(true)
assert(t, bolt.ErrDatabaseReadOnly == err, "")
// Verify the data.
ok(t, db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
if b == nil {
return fmt.Errorf("expected bucket `%s`", string(bucket))
}
got := string(b.Get(key))
expected := string(value)
if got != expected {
return fmt.Errorf("expected `%s`, got `%s`", expected, got)
}
return nil
}))
}
}
// TODO(benbjohnson): Test corruption at every byte of the first two pages.
// Ensure that a database cannot open a transaction when it's not open.
func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
var db bolt.DB
tx, err := db.Begin(false)
assert(t, tx == nil, "")
equals(t, err, bolt.ErrDatabaseNotOpen)
}
// Ensure that a read-write transaction can be retrieved.
func TestDB_BeginRW(t *testing.T) {
db := NewTestDB()
defer db.Close()
tx, err := db.Begin(true)
assert(t, tx != nil, "")
ok(t, err)
assert(t, tx.DB() == db.DB, "")
equals(t, tx.Writable(), true)
ok(t, tx.Commit())
}
// Ensure that opening a transaction while the DB is closed returns an error.
func TestDB_BeginRW_Closed(t *testing.T) {
var db bolt.DB
tx, err := db.Begin(true)
equals(t, err, bolt.ErrDatabaseNotOpen)
assert(t, tx == nil, "")
}
func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) }
func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) }
// Ensure that a database cannot close while transactions are open.
func testDB_Close_PendingTx(t *testing.T, writable bool) {
db := NewTestDB()
defer db.Close()
// Start transaction.
tx, err := db.Begin(writable)
if err != nil {
t.Fatal(err)
}
// Close the database in a separate goroutine.
done := make(chan struct{})
go func() {
db.Close()
close(done)
}()
// Ensure database hasn't closed.
time.Sleep(100 * time.Millisecond)
select {
case <-done:
t.Fatal("database closed too early")
default:
}
// Finish the transaction so the close can proceed.
if writable {
if err := tx.Commit(); err != nil {
t.Fatal(err)
}
} else {
if err := tx.Rollback(); err != nil {
t.Fatal(err)
}
}
// Ensure database closed now.
time.Sleep(100 * time.Millisecond)
select {
case <-done:
default:
t.Fatal("database did not close")
}
}
// Ensure a database can provide a transactional block.
func TestDB_Update(t *testing.T) {
db := NewTestDB()
defer db.Close()
err := db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("bar"))
b.Put([]byte("baz"), []byte("bat"))
b.Delete([]byte("foo"))
return nil
})
ok(t, err)
err = db.View(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
ok(t, err)
}
// Ensure a closed database returns an error while running a transaction block
func TestDB_Update_Closed(t *testing.T) {
var db bolt.DB
err := db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
return nil
})
equals(t, err, bolt.ErrDatabaseNotOpen)
}
// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_Update_ManualCommit(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Commit()
}()
return nil
})
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_Update_ManualRollback(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Rollback()
}()
return nil
})
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to commit a managed read-only transaction.
func TestDB_View_ManualCommit(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.View(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Commit()
}()
return nil
})
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to rollback a managed read-only transaction.
func TestDB_View_ManualRollback(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.View(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Rollback()
}()
return nil
})
assert(t, ok, "expected panic")
}
// Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) {
db := NewTestDB()
defer db.Close()
func() {
defer func() {
if r := recover(); r != nil {
t.Log("recover: update", r)
}
}()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
panic("omg")
})
}()
// Verify we can update again.
err := db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
ok(t, err)
// Verify that our change persisted.
err = db.Update(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
return nil
})
}
// Ensure a database can return an error through a read-only transactional block.
func TestDB_View_Error(t *testing.T) {
db := NewTestDB()
defer db.Close()
err := db.View(func(tx *bolt.Tx) error {
return errors.New("xxx")
})
equals(t, errors.New("xxx"), err)
}
// Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
return nil
})
func() {
defer func() {
if r := recover(); r != nil {
t.Log("recover: view", r)
}
}()
db.View(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
panic("omg")
})
}()
// Verify that we can still use read transactions.
db.View(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
return nil
})
}
// Ensure that an error is returned when a database write fails.
func TestDB_Commit_WriteFail(t *testing.T) {
t.Skip("pending") // TODO(benbjohnson)
}
// Ensure that DB stats can be returned.
func TestDB_Stats(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
stats := db.Stats()
equals(t, 2, stats.TxStats.PageCount)
equals(t, 0, stats.FreePageN)
equals(t, 2, stats.PendingPageN)
}
// Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
for i := 0; i < 10; i++ {
db.Update(func(tx *bolt.Tx) error {
ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
return nil
})
}
db.Update(func(tx *bolt.Tx) error {
p, _ := tx.Page(0)
assert(t, p != nil, "")
equals(t, "meta", p.Type)
p, _ = tx.Page(1)
assert(t, p != nil, "")
equals(t, "meta", p.Type)
p, _ = tx.Page(2)
assert(t, p != nil, "")
equals(t, "free", p.Type)
p, _ = tx.Page(3)
assert(t, p != nil, "")
equals(t, "free", p.Type)
p, _ = tx.Page(4)
assert(t, p != nil, "")
equals(t, "leaf", p.Type)
p, _ = tx.Page(5)
assert(t, p != nil, "")
equals(t, "freelist", p.Type)
p, _ = tx.Page(6)
assert(t, p == nil, "")
return nil
})
}
// Ensure that DB stats can be subtracted from one another.
func TestDBStats_Sub(t *testing.T) {
var a, b bolt.Stats
a.TxStats.PageCount = 3
a.FreePageN = 4
b.TxStats.PageCount = 10
b.FreePageN = 14
diff := b.Sub(&a)
equals(t, 7, diff.TxStats.PageCount)
// free page stats are copied from the receiver and not subtracted
equals(t, 14, diff.FreePageN)
}
func ExampleDB_Update() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Execute several commands within a write transaction.
err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
return err
}
return nil
})
// If our transactional block didn't return an error then our data is saved.
if err == nil {
db.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value)
return nil
})
}
// Output:
// The value of 'foo' is: bar
}
func ExampleDB_View() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Insert data into a bucket.
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("people"))
b := tx.Bucket([]byte("people"))
b.Put([]byte("john"), []byte("doe"))
b.Put([]byte("susy"), []byte("que"))
return nil
})
// Access data from within a read-only transactional block.
db.View(func(tx *bolt.Tx) error {
v := tx.Bucket([]byte("people")).Get([]byte("john"))
fmt.Printf("John's last name is %s.\n", v)
return nil
})
// Output:
// John's last name is doe.
}
func ExampleDB_Begin_ReadOnly() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Create a bucket.
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
// Create several keys in a transaction.
tx, _ := db.Begin(true)
b := tx.Bucket([]byte("widgets"))
b.Put([]byte("john"), []byte("blue"))
b.Put([]byte("abby"), []byte("red"))
b.Put([]byte("zephyr"), []byte("purple"))
tx.Commit()
// Iterate over the values in sorted key order.
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("%s likes %s\n", k, v)
}
tx.Rollback()
// Output:
// abby likes red
// john likes blue
// zephyr likes purple
}
// TestDB represents a wrapper around a Bolt DB to handle temporary file
// creation and automatic cleanup on close.
type TestDB struct {
*bolt.DB
}
// NewTestDB returns a new instance of TestDB.
func NewTestDB() *TestDB {
db, err := bolt.Open(tempfile(), 0666, nil)
if err != nil {
panic("cannot open db: " + err.Error())
}
return &TestDB{db}
}
// MustView executes a read-only function. Panic on error.
func (db *TestDB) MustView(fn func(tx *bolt.Tx) error) {
if err := db.DB.View(func(tx *bolt.Tx) error {
return fn(tx)
}); err != nil {
panic(err.Error())
}
}
// MustUpdate executes a read-write function. Panic on error.
func (db *TestDB) MustUpdate(fn func(tx *bolt.Tx) error) {
if err := db.DB.Update(func(tx *bolt.Tx) error {
return fn(tx)
}); err != nil {
panic(err.Error())
}
}
// MustCreateBucket creates a new bucket. Panic on error.
func (db *TestDB) MustCreateBucket(name []byte) {
if err := db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket(name)
return err
}); err != nil {
panic(err.Error())
}
}
// Close closes the database and deletes the underlying file.
func (db *TestDB) Close() {
// Log statistics.
if *statsFlag {
db.PrintStats()
}
// Check database consistency after every test.
db.MustCheck()
// Close database and remove file.
defer os.Remove(db.Path())
db.DB.Close()
}
// PrintStats prints the database stats
func (db *TestDB) PrintStats() {
var stats = db.Stats()
fmt.Printf("[db] %-20s %-20s %-20s\n",
fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
)
fmt.Printf(" %-20s %-20s %-20s\n",
fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
)
}
// MustCheck runs a consistency check on the database and panics if any errors are found.
func (db *TestDB) MustCheck() {
db.Update(func(tx *bolt.Tx) error {
// Collect all the errors.
var errors []error
for err := range tx.Check() {
errors = append(errors, err)
if len(errors) > 10 {
break
}
}
// If errors occurred, copy the DB and print the errors.
if len(errors) > 0 {
var path = tempfile()
tx.CopyFile(path, 0600)
// Print errors.
fmt.Print("\n\n")
fmt.Printf("consistency check failed (%d errors)\n", len(errors))
for _, err := range errors {
fmt.Println(err)
}
fmt.Println("")
fmt.Println("db saved to:")
fmt.Println(path)
fmt.Print("\n\n")
os.Exit(-1)
}
return nil
})
}
// CopyTempFile copies a database to a temporary file.
func (db *TestDB) CopyTempFile() {
path := tempfile()
db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) })
fmt.Println("db copied to: ", path)
}
// tempfile returns a temporary file path.
func tempfile() string {
f, _ := ioutil.TempFile("", "bolt-")
f.Close()
os.Remove(f.Name())
return f.Name()
}
// mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *bolt.Bucket, m map[string]string) {
found := make(map[string]string)
b.ForEach(func(k, _ []byte) error {
found[string(k)] = ""
return nil
})
// Check for keys found in bucket that shouldn't be there.
var keys []string
for k := range found {
if _, ok := m[k]; !ok {
keys = append(keys, k)
}
}
if len(keys) > 0 {
sort.Strings(keys)
panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ",")))
}
// Check for keys not found in bucket that should be there.
for k := range m {
if _, ok := found[k]; !ok {
keys = append(keys, k)
}
}
if len(keys) > 0 {
sort.Strings(keys)
panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ",")))
}
}
func trunc(b []byte, length int) []byte {
if length < len(b) {
return b[:length]
}
return b
}
func truncDuration(d time.Duration) string {
return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
}
func fileSize(path string) int64 {
fi, err := os.Stat(path)
if err != nil {
return 0
}
return fi.Size()
}
func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
// u64tob converts a uint64 into an 8-byte slice.
func u64tob(v uint64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, v)
return b
}
// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }


@ -1,156 +0,0 @@
package bolt
import (
"math/rand"
"reflect"
"sort"
"testing"
"unsafe"
)
// Ensure that a page is added to a transaction's freelist.
func TestFreelist_free(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12})
if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
}
}
// Ensure that a page and its overflow is added to a transaction's freelist.
func TestFreelist_free_overflow(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12, overflow: 3})
if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
}
}
// Ensure that a transaction's free pages can be released.
func TestFreelist_release(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12, overflow: 1})
f.free(100, &page{id: 9})
f.free(102, &page{id: 39})
f.release(100)
f.release(101)
if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
f.release(102)
if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
}
// Ensure that a freelist can find contiguous blocks of pages.
func TestFreelist_allocate(t *testing.T) {
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
if id := int(f.allocate(3)); id != 3 {
t.Fatalf("exp=3; got=%v", id)
}
if id := int(f.allocate(1)); id != 6 {
t.Fatalf("exp=6; got=%v", id)
}
if id := int(f.allocate(3)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if id := int(f.allocate(2)); id != 12 {
t.Fatalf("exp=12; got=%v", id)
}
if id := int(f.allocate(1)); id != 7 {
t.Fatalf("exp=7; got=%v", id)
}
if id := int(f.allocate(0)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if id := int(f.allocate(0)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
if id := int(f.allocate(1)); id != 9 {
t.Fatalf("exp=9; got=%v", id)
}
if id := int(f.allocate(1)); id != 18 {
t.Fatalf("exp=18; got=%v", id)
}
if id := int(f.allocate(1)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
}
// Ensure that a freelist can deserialize from a freelist page.
func TestFreelist_read(t *testing.T) {
// Create a page.
var buf [4096]byte
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = freelistPageFlag
page.count = 2
// Insert 2 page ids.
ids := (*[3]pgid)(unsafe.Pointer(&page.ptr))
ids[0] = 23
ids[1] = 50
// Deserialize page into a freelist.
f := newFreelist()
f.read(page)
// Ensure that there are two page ids in the freelist.
if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
}
// Ensure that a freelist can serialize into a freelist page.
func TestFreelist_write(t *testing.T) {
// Create a freelist and write it to a page.
var buf [4096]byte
f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)}
f.pending[100] = []pgid{28, 11}
f.pending[101] = []pgid{3}
p := (*page)(unsafe.Pointer(&buf[0]))
f.write(p)
// Read the page back out.
f2 := newFreelist()
f2.read(p)
// Ensure that the freelist is correct.
// All pages should be present and in reverse order.
if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
t.Fatalf("exp=%v; got=%v", exp, f2.ids)
}
}
func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) }
func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) }
func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) }
func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) }
func benchmark_FreelistRelease(b *testing.B, size int) {
ids := randomPgids(size)
pending := randomPgids(len(ids) / 400)
b.ResetTimer()
for i := 0; i < b.N; i++ {
f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}}
f.release(1)
}
}
func randomPgids(n int) []pgid {
rand.Seed(42)
pgids := make(pgids, n)
for i := range pgids {
pgids[i] = pgid(rand.Int63())
}
sort.Sort(pgids)
return pgids
}


@ -1,156 +0,0 @@
package bolt
import (
"testing"
"unsafe"
)
// Ensure that a node can insert a key/value.
func TestNode_put(t *testing.T) {
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}}
n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
if len(n.inodes) != 3 {
t.Fatalf("exp=3; got=%d", len(n.inodes))
}
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
}
if n.inodes[2].flags != uint32(leafPageFlag) {
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
}
}
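node.put keeps inodes sorted by key and replaces in place on an exact match, which is why the fourth put above overwrites "foo" instead of adding a fourth entry. A minimal sketch of that insert-or-replace step (oldKey and newKey are passed separately because they can differ during rebalancing; the pgid and bucket plumbing is omitted):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type inode struct {
	flags      uint32
	key, value []byte
}

type node struct{ inodes []inode }

// put inserts newKey/value at oldKey's sorted position, replacing any
// existing entry whose key equals oldKey.
func (n *node) put(oldKey, newKey, value []byte, flags uint32) {
	i := sort.Search(len(n.inodes), func(i int) bool {
		return bytes.Compare(n.inodes[i].key, oldKey) >= 0
	})
	if i >= len(n.inodes) || !bytes.Equal(n.inodes[i].key, oldKey) {
		n.inodes = append(n.inodes, inode{})
		copy(n.inodes[i+1:], n.inodes[i:]) // shift right; copy handles overlap
	}
	n.inodes[i] = inode{flags: flags, key: newKey, value: value}
}

func main() {
	n := &node{}
	n.put([]byte("baz"), []byte("baz"), []byte("2"), 0)
	n.put([]byte("foo"), []byte("foo"), []byte("0"), 0)
	n.put([]byte("bar"), []byte("bar"), []byte("1"), 0)
	n.put([]byte("foo"), []byte("foo"), []byte("3"), 0)
	for _, in := range n.inodes {
		fmt.Printf("<%s,%s> ", in.key, in.value) // <bar,1> <baz,2> <foo,3>
	}
	fmt.Println()
}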
// Ensure that a node can deserialize from a leaf page.
func TestNode_read_LeafPage(t *testing.T) {
// Create a page.
var buf [4096]byte
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = leafPageFlag
page.count = 2
// Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2
nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
// Write data for the nodes at the end.
data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
copy(data[:], []byte("barfooz"))
copy(data[7:], []byte("helloworldbye"))
// Deserialize page into a leaf.
n := &node{}
n.read(page)
// Check that there are two inodes with correct data.
if !n.isLeaf {
t.Fatal("expected leaf")
}
if len(n.inodes) != 2 {
t.Fatalf("exp=2; got=%d", len(n.inodes))
}
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
}
}
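The pos fields set up above are what make this layout work: each leafPageElement stores its key's offset relative to the element's own address, so elements stay valid wherever the page sits in memory. A runnable sketch of the accessors that decode them (the element struct is copied here for illustration):

package main

import (
	"fmt"
	"unsafe"
)

type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// key and value live pos bytes past the element itself; ksize and vsize
// delimit them within that region.
func (e *leafPageElement) key() []byte {
	buf := (*[0xFFFF]byte)(unsafe.Pointer(e))
	return buf[e.pos : e.pos+e.ksize]
}

func (e *leafPageElement) value() []byte {
	buf := (*[0xFFFF]byte)(unsafe.Pointer(e))
	return buf[e.pos+e.ksize : e.pos+e.ksize+e.vsize]
}

func main() {
	var buf [4096]byte
	elems := (*[2]leafPageElement)(unsafe.Pointer(&buf[0]))
	elems[0] = leafPageElement{pos: 32, ksize: 3, vsize: 4}  // two 16-byte elements ahead of the data
	elems[1] = leafPageElement{pos: 23, ksize: 10, vsize: 3} // 16 + 3 + 4, as in the test
	copy(buf[32:], "barfoozhelloworldbye")
	fmt.Printf("%s=%s %s=%s\n", elems[0].key(), elems[0].value(),
		elems[1].key(), elems[1].value()) // bar=fooz helloworld=bye
}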
// Ensure that a node can serialize into a leaf page.
func TestNode_write_LeafPage(t *testing.T) {
// Create a node.
n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)
// Write it to a page.
var buf [4096]byte
p := (*page)(unsafe.Pointer(&buf[0]))
n.write(p)
// Read the page back in.
n2 := &node{}
n2.read(p)
// Check that the two pages are the same.
if len(n2.inodes) != 3 {
t.Fatalf("exp=3; got=%d", len(n2.inodes))
}
if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
}
if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
}
if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
}
}
// Ensure that a node can split into appropriate subgroups.
func TestNode_split(t *testing.T) {
// Create a node.
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
// Split between 2 & 3.
n.split(100)
var parent = n.parent
if len(parent.children) != 2 {
t.Fatalf("exp=2; got=%d", len(parent.children))
}
if len(parent.children[0].inodes) != 2 {
t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
}
if len(parent.children[1].inodes) != 3 {
t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
}
}
// Ensure that a page with the minimum number of inodes just returns a single node.
func TestNode_split_MinKeys(t *testing.T) {
// Create a node.
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
// Split.
n.split(20)
if n.parent != nil {
t.Fatalf("expected nil parent")
}
}
// Ensure that a node that has keys that all fit on a page just returns one leaf.
func TestNode_split_SinglePage(t *testing.T) {
// Create a node.
n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
// Split.
n.split(4096)
if n.parent != nil {
t.Fatalf("expected nil parent")
}
}
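All three split tests come down to one decision: walk the inodes, and once the accumulated size would cross the fill threshold, cut, provided both sides keep the minimum key count. A sketch of just that decision under assumed per-element sizes (the real node.split also derives the threshold from the bucket's fill percent):

package main

import "fmt"

const minKeysPerPage = 2

// splitIndex returns the index where the second group should start, or
// len(sizes) if the node should not split at all.
func splitIndex(sizes []int, threshold int) int {
	total := 0
	for i, sz := range sizes {
		if i >= minKeysPerPage && len(sizes)-i >= minKeysPerPage && total+sz > threshold {
			return i
		}
		total += sz
	}
	return len(sizes)
}

func main() {
	// Five elements of roughly 40 bytes each (8-byte key + 16-byte value +
	// element header), as in TestNode_split: a tight threshold splits 2/3,
	// a page-sized one and a two-element node don't split.
	sizes := []int{40, 40, 40, 40, 40}
	fmt.Println(splitIndex(sizes, 50))         // 2: groups of 2 and 3
	fmt.Println(splitIndex(sizes, 4096))       // 5: fits on one page
	fmt.Println(splitIndex([]int{40, 40}, 10)) // 2: too few keys to split
}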

View File

@ -1,72 +0,0 @@
package bolt
import (
"reflect"
"sort"
"testing"
"testing/quick"
)
// Ensure that the page type can be returned in human readable format.
func TestPage_typ(t *testing.T) {
if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
t.Fatalf("exp=branch; got=%v", typ)
}
if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
t.Fatalf("exp=leaf; got=%v", typ)
}
if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
t.Fatalf("exp=meta; got=%v", typ)
}
if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
t.Fatalf("exp=freelist; got=%v", typ)
}
if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
t.Fatalf("exp=unknown<4e20>; got=%v", typ)
}
}
// Ensure that the hexdump debugging function doesn't blow up.
func TestPage_dump(t *testing.T) {
(&page{id: 256}).hexdump(16)
}
func TestPgids_merge(t *testing.T) {
a := pgids{4, 5, 6, 10, 11, 12, 13, 27}
b := pgids{1, 3, 8, 9, 25, 30}
c := a.merge(b)
if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) {
t.Errorf("mismatch: %v", c)
}
a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36}
b = pgids{8, 9, 25, 30}
c = a.merge(b)
if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) {
t.Errorf("mismatch: %v", c)
}
}
func TestPgids_merge_quick(t *testing.T) {
if err := quick.Check(func(a, b pgids) bool {
// Sort incoming lists.
sort.Sort(a)
sort.Sort(b)
// Merge the two lists together.
got := a.merge(b)
// The expected value should be the two lists combined and sorted.
exp := append(a, b...)
sort.Sort(exp)
if !reflect.DeepEqual(exp, got) {
t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got)
return false
}
return true
}, nil); err != nil {
t.Fatal(err)
}
}
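The contract the quick check above pins down is a standard sorted-list merge. A standalone sketch:

package main

import "fmt"

type pgid uint64
type pgids []pgid

// merge returns the sorted union of two already-sorted id lists.
func (a pgids) merge(b pgids) pgids {
	merged := make(pgids, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		if a[0] <= b[0] {
			merged = append(merged, a[0])
			a = a[1:]
		} else {
			merged = append(merged, b[0])
			b = b[1:]
		}
	}
	merged = append(merged, a...)
	return append(merged, b...)
}

func main() {
	a := pgids{4, 5, 6, 10, 11, 12, 13, 27}
	b := pgids{1, 3, 8, 9, 25, 30}
	fmt.Println(a.merge(b)) // [1 3 4 5 6 8 9 10 11 12 13 25 27 30]
}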

View File

@ -1,79 +0,0 @@
package bolt_test
import (
"bytes"
"flag"
"fmt"
"math/rand"
"os"
"reflect"
"testing/quick"
"time"
)
// testing/quick defaults to 5 iterations and a random seed.
// You can override these settings from the command line:
//
// -quick.count The number of iterations to perform.
// -quick.seed The seed to use for randomizing.
// -quick.maxitems The maximum number of items to insert into a DB.
// -quick.maxksize The maximum size of a key.
// -quick.maxvsize The maximum size of a value.
//
var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int
func init() {
flag.IntVar(&qcount, "quick.count", 5, "")
flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "")
flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "")
flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "")
flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
flag.Parse()
fmt.Fprintln(os.Stderr, "seed:", qseed)
fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
}
func qconfig() *quick.Config {
return &quick.Config{
MaxCount: qcount,
Rand: rand.New(rand.NewSource(int64(qseed))),
}
}
type testdata []testdataitem
func (t testdata) Len() int { return len(t) }
func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 }
func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
n := rand.Intn(qmaxitems-1) + 1
items := make(testdata, n)
for i := 0; i < n; i++ {
item := &items[i]
item.Key = randByteSlice(rand, 1, qmaxksize)
item.Value = randByteSlice(rand, 0, qmaxvsize)
}
return reflect.ValueOf(items)
}
type revtestdata []testdataitem
func (t revtestdata) Len() int { return len(t) }
func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 }
type testdataitem struct {
Key []byte
Value []byte
}
func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte {
n := rand.Intn(maxSize-minSize) + minSize
b := make([]byte, n)
for i := 0; i < n; i++ {
b[i] = byte(rand.Intn(255))
}
return b
}
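These helpers plug straight into testing/quick: because testdata has a Generate method, any property written as func(items testdata) bool can be checked, with qconfig supplying the flag-controlled iteration count and seed. A hypothetical property test using them (the name is illustrative, and it would also need a plain "testing" import, which this file omits since it only uses testing/quick):

// Keys produced by testdata.Generate are never empty; values may be.
func TestTestdata_Generate_NonEmptyKeys(t *testing.T) {
	if err := quick.Check(func(items testdata) bool {
		for _, item := range items {
			if len(item.Key) == 0 {
				return false
			}
		}
		return true
	}, qconfig()); err != nil {
		t.Fatal(err)
	}
}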

View File

@ -1,327 +0,0 @@
package bolt_test
import (
"bytes"
"fmt"
"math/rand"
"sync"
"testing"
"github.com/boltdb/bolt"
)
func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) }
func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) }
func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) }
func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) }
func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) }
func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) }
func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) }
func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) }
func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) }
func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) }
func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) }
func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) }
func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) }
// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
func testSimulate(t *testing.T, threadCount, parallelism int) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
rand.Seed(int64(qseed))
// A list of operations that readers and writers can perform.
var readerHandlers = []simulateHandler{simulateGetHandler}
var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
var versions = make(map[int]*QuickDB)
versions[1] = NewQuickDB()
db := NewTestDB()
defer db.Close()
var mutex sync.Mutex
// Run n threads in parallel, each with their own operation.
var wg sync.WaitGroup
var threads = make(chan bool, parallelism)
var i int
for {
threads <- true
wg.Add(1)
writable := ((rand.Int() % 100) < 20) // 20% writers
// Choose an operation to execute.
var handler simulateHandler
if writable {
handler = writerHandlers[rand.Intn(len(writerHandlers))]
} else {
handler = readerHandlers[rand.Intn(len(readerHandlers))]
}
// Execute a thread for the given operation.
go func(writable bool, handler simulateHandler) {
defer wg.Done()
// Start transaction.
tx, err := db.Begin(writable)
if err != nil {
t.Fatal("tx begin: ", err)
}
// Obtain current state of the dataset.
mutex.Lock()
var qdb = versions[tx.ID()]
if writable {
qdb = versions[tx.ID()-1].Copy()
}
mutex.Unlock()
// Make sure we commit/rollback the tx at the end and update the state.
if writable {
defer func() {
mutex.Lock()
versions[tx.ID()] = qdb
mutex.Unlock()
ok(t, tx.Commit())
}()
} else {
defer tx.Rollback()
}
// Ignore operation if we don't have data yet.
if qdb == nil {
return
}
// Execute handler.
handler(tx, qdb)
// Release a thread back to the scheduling loop.
<-threads
}(writable, handler)
i++
if i > threadCount {
break
}
}
// Wait until all threads are done.
wg.Wait()
}
type simulateHandler func(tx *bolt.Tx, qdb *QuickDB)
// Retrieves a key from the database and verifies that it is what is expected.
func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) {
// Randomly retrieve an existing key path.
keys := qdb.Rand()
if len(keys) == 0 {
return
}
// Retrieve root bucket.
b := tx.Bucket(keys[0])
if b == nil {
panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4)))
}
// Drill into nested buckets.
for _, key := range keys[1 : len(keys)-1] {
b = b.Bucket(key)
if b == nil {
panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
}
}
// Verify key/value on the final bucket.
expected := qdb.Get(keys)
actual := b.Get(keys[len(keys)-1])
if !bytes.Equal(actual, expected) {
fmt.Println("=== EXPECTED ===")
fmt.Println(expected)
fmt.Println("=== ACTUAL ===")
fmt.Println(actual)
fmt.Println("=== END ===")
panic("value mismatch")
}
}
// Inserts a key into the database.
func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) {
var err error
keys, value := randKeys(), randValue()
// Retrieve root bucket.
b := tx.Bucket(keys[0])
if b == nil {
b, err = tx.CreateBucket(keys[0])
if err != nil {
panic("create bucket: " + err.Error())
}
}
// Create nested buckets, if necessary.
for _, key := range keys[1 : len(keys)-1] {
child := b.Bucket(key)
if child != nil {
b = child
} else {
b, err = b.CreateBucket(key)
if err != nil {
panic("create bucket: " + err.Error())
}
}
}
// Insert into database.
if err := b.Put(keys[len(keys)-1], value); err != nil {
panic("put: " + err.Error())
}
// Insert into in-memory database.
qdb.Put(keys, value)
}
// QuickDB is an in-memory database that replicates the functionality of the
// Bolt DB type except that it is entirely in-memory. It is meant for testing
// that the Bolt database is consistent.
type QuickDB struct {
sync.RWMutex
m map[string]interface{}
}
// NewQuickDB returns an instance of QuickDB.
func NewQuickDB() *QuickDB {
return &QuickDB{m: make(map[string]interface{})}
}
// Get retrieves the value at a key path.
func (db *QuickDB) Get(keys [][]byte) []byte {
db.RLock()
defer db.RUnlock()
m := db.m
for _, key := range keys[:len(keys)-1] {
value := m[string(key)]
if value == nil {
return nil
}
switch value := value.(type) {
case map[string]interface{}:
m = value
case []byte:
return nil
}
}
// Only return if it's a simple value.
if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
return value
}
return nil
}
// Put inserts a value into a key path.
func (db *QuickDB) Put(keys [][]byte, value []byte) {
db.Lock()
defer db.Unlock()
// Build buckets all the way down the key path.
m := db.m
for _, key := range keys[:len(keys)-1] {
if _, ok := m[string(key)].([]byte); ok {
return // Keypath intersects with a simple value. Do nothing.
}
if m[string(key)] == nil {
m[string(key)] = make(map[string]interface{})
}
m = m[string(key)].(map[string]interface{})
}
// Insert value into the last key.
m[string(keys[len(keys)-1])] = value
}
// Rand returns a random key path that points to a simple value.
func (db *QuickDB) Rand() [][]byte {
db.RLock()
defer db.RUnlock()
if len(db.m) == 0 {
return nil
}
var keys [][]byte
db.rand(db.m, &keys)
return keys
}
func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
i, index := 0, rand.Intn(len(m))
for k, v := range m {
if i == index {
*keys = append(*keys, []byte(k))
if v, ok := v.(map[string]interface{}); ok {
db.rand(v, keys)
}
return
}
i++
}
panic("quickdb rand: out-of-range")
}
// Copy copies the entire database.
func (db *QuickDB) Copy() *QuickDB {
db.RLock()
defer db.RUnlock()
return &QuickDB{m: db.copy(db.m)}
}
func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
clone := make(map[string]interface{}, len(m))
for k, v := range m {
switch v := v.(type) {
case map[string]interface{}:
clone[k] = db.copy(v)
default:
clone[k] = v
}
}
return clone
}
func randKey() []byte {
var min, max = 1, 1024
n := rand.Intn(max-min) + min
b := make([]byte, n)
for i := 0; i < n; i++ {
b[i] = byte(rand.Intn(255))
}
return b
}
func randKeys() [][]byte {
var keys [][]byte
var count = rand.Intn(2) + 2
for i := 0; i < count; i++ {
keys = append(keys, randKey())
}
return keys
}
func randValue() []byte {
n := rand.Intn(8192)
b := make([]byte, n)
for i := 0; i < n; i++ {
b[i] = byte(rand.Intn(255))
}
return b
}

View File

@ -1,456 +0,0 @@
package bolt_test
import (
"errors"
"fmt"
"os"
"testing"
"github.com/boltdb/bolt"
)
// Ensure that committing a closed transaction returns an error.
func TestTx_Commit_Closed(t *testing.T) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("foo"))
ok(t, tx.Commit())
equals(t, tx.Commit(), bolt.ErrTxClosed)
}
// Ensure that rolling back a closed transaction returns an error.
func TestTx_Rollback_Closed(t *testing.T) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
ok(t, tx.Rollback())
equals(t, tx.Rollback(), bolt.ErrTxClosed)
}
// Ensure that committing a read-only transaction returns an error.
func TestTx_Commit_ReadOnly(t *testing.T) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(false)
equals(t, tx.Commit(), bolt.ErrTxNotWritable)
}
// Ensure that a transaction can retrieve a cursor on the root bucket.
func TestTx_Cursor(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.CreateBucket([]byte("woojits"))
c := tx.Cursor()
k, v := c.First()
equals(t, "widgets", string(k))
assert(t, v == nil, "")
k, v = c.Next()
equals(t, "woojits", string(k))
assert(t, v == nil, "")
k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")
return nil
})
}
// Ensure that creating a bucket with a read-only transaction returns an error.
func TestTx_CreateBucket_ReadOnly(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.View(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("foo"))
assert(t, b == nil, "")
equals(t, bolt.ErrTxNotWritable, err)
return nil
})
}
// Ensure that creating a bucket on a closed transaction returns an error.
func TestTx_CreateBucket_Closed(t *testing.T) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.Commit()
b, err := tx.CreateBucket([]byte("foo"))
assert(t, b == nil, "")
equals(t, bolt.ErrTxClosed, err)
}
// Ensure that a Tx can retrieve a bucket.
func TestTx_Bucket(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
assert(t, b != nil, "")
return nil
})
}
// Ensure that a Tx retrieving a non-existent key returns nil.
func TestTx_Get_Missing(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
assert(t, value == nil, "")
return nil
})
}
// Ensure that a bucket can be created and retrieved.
func TestTx_CreateBucket(t *testing.T) {
db := NewTestDB()
defer db.Close()
// Create a bucket.
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert(t, b != nil, "")
ok(t, err)
return nil
})
// Read the bucket through a separate transaction.
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
assert(t, b != nil, "")
return nil
})
}
// Ensure that a bucket can be created if it doesn't already exist.
func TestTx_CreateBucketIfNotExists(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
assert(t, b != nil, "")
ok(t, err)
b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
assert(t, b != nil, "")
ok(t, err)
b, err = tx.CreateBucketIfNotExists([]byte{})
assert(t, b == nil, "")
equals(t, bolt.ErrBucketNameRequired, err)
b, err = tx.CreateBucketIfNotExists(nil)
assert(t, b == nil, "")
equals(t, bolt.ErrBucketNameRequired, err)
return nil
})
// Read the bucket through a separate transaction.
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
assert(t, b != nil, "")
return nil
})
}
// Ensure that a bucket cannot be created twice.
func TestTx_CreateBucket_Exists(t *testing.T) {
db := NewTestDB()
defer db.Close()
// Create a bucket.
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert(t, b != nil, "")
ok(t, err)
return nil
})
// Create the same bucket again.
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert(t, b == nil, "")
equals(t, bolt.ErrBucketExists, err)
return nil
})
}
// Ensure that a bucket is created with a non-blank name.
func TestTx_CreateBucket_NameRequired(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket(nil)
assert(t, b == nil, "")
equals(t, bolt.ErrBucketNameRequired, err)
return nil
})
}
// Ensure that a bucket can be deleted.
func TestTx_DeleteBucket(t *testing.T) {
db := NewTestDB()
defer db.Close()
// Create a bucket and add a value.
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil
})
// Delete the bucket and make sure we can't get the value.
db.Update(func(tx *bolt.Tx) error {
ok(t, tx.DeleteBucket([]byte("widgets")))
assert(t, tx.Bucket([]byte("widgets")) == nil, "")
return nil
})
db.Update(func(tx *bolt.Tx) error {
// Create the bucket again and make sure there's not a phantom value.
b, err := tx.CreateBucket([]byte("widgets"))
assert(t, b != nil, "")
ok(t, err)
assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
return nil
})
}
// Ensure that deleting a bucket on a closed transaction returns an error.
func TestTx_DeleteBucket_Closed(t *testing.T) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.Commit()
equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed)
}
// Ensure that deleting a bucket with a read-only transaction returns an error.
func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.View(func(tx *bolt.Tx) error {
equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable)
return nil
})
}
// Ensure that nothing happens when deleting a bucket that doesn't exist.
func TestTx_DeleteBucket_NotFound(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
return nil
})
}
// Ensure that no error is returned when a tx.ForEach function does not return
// an error.
func TestTx_ForEach_NoError(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
equals(t, nil, tx.ForEach(func(name []byte, b *bolt.Bucket) error {
return nil
}))
return nil
})
}
// Ensure that an error is returned when a tx.ForEach function returns an error.
func TestTx_ForEach_WithError(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
err := errors.New("foo")
equals(t, err, tx.ForEach(func(name []byte, b *bolt.Bucket) error {
return err
}))
return nil
})
}
// Ensure that Tx commit handlers are called after a transaction successfully commits.
func TestTx_OnCommit(t *testing.T) {
var x int
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
equals(t, 3, x)
}
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
func TestTx_OnCommit_Rollback(t *testing.T) {
var x int
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
tx.CreateBucket([]byte("widgets"))
return errors.New("rollback this commit")
})
equals(t, 0, x)
}
// Ensure that the database can be copied to a file path.
func TestTx_CopyFile(t *testing.T) {
db := NewTestDB()
defer db.Close()
var dest = tempfile()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
return nil
})
ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) }))
db2, err := bolt.Open(dest, 0600, nil)
ok(t, err)
defer db2.Close()
db2.View(func(tx *bolt.Tx) error {
equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
}
type failWriterError struct{}
func (failWriterError) Error() string {
return "error injected for tests"
}
type failWriter struct {
// fail after this many bytes
After int
}
func (f *failWriter) Write(p []byte) (n int, err error) {
n = len(p)
if n > f.After {
n = f.After
err = failWriterError{}
}
f.After -= n
return n, err
}
// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Meta(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
return nil
})
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) })
equals(t, err.Error(), "meta copy: error injected for tests")
}
// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Normal(t *testing.T) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
return nil
})
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) })
equals(t, err.Error(), "error injected for tests")
}
func ExampleTx_Rollback() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Create a bucket.
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
// Set a value for a key.
db.Update(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
})
// Update the key but rollback the transaction so it never saves.
tx, _ := db.Begin(true)
b := tx.Bucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("baz"))
tx.Rollback()
// Ensure that our original value is still set.
db.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' is still: %s\n", value)
return nil
})
// Output:
// The value for 'foo' is still: bar
}
func ExampleTx_CopyFile() {
// Open the database.
db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path())
defer db.Close()
// Create a bucket and a key.
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil
})
// Copy the database to another file.
toFile := tempfile()
db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) })
defer os.Remove(toFile)
// Open the cloned database.
db2, _ := bolt.Open(toFile, 0666, nil)
defer db2.Close()
// Ensure that the key exists in the copy.
db2.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
return nil
})
// Output:
// The value for 'foo' in the clone is: bar
}

View File

@ -17,8 +17,8 @@ import (
"os"
"path"
"github.com/boltdb/bolt"
"github.com/juju/errors"
"github.com/ngaut/bolt"
"github.com/ngaut/log"
"github.com/pingcap/tidb/store/localstore/engine"
)
@ -53,12 +53,12 @@ func (d *db) Get(key []byte) ([]byte, error) {
return value, errors.Trace(err)
}
func (d *db) GetSnapshot() (engine.Snapshot, error) {
func (d *db) Seek(startKey []byte) (engine.Iterator, error) {
tx, err := d.DB.Begin(false)
if err != nil {
return nil, err
return nil, errors.Trace(err)
}
return &snapshot{tx}, nil
return &iterator{tx: tx, key: startKey}, nil
}
func (d *db) NewBatch() engine.Batch {
@ -95,33 +95,6 @@ func (d *db) Close() error {
return d.DB.Close()
}
type snapshot struct {
*bolt.Tx
}
func (s *snapshot) Get(key []byte) ([]byte, error) {
b := s.Tx.Bucket(bucketName)
v := b.Get(key)
if v == nil {
return nil, nil
}
value := append([]byte(nil), v...)
return value, nil
}
func (s *snapshot) NewIterator(startKey []byte) engine.Iterator {
return &iterator{tx: s.Tx, key: startKey}
}
func (s *snapshot) Release() {
err := s.Tx.Rollback()
if err != nil {
log.Errorf("commit err %v", err)
}
}
type iterator struct {
tx *bolt.Tx
*bolt.Cursor
@ -154,7 +127,10 @@ func (i *iterator) Value() []byte {
}
func (i *iterator) Release() {
err := i.tx.Rollback()
if err != nil {
log.Errorf("commit err %v", err)
}
}
type write struct {

View File

@ -17,7 +17,7 @@ import (
"os"
"testing"
"github.com/ngaut/bolt"
"github.com/boltdb/bolt"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/store/localstore/engine"
)
@ -114,18 +114,8 @@ func (s *testSuite) TestDB(c *C) {
c.Assert(err, IsNil)
c.Assert(v, DeepEquals, []byte("1"))
snap, err := db.GetSnapshot()
iter, err := db.Seek(nil)
c.Assert(err, IsNil)
v, err = snap.Get([]byte("a"))
c.Assert(err, IsNil)
c.Assert(v, DeepEquals, []byte("1"))
v, err = snap.Get([]byte("c"))
c.Assert(err, IsNil)
c.Assert(v, IsNil)
iter := snap.NewIterator(nil)
c.Assert(iter.Next(), Equals, true)
c.Assert(iter.Key(), DeepEquals, []byte("a"))
c.Assert(iter.Next(), Equals, true)
@ -133,11 +123,11 @@ func (s *testSuite) TestDB(c *C) {
c.Assert(iter.Next(), Equals, false)
iter.Release()
iter = snap.NewIterator([]byte("b"))
iter, err = db.Seek([]byte("b"))
c.Assert(err, IsNil)
c.Assert(iter.Next(), Equals, true)
c.Assert(iter.Key(), DeepEquals, []byte("b"))
c.Assert(iter.Value(), DeepEquals, []byte("2"))
c.Assert(iter.Next(), Equals, false)
snap.Release()
iter.Release()
}

View File

@ -25,8 +25,9 @@ type DB interface {
// Get the associated value with key
// return nil, nil if no value found
Get(key []byte) ([]byte, error)
// Getsnapshot generates a snapshot for current DB
GetSnapshot() (Snapshot, error)
// Seek seeks the iterator to the first key in the engine which
// is >= startKey in byte-order.
Seek(startKey []byte) (Iterator, error)
// NewBatch creates a Batch for writing
NewBatch() Batch
// Commit writes the changed data in Batch
@ -35,18 +36,6 @@ type DB interface {
Close() error
}
// Snapshot is the interface for local storage
type Snapshot interface {
// Get the associated value with key in this snapshot
// return nil, nil if no value found
Get(key []byte) ([]byte, error)
// NewIterator creates an iterator, seeks the iterator to
// the first key >= startKey.
NewIterator(startKey []byte) Iterator
// Release releases the snapshot
Release()
}
// Iterator is the interface for local storage
type Iterator interface {
// Next moves the iterator to the next key/value pair,

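With Snapshot gone, point reads use DB.Get and range scans go straight through DB.Seek. A hypothetical helper showing the new calling pattern against any engine.DB (scanFrom and its visit callback are illustrative names, not part of the interface; errors.Trace is the juju/errors wrapper used elsewhere in the store):

// scanFrom visits every key/value pair with key >= start, in byte order.
func scanFrom(db engine.DB, start []byte, visit func(k, v []byte)) error {
	it, err := db.Seek(start)
	if err != nil {
		return errors.Trace(err)
	}
	defer it.Release()
	for it.Next() {
		visit(it.Key(), it.Value())
	}
	return nil
}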
View File

@ -25,9 +25,8 @@ import (
)
var (
_ engine.DB = (*db)(nil)
_ engine.Batch = (*leveldb.Batch)(nil)
_ engine.Snapshot = (*snapshot)(nil)
_ engine.DB = (*db)(nil)
_ engine.Batch = (*leveldb.Batch)(nil)
)
var (
@ -51,19 +50,15 @@ func (d *db) Get(key []byte) ([]byte, error) {
return v, err
}
func (d *db) GetSnapshot() (engine.Snapshot, error) {
s, err := d.DB.GetSnapshot()
if err != nil {
return nil, err
}
return &snapshot{s}, nil
}
func (d *db) NewBatch() engine.Batch {
b := p.Get().(*leveldb.Batch)
return b
}
func (d *db) Seek(startKey []byte) (engine.Iterator, error) {
return d.DB.NewIterator(&util.Range{Start: startKey}, nil), nil
}
func (d *db) Commit(b engine.Batch) error {
batch, ok := b.(*leveldb.Batch)
if !ok {
@ -79,28 +74,6 @@ func (d *db) Close() error {
return d.DB.Close()
}
type snapshot struct {
*leveldb.Snapshot
}
func (s *snapshot) Get(key []byte) ([]byte, error) {
v, err := s.Snapshot.Get(key, nil)
if err == leveldb.ErrNotFound {
return nil, nil
}
return v, err
}
func (s *snapshot) NewIterator(startKey []byte) engine.Iterator {
it := s.Snapshot.NewIterator(&util.Range{Start: startKey}, nil)
return it
}
func (s *snapshot) Release() {
s.Snapshot.Release()
}
// Driver implements engine Driver.
type Driver struct {
}

View File

@ -62,27 +62,18 @@ func (s *testSuite) TestDB(c *C) {
c.Assert(err, IsNil)
c.Assert(v, IsNil)
snap, err := db.GetSnapshot()
c.Assert(err, IsNil)
b = db.NewBatch()
b.Put([]byte("a"), []byte("2"))
err = db.Commit(b)
c.Assert(err, IsNil)
v, err = snap.Get([]byte("a"))
c.Assert(err, IsNil)
c.Assert(v, DeepEquals, []byte("1"))
v, err = db.Get([]byte("a"))
c.Assert(err, IsNil)
c.Assert(v, DeepEquals, []byte("2"))
v, err = snap.Get([]byte("c"))
iter, err := db.Seek(nil)
c.Assert(err, IsNil)
c.Assert(v, IsNil)
iter := snap.NewIterator(nil)
c.Assert(iter.Next(), Equals, true)
c.Assert(iter.Key(), DeepEquals, []byte("a"))
c.Assert(iter.Next(), Equals, true)
@ -90,6 +81,4 @@ func (s *testSuite) TestDB(c *C) {
c.Assert(iter.Next(), Equals, false)
iter.Release()
snap.Release()
}

View File

@ -96,18 +96,14 @@ func (s *dbStore) UUID() string {
}
func (s *dbStore) GetSnapshot() (kv.MvccSnapshot, error) {
engineSnapshot, err := s.db.GetSnapshot()
if err != nil {
return nil, errors.Trace(err)
}
currentVer, err := globalVersionProvider.CurrentVersion()
if err != nil {
return nil, errors.Trace(err)
}
// dbSnapshot implements MvccSnapshot interface.
return &dbSnapshot{
Snapshot: engineSnapshot,
version: currentVer,
db: s.db,
version: currentVer,
}, nil
}
@ -116,11 +112,6 @@ func (s *dbStore) Begin() (kv.Transaction, error) {
s.mu.Lock()
defer s.mu.Unlock()
snapshot, err := s.db.GetSnapshot()
if err != nil {
return nil, errors.Trace(err)
}
beginVer, err := globalVersionProvider.CurrentVersion()
if err != nil {
return nil, err
@ -135,8 +126,8 @@ func (s *dbStore) Begin() (kv.Transaction, error) {
}
log.Debugf("Begin txn:%d", txn.tID)
txn.UnionStore, err = kv.NewUnionStore(&dbSnapshot{
Snapshot: snapshot,
version: beginVer,
db: s.db,
version: beginVer,
})
if err != nil {
return nil, errors.Trace(err)

View File

@ -53,9 +53,7 @@ func (t *testMvccSuite) TestMvccEncode(c *C) {
func (t *testMvccSuite) scanRawEngine(c *C, f func([]byte, []byte)) {
// scan raw db
s, err := t.s.(*dbStore).db.GetSnapshot()
c.Assert(err, IsNil)
it := s.NewIterator(nil)
it, _ := t.s.(*dbStore).db.Seek(nil)
for it.Next() {
f(it.Key(), it.Value())
}

View File

@ -31,7 +31,7 @@ var (
)
type dbSnapshot struct {
engine.Snapshot
db engine.DB
version kv.Version // transaction begin version
}
@ -54,7 +54,10 @@ func (s *dbSnapshot) MvccGet(k kv.Key, ver kv.Version) ([]byte, error) {
endKey := MvccEncodeVersionKey(k, kv.MinVersion)
// get raw iterator
it := s.Snapshot.NewIterator(startKey)
it, err := s.db.Seek(startKey)
if err != nil {
return nil, errors.Trace(err)
}
defer it.Release()
var rawKey []byte
@ -100,12 +103,7 @@ func (s *dbSnapshot) MvccRelease() {
s.Release()
}
func (s *dbSnapshot) Release() {
if s.Snapshot != nil {
s.Snapshot.Release()
s.Snapshot = nil
}
}
func (s *dbSnapshot) Release() {}
type dbIter struct {
s *dbSnapshot
@ -134,7 +132,10 @@ func (it *dbIter) Next(fn kv.FnKeyCmp) (kv.Iterator, error) {
var retErr error
var engineIter engine.Iterator
for {
engineIter = it.s.Snapshot.NewIterator(encKey)
engineIter, retErr = it.s.db.Seek(encKey)
if retErr != nil {
return nil, errors.Trace(retErr)
}
// Check if overflow
if !engineIter.Next() {
it.valid = false