kv: update goleveldb/memdb repo, which removes an unnecessary rand.NewSource (#1410)
_vendor/Godeps/Godeps.json (generated), 48 lines changed

@@ -168,52 +168,52 @@
 			"Rev": "1ce93efbc8f9c568886b2ef85ce305b2217b3de3"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/cache",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/comparer",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/errors",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/filter",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/iterator",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/journal",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/memdb",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/opt",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/storage",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/table",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
-			"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
+			"ImportPath": "github.com/pingcap/goleveldb/leveldb/util",
+			"Rev": "0a4d8ac1a625fd94f0c561da420b71a12e88ef12"
 		},
 		{
 			"ImportPath": "github.com/twinj/uuid",
_vendor/vendor/github.com/pingcap/goleveldb/.travis.yml (generated, vendored, new file), 11 lines

@@ -0,0 +1,11 @@
language: go

go:
  - 1.4
  - 1.5
  - 1.6
  - tip

script:
  - go test -timeout 1h ./...
  - go test -timeout 30m -race -run "TestDB_(Concurrent|GoleveldbIssue74)" ./leveldb
_vendor/vendor/github.com/pingcap/goleveldb/README.md (generated, vendored, new file), 105 lines

@@ -0,0 +1,105 @@
This is an implementation of the [LevelDB key/value database](http://code.google.com/p/leveldb) in the [Go programming language](http://golang.org).

[Build Status](https://travis-ci.org/syndtr/goleveldb)

Installation
-----------

	go get github.com/pingcap/goleveldb/leveldb

Requirements
-----------

* Need at least `go1.4` or newer.

Usage
-----------

Create or open a database:
```go
db, err := leveldb.OpenFile("path/to/db", nil)
...
defer db.Close()
...
```

Read or modify the database content:
```go
// Remember that the contents of the returned slice should not be modified.
data, err := db.Get([]byte("key"), nil)
...
err = db.Put([]byte("key"), []byte("value"), nil)
...
err = db.Delete([]byte("key"), nil)
...
```

Iterate over database content:
```go
iter := db.NewIterator(nil, nil)
for iter.Next() {
	// Remember that the contents of the returned slice should not be modified, and
	// only valid until the next call to Next.
	key := iter.Key()
	value := iter.Value()
	...
}
iter.Release()
err = iter.Error()
...
```

Seek-then-Iterate:
```go
iter := db.NewIterator(nil, nil)
for ok := iter.Seek(key); ok; ok = iter.Next() {
	// Use key/value.
	...
}
iter.Release()
err = iter.Error()
...
```

Iterate over subset of database content:
```go
iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
for iter.Next() {
	// Use key/value.
	...
}
iter.Release()
err = iter.Error()
...
```

Iterate over subset of database content with a particular prefix:
```go
iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
for iter.Next() {
	// Use key/value.
	...
}
iter.Release()
err = iter.Error()
...
```

Batch writes:
```go
batch := new(leveldb.Batch)
batch.Put([]byte("foo"), []byte("value"))
batch.Put([]byte("bar"), []byte("another value"))
batch.Delete([]byte("baz"))
err = db.Write(batch, nil)
...
```

Use bloom filter:
```go
o := &opt.Options{
	Filter: filter.NewBloomFilter(10),
}
db, err := leveldb.OpenFile("path/to/db", o)
...
defer db.Close()
...
```

Documentation
-----------

You can read package documentation [here](http://godoc.org/github.com/pingcap/goleveldb).
@@ -10,10 +10,12 @@ import (
 	"encoding/binary"
 	"fmt"
 
-	"github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/syndtr/goleveldb/leveldb/memdb"
+	"github.com/pingcap/goleveldb/leveldb/errors"
+	"github.com/pingcap/goleveldb/leveldb/memdb"
+	"github.com/pingcap/goleveldb/leveldb/storage"
 )
 
 // ErrBatchCorrupted records reason of batch corruption.
 type ErrBatchCorrupted struct {
 	Reason string
 }
@@ -23,7 +25,7 @@ func (e *ErrBatchCorrupted) Error() string {
 }
 
 func newErrBatchCorrupted(reason string) error {
-	return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
+	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
 }
 
 const (
@@ -31,6 +33,7 @@ const (
 	batchGrowRec = 3000
 )
 
+// BatchReplay wraps basic batch operations.
 type BatchReplay interface {
 	Put(key, value []byte)
 	Delete(key []byte)
@@ -67,20 +70,20 @@ func (b *Batch) grow(n int) {
 	}
 }
 
-func (b *Batch) appendRec(kt kType, key, value []byte) {
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
 	n := 1 + binary.MaxVarintLen32 + len(key)
-	if kt == ktVal {
+	if kt == keyTypeVal {
 		n += binary.MaxVarintLen32 + len(value)
 	}
 	b.grow(n)
 	off := len(b.data)
 	data := b.data[:off+n]
 	data[off] = byte(kt)
-	off += 1
+	off++
 	off += binary.PutUvarint(data[off:], uint64(len(key)))
 	copy(data[off:], key)
 	off += len(key)
-	if kt == ktVal {
+	if kt == keyTypeVal {
 		off += binary.PutUvarint(data[off:], uint64(len(value)))
 		copy(data[off:], value)
 		off += len(value)
@@ -94,13 +97,13 @@ func (b *Batch) appendRec(kt kType, key, value []byte) {
 // Put appends 'put operation' of the given key/value pair to the batch.
 // It is safe to modify the contents of the argument after Put returns.
 func (b *Batch) Put(key, value []byte) {
-	b.appendRec(ktVal, key, value)
+	b.appendRec(keyTypeVal, key, value)
 }
 
 // Delete appends 'delete operation' of the given key to the batch.
 // It is safe to modify the contents of the argument after Delete returns.
 func (b *Batch) Delete(key []byte) {
-	b.appendRec(ktDel, key, nil)
+	b.appendRec(keyTypeDel, key, nil)
 }
 
 // Dump dumps batch contents. The returned slice can be loaded into the
@@ -121,13 +124,14 @@ func (b *Batch) Load(data []byte) error {
 
 // Replay replays batch contents.
 func (b *Batch) Replay(r BatchReplay) error {
-	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
 		switch kt {
-		case ktVal:
+		case keyTypeVal:
 			r.Put(key, value)
-		case ktDel:
+		case keyTypeDel:
 			r.Delete(key)
 		}
+		return nil
 	})
 }
 
@@ -154,6 +158,7 @@ func (b *Batch) append(p *Batch) {
 		b.grow(len(p.data) - batchHdrLen)
 		b.data = append(b.data, p.data[batchHdrLen:]...)
 		b.rLen += p.rLen
+		b.bLen += p.bLen
 	}
 	if p.sync {
 		b.sync = true
@@ -193,18 +198,19 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error {
 	return nil
 }
 
-func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
+func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
 	off := batchHdrLen
 	for i := 0; i < b.rLen; i++ {
 		if off >= len(b.data) {
 			return newErrBatchCorrupted("invalid records length")
 		}
 
-		kt := kType(b.data[off])
-		if kt > ktVal {
-			panic(kt)
+		kt := keyType(b.data[off])
+		if kt > keyTypeVal {
+			return newErrBatchCorrupted("bad record: invalid type")
 		}
-		off += 1
+		off++
 
 		x, n := binary.Uvarint(b.data[off:])
 		off += n
@@ -214,7 +220,7 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error
 		key := b.data[off : off+int(x)]
 		off += int(x)
 		var value []byte
-		if kt == ktVal {
+		if kt == keyTypeVal {
 			x, n := binary.Uvarint(b.data[off:])
 			off += n
 			if n <= 0 || off+int(x) > len(b.data) {
@@ -224,16 +230,19 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error
 			off += int(x)
 		}
 
-		f(i, kt, key, value)
+		if err := f(i, kt, key, value); err != nil {
+			return err
+		}
 	}
 
 	return nil
 }
 
 func (b *Batch) memReplay(to *memdb.DB) error {
-	return b.decodeRec(func(i int, kt kType, key, value []byte) {
-		ikey := newIkey(key, b.seq+uint64(i), kt)
-		to.Put(ikey, value)
+	var ikScratch []byte
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
+		return to.Put(ikScratch, value)
 	})
 }
 
@@ -245,8 +254,9 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er
 }
 
 func (b *Batch) revertMemReplay(to *memdb.DB) error {
-	return b.decodeRec(func(i int, kt kType, key, value []byte) {
-		ikey := newIkey(key, b.seq+uint64(i), kt)
-		to.Delete(ikey)
+	var ikScratch []byte
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
+		return to.Delete(ikScratch)
 	})
 }
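The Replay and decodeRec changes above switch the record callback to `func(i int, kt keyType, key, value []byte) error`, so a corrupted record now surfaces as an error from Replay instead of a panic inside the decoder. A minimal sketch of driving the exported replay path against the updated API; the `printReplay` type is illustrative and not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb"
)

// printReplay satisfies the BatchReplay interface shown in the diff above.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("foo"), []byte("bar"))
	b.Delete([]byte("baz"))

	// With the reworked decodeRec, a bad record type is reported here as an
	// error rather than crashing the caller.
	if err := b.Replay(printReplay{}); err != nil {
		fmt.Println("replay failed:", err)
	}
}
```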
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/batch_test.go (generated, vendored, new file), 123 lines

@@ -0,0 +1,123 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
"github.com/pingcap/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
type tbRec struct {
|
||||
kt keyType
|
||||
key, value []byte
|
||||
}
|
||||
|
||||
type testBatch struct {
|
||||
rec []*tbRec
|
||||
}
|
||||
|
||||
func (p *testBatch) Put(key, value []byte) {
|
||||
p.rec = append(p.rec, &tbRec{keyTypeVal, key, value})
|
||||
}
|
||||
|
||||
func (p *testBatch) Delete(key []byte) {
|
||||
p.rec = append(p.rec, &tbRec{keyTypeDel, key, nil})
|
||||
}
|
||||
|
||||
func compareBatch(t *testing.T, b1, b2 *Batch) {
|
||||
if b1.seq != b2.seq {
|
||||
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
|
||||
}
|
||||
if b1.Len() != b2.Len() {
|
||||
t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
|
||||
}
|
||||
p1, p2 := new(testBatch), new(testBatch)
|
||||
err := b1.Replay(p1)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 1: ", err)
|
||||
}
|
||||
err = b2.Replay(p2)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 2: ", err)
|
||||
}
|
||||
for i := range p1.rec {
|
||||
r1, r2 := p1.rec[i], p2.rec[i]
|
||||
if r1.kt != r2.kt {
|
||||
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
|
||||
}
|
||||
if !bytes.Equal(r1.key, r2.key) {
|
||||
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
|
||||
}
|
||||
if r1.kt == keyTypeVal {
|
||||
if !bytes.Equal(r1.value, r2.value) {
|
||||
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatch_EncodeDecode(t *testing.T) {
|
||||
b1 := new(Batch)
|
||||
b1.seq = 10009
|
||||
b1.Put([]byte("key1"), []byte("value1"))
|
||||
b1.Put([]byte("key2"), []byte("value2"))
|
||||
b1.Delete([]byte("key1"))
|
||||
b1.Put([]byte("k"), []byte(""))
|
||||
b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
|
||||
b1.Delete([]byte("key10000"))
|
||||
b1.Delete([]byte("k"))
|
||||
buf := b1.encode()
|
||||
b2 := new(Batch)
|
||||
err := b2.decode(0, buf)
|
||||
if err != nil {
|
||||
t.Error("error when decoding batch: ", err)
|
||||
}
|
||||
compareBatch(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBatch_Append(t *testing.T) {
|
||||
b1 := new(Batch)
|
||||
b1.seq = 10009
|
||||
b1.Put([]byte("key1"), []byte("value1"))
|
||||
b1.Put([]byte("key2"), []byte("value2"))
|
||||
b1.Delete([]byte("key1"))
|
||||
b1.Put([]byte("foo"), []byte("foovalue"))
|
||||
b1.Put([]byte("bar"), []byte("barvalue"))
|
||||
b2a := new(Batch)
|
||||
b2a.seq = 10009
|
||||
b2a.Put([]byte("key1"), []byte("value1"))
|
||||
b2a.Put([]byte("key2"), []byte("value2"))
|
||||
b2a.Delete([]byte("key1"))
|
||||
b2b := new(Batch)
|
||||
b2b.Put([]byte("foo"), []byte("foovalue"))
|
||||
b2b.Put([]byte("bar"), []byte("barvalue"))
|
||||
b2a.append(b2b)
|
||||
compareBatch(t, b1, b2a)
|
||||
if b1.size() != b2a.size() {
|
||||
t.Fatalf("invalid batch size want %d, got %d", b1.size(), b2a.size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatch_Size(t *testing.T) {
|
||||
b := new(Batch)
|
||||
for i := 0; i < 2; i++ {
|
||||
b.Put([]byte("key1"), []byte("value1"))
|
||||
b.Put([]byte("key2"), []byte("value2"))
|
||||
b.Delete([]byte("key1"))
|
||||
b.Put([]byte("foo"), []byte("foovalue"))
|
||||
b.Put([]byte("bar"), []byte("barvalue"))
|
||||
mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
|
||||
b.memReplay(mem)
|
||||
if b.size() != mem.Size() {
|
||||
t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
|
||||
}
|
||||
b.Reset()
|
||||
}
|
||||
}
|
||||
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/bench_test.go (generated, vendored, new file), 509 lines

@@ -0,0 +1,509 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
func randomString(r *rand.Rand, n int) []byte {
|
||||
b := new(bytes.Buffer)
|
||||
for i := 0; i < n; i++ {
|
||||
b.WriteByte(' ' + byte(r.Intn(95)))
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
|
||||
nn := int(float32(n) * frac)
|
||||
rb := randomString(r, nn)
|
||||
b := make([]byte, 0, n+nn)
|
||||
for len(b) < n {
|
||||
b = append(b, rb...)
|
||||
}
|
||||
return b[:n]
|
||||
}
|
||||
|
||||
type valueGen struct {
|
||||
src []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func newValueGen(frac float32) *valueGen {
|
||||
v := new(valueGen)
|
||||
r := rand.New(rand.NewSource(301))
|
||||
v.src = make([]byte, 0, 1048576+100)
|
||||
for len(v.src) < 1048576 {
|
||||
v.src = append(v.src, compressibleStr(r, frac, 100)...)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *valueGen) get(n int) []byte {
|
||||
if v.pos+n > len(v.src) {
|
||||
v.pos = 0
|
||||
}
|
||||
v.pos += n
|
||||
return v.src[v.pos-n : v.pos]
|
||||
}
|
||||
|
||||
var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))
|
||||
|
||||
type dbBench struct {
|
||||
b *testing.B
|
||||
stor storage.Storage
|
||||
db *DB
|
||||
|
||||
o *opt.Options
|
||||
ro *opt.ReadOptions
|
||||
wo *opt.WriteOptions
|
||||
|
||||
keys, values [][]byte
|
||||
}
|
||||
|
||||
func openDBBench(b *testing.B, noCompress bool) *dbBench {
|
||||
_, err := os.Stat(benchDB)
|
||||
if err == nil {
|
||||
err = os.RemoveAll(benchDB)
|
||||
if err != nil {
|
||||
b.Fatal("cannot remove old db: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
p := &dbBench{
|
||||
b: b,
|
||||
o: &opt.Options{},
|
||||
ro: &opt.ReadOptions{},
|
||||
wo: &opt.WriteOptions{},
|
||||
}
|
||||
p.stor, err = storage.OpenFile(benchDB, false)
|
||||
if err != nil {
|
||||
b.Fatal("cannot open stor: ", err)
|
||||
}
|
||||
if noCompress {
|
||||
p.o.Compression = opt.NoCompression
|
||||
}
|
||||
|
||||
p.db, err = Open(p.stor, p.o)
|
||||
if err != nil {
|
||||
b.Fatal("cannot open db: ", err)
|
||||
}
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *dbBench) reopen() {
|
||||
p.db.Close()
|
||||
var err error
|
||||
p.db, err = Open(p.stor, p.o)
|
||||
if err != nil {
|
||||
p.b.Fatal("Reopen: got error: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) populate(n int) {
|
||||
p.keys, p.values = make([][]byte, n), make([][]byte, n)
|
||||
v := newValueGen(0.5)
|
||||
for i := range p.keys {
|
||||
p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) randomize() {
|
||||
m := len(p.keys)
|
||||
times := m * 2
|
||||
r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface))
|
||||
for n := 0; n < times; n++ {
|
||||
i, j := r1.Int()%m, r2.Int()%m
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
|
||||
p.values[i], p.values[j] = p.values[j], p.values[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) writes(perBatch int) {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
n := len(p.keys)
|
||||
m := n / perBatch
|
||||
if n%perBatch > 0 {
|
||||
m++
|
||||
}
|
||||
batches := make([]Batch, m)
|
||||
j := 0
|
||||
for i := range batches {
|
||||
first := true
|
||||
for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
|
||||
first = false
|
||||
batches[i].Put(p.keys[j], p.values[j])
|
||||
}
|
||||
}
|
||||
runtime.GC()
|
||||
|
||||
b.ResetTimer()
|
||||
b.StartTimer()
|
||||
for i := range batches {
|
||||
err := db.Write(&(batches[i]), p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("write failed: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) gc() {
|
||||
p.keys, p.values = nil, nil
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
func (p *dbBench) puts() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
b.ResetTimer()
|
||||
b.StartTimer()
|
||||
for i := range p.keys {
|
||||
err := db.Put(p.keys[i], p.values[i], p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("put failed: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) fill() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
perBatch := 10000
|
||||
batch := new(Batch)
|
||||
for i, n := 0, len(p.keys); i < n; {
|
||||
first := true
|
||||
for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
|
||||
first = false
|
||||
batch.Put(p.keys[i], p.values[i])
|
||||
}
|
||||
err := db.Write(batch, p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("write failed: ", err)
|
||||
}
|
||||
batch.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) gets() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
b.ResetTimer()
|
||||
for i := range p.keys {
|
||||
_, err := db.Get(p.keys[i], p.ro)
|
||||
if err != nil {
|
||||
b.Error("got error: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func (p *dbBench) seeks() {
|
||||
b := p.b
|
||||
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
b.ResetTimer()
|
||||
for i := range p.keys {
|
||||
if !iter.Seek(p.keys[i]) {
|
||||
b.Error("value not found for: ", string(p.keys[i]))
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func (p *dbBench) newIter() iterator.Iterator {
|
||||
iter := p.db.NewIterator(nil, p.ro)
|
||||
err := iter.Error()
|
||||
if err != nil {
|
||||
p.b.Fatal("cannot create iterator: ", err)
|
||||
}
|
||||
return iter
|
||||
}
|
||||
|
||||
func (p *dbBench) close() {
|
||||
if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
|
||||
p.b.Log("Block pool stats: ", bp)
|
||||
}
|
||||
p.db.Close()
|
||||
p.stor.Close()
|
||||
os.RemoveAll(benchDB)
|
||||
p.db = nil
|
||||
p.keys = nil
|
||||
p.values = nil
|
||||
runtime.GC()
|
||||
runtime.GOMAXPROCS(1)
|
||||
}
|
||||
|
||||
func BenchmarkDBWrite(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteBatch(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1000)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.writes(1000)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.randomize()
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteRandomSync(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.wo.Sync = true
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBOverwrite(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBOverwriteRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.randomize()
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBPut(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.puts()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBRead(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadGC(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadTable(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadReverse(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
iter.Last()
|
||||
for iter.Prev() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadReverseTable(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
iter.Last()
|
||||
for iter.Prev() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBSeek(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.seeks()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBSeekRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.randomize()
|
||||
p.seeks()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBGet(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gets()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBGetRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.randomize()
|
||||
p.gets()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadConcurrent(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
defer p.close()
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(116)
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
for pb.Next() && iter.Next() {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkDBReadConcurrent2(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
defer p.close()
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(116)
|
||||
|
||||
var dir uint32
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
if atomic.AddUint32(&dir, 1)%2 == 0 {
|
||||
for pb.Next() && iter.Next() {
|
||||
}
|
||||
} else {
|
||||
if pb.Next() && iter.Last() {
|
||||
for pb.Next() && iter.Prev() {
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/cache/bench_test.go (generated, vendored, new file), 29 lines

@@ -0,0 +1,29 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package cache

import (
	"math/rand"
	"testing"
	"time"
)

func BenchmarkLRUCache(b *testing.B) {
	c := NewCache(NewLRU(10000))

	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		r := rand.New(rand.NewSource(time.Now().UnixNano()))

		for pb.Next() {
			key := uint64(r.Intn(1000000))
			c.Get(0, key, func() (int, Value) {
				return 1, key
			}).Release()
		}
	})
}
@@ -12,7 +12,7 @@ import (
 	"sync/atomic"
 	"unsafe"
 
-	"github.com/syndtr/goleveldb/leveldb/util"
+	"github.com/pingcap/goleveldb/leveldb/util"
 )
 
 // Cacher provides interface to implements a caching functionality.
@@ -47,17 +47,21 @@ type Cacher interface {
 // so the the Release method will be called once object is released.
 type Value interface{}
 
-type CacheGetter struct {
+// NamespaceGetter provides convenient wrapper for namespace.
+type NamespaceGetter struct {
 	Cache *Cache
 	NS    uint64
 }
 
-func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+// Get simply calls Cache.Get() method.
+func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
 	return g.Cache.Get(g.NS, key, setFunc)
 }
 
 // The hash tables implementation is based on:
-// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
 
 const (
 	mInitialSize = 1 << 4
@@ -610,10 +614,12 @@ func (n *Node) unrefLocked() {
 	}
 }
 
+// Handle is a 'cache handle' of a 'cache node'.
 type Handle struct {
 	n unsafe.Pointer // *Node
 }
 
+// Value returns the value of the 'cache node'.
 func (h *Handle) Value() Value {
 	n := (*Node)(atomic.LoadPointer(&h.n))
 	if n != nil {
@@ -622,6 +628,8 @@ func (h *Handle) Value() Value {
 	return nil
 }
 
+// Release releases this 'cache handle'.
+// It is safe to call release multiple times.
 func (h *Handle) Release() {
 	nPtr := atomic.LoadPointer(&h.n)
 	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
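The hunk above renames CacheGetter to NamespaceGetter and documents it as a namespace-pinning wrapper around Cache.Get. A small usage sketch against the renamed type; the capacity, namespace, and key values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/cache"
)

func main() {
	// The namespace is fixed once in the getter; callers then pass only the key.
	c := cache.NewCache(cache.NewLRU(1024))
	getter := &cache.NamespaceGetter{Cache: c, NS: 42}

	// setFunc runs only on a miss and returns (charge, value).
	h := getter.Get(7, func() (int, cache.Value) {
		return 1, "cached-value"
	})
	if h != nil {
		fmt.Println(h.Value()) // "cached-value"
		h.Release()            // handles are reference counted; release when done
	}
}
```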
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/cache/cache_test.go (generated, vendored, new file), 553 lines

@@ -0,0 +1,553 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type int32o int32
|
||||
|
||||
func (o *int32o) acquire() {
|
||||
if atomic.AddInt32((*int32)(o), 1) != 1 {
|
||||
panic("BUG: invalid ref")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *int32o) Release() {
|
||||
if atomic.AddInt32((*int32)(o), -1) != 0 {
|
||||
panic("BUG: invalid ref")
|
||||
}
|
||||
}
|
||||
|
||||
type releaserFunc struct {
|
||||
fn func()
|
||||
value Value
|
||||
}
|
||||
|
||||
func (r releaserFunc) Release() {
|
||||
if r.fn != nil {
|
||||
r.fn()
|
||||
}
|
||||
}
|
||||
|
||||
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
|
||||
return c.Get(ns, key, func() (int, Value) {
|
||||
if relf != nil {
|
||||
return charge, releaserFunc{relf, value}
|
||||
}
|
||||
return charge, value
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacheMap(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
nsx := []struct {
|
||||
nobjects, nhandles, concurrent, repeat int
|
||||
}{
|
||||
{10000, 400, 50, 3},
|
||||
{100000, 1000, 100, 10},
|
||||
}
|
||||
|
||||
var (
|
||||
objects [][]int32o
|
||||
handles [][]unsafe.Pointer
|
||||
)
|
||||
|
||||
for _, x := range nsx {
|
||||
objects = append(objects, make([]int32o, x.nobjects))
|
||||
handles = append(handles, make([]unsafe.Pointer, x.nhandles))
|
||||
}
|
||||
|
||||
c := NewCache(nil)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
var done int32
|
||||
|
||||
for ns, x := range nsx {
|
||||
for i := 0; i < x.concurrent; i++ {
|
||||
wg.Add(1)
|
||||
go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for j := len(objects) * repeat; j >= 0; j-- {
|
||||
key := uint64(r.Intn(len(objects)))
|
||||
h := c.Get(uint64(ns), key, func() (int, Value) {
|
||||
o := &objects[key]
|
||||
o.acquire()
|
||||
return 1, o
|
||||
})
|
||||
if v := h.Value().(*int32o); v != &objects[key] {
|
||||
t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
|
||||
}
|
||||
if objects[key] != 1 {
|
||||
t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
|
||||
}
|
||||
if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}(ns, i, x.repeat, objects[ns], handles[ns])
|
||||
}
|
||||
|
||||
go func(handles []unsafe.Pointer) {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for atomic.LoadInt32(&done) == 0 {
|
||||
i := r.Intn(len(handles))
|
||||
h := (*Handle)(atomic.LoadPointer(&handles[i]))
|
||||
if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
|
||||
h.Release()
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}(handles[ns])
|
||||
}
|
||||
|
||||
go func() {
|
||||
handles := make([]*Handle, 100000)
|
||||
for atomic.LoadInt32(&done) == 0 {
|
||||
for i := range handles {
|
||||
handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
|
||||
return 1, 1
|
||||
})
|
||||
}
|
||||
for _, h := range handles {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
atomic.StoreInt32(&done, 1)
|
||||
|
||||
for _, handles0 := range handles {
|
||||
for i := range handles0 {
|
||||
h := (*Handle)(atomic.LoadPointer(&handles0[i]))
|
||||
if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for ns, objects0 := range objects {
|
||||
for i, o := range objects0 {
|
||||
if o != 0 {
|
||||
t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMap_NodesAndSize(t *testing.T) {
|
||||
c := NewCache(nil)
|
||||
if c.Nodes() != 0 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
|
||||
}
|
||||
if c.Size() != 0 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
|
||||
}
|
||||
set(c, 0, 1, 1, 1, nil)
|
||||
set(c, 0, 2, 2, 2, nil)
|
||||
set(c, 1, 1, 3, 3, nil)
|
||||
set(c, 2, 1, 4, 1, nil)
|
||||
if c.Nodes() != 4 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
|
||||
}
|
||||
if c.Size() != 7 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Capacity(t *testing.T) {
|
||||
c := NewCache(NewLRU(10))
|
||||
if c.Capacity() != 10 {
|
||||
t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
|
||||
}
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 2, nil).Release()
|
||||
set(c, 1, 1, 3, 3, nil).Release()
|
||||
set(c, 2, 1, 4, 1, nil).Release()
|
||||
set(c, 2, 2, 5, 1, nil).Release()
|
||||
set(c, 2, 3, 6, 1, nil).Release()
|
||||
set(c, 2, 4, 7, 1, nil).Release()
|
||||
set(c, 2, 5, 8, 1, nil).Release()
|
||||
if c.Nodes() != 7 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
|
||||
}
|
||||
if c.Size() != 10 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
|
||||
}
|
||||
c.SetCapacity(9)
|
||||
if c.Capacity() != 9 {
|
||||
t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
|
||||
}
|
||||
if c.Nodes() != 6 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
|
||||
}
|
||||
if c.Size() != 8 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMap_NilValue(t *testing.T) {
|
||||
c := NewCache(NewLRU(10))
|
||||
h := c.Get(0, 0, func() (size int, value Value) {
|
||||
return 1, nil
|
||||
})
|
||||
if h != nil {
|
||||
t.Error("cache handle is non-nil")
|
||||
}
|
||||
if c.Nodes() != 0 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
|
||||
}
|
||||
if c.Size() != 0 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_GetLatency(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
const (
|
||||
concurrentSet = 30
|
||||
concurrentGet = 3
|
||||
duration = 3 * time.Second
|
||||
delay = 3 * time.Millisecond
|
||||
maxkey = 100000
|
||||
)
|
||||
|
||||
var (
|
||||
set, getHit, getAll int32
|
||||
getMaxLatency, getDuration int64
|
||||
)
|
||||
|
||||
c := NewCache(NewLRU(5000))
|
||||
wg := &sync.WaitGroup{}
|
||||
until := time.Now().Add(duration)
|
||||
for i := 0; i < concurrentSet; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for time.Now().Before(until) {
|
||||
c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
|
||||
time.Sleep(delay)
|
||||
atomic.AddInt32(&set, 1)
|
||||
return 1, 1
|
||||
}).Release()
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
for i := 0; i < concurrentGet; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for {
|
||||
mark := time.Now()
|
||||
if mark.Before(until) {
|
||||
h := c.Get(0, uint64(r.Intn(maxkey)), nil)
|
||||
latency := int64(time.Now().Sub(mark))
|
||||
m := atomic.LoadInt64(&getMaxLatency)
|
||||
if latency > m {
|
||||
atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
|
||||
}
|
||||
atomic.AddInt64(&getDuration, latency)
|
||||
if h != nil {
|
||||
atomic.AddInt32(&getHit, 1)
|
||||
h.Release()
|
||||
}
|
||||
atomic.AddInt32(&getAll, 1)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
|
||||
t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
|
||||
set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
|
||||
|
||||
if getAvglatency > delay/3 {
|
||||
t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_HitMiss(t *testing.T) {
|
||||
cases := []struct {
|
||||
key uint64
|
||||
value string
|
||||
}{
|
||||
{1, "vvvvvvvvv"},
|
||||
{100, "v1"},
|
||||
{0, "v2"},
|
||||
{12346, "v3"},
|
||||
{777, "v4"},
|
||||
{999, "v5"},
|
||||
{7654, "v6"},
|
||||
{2, "v7"},
|
||||
{3, "v8"},
|
||||
{9, "v9"},
|
||||
}
|
||||
|
||||
setfin := 0
|
||||
c := NewCache(NewLRU(1000))
|
||||
for i, x := range cases {
|
||||
set(c, 0, x.key, x.value, len(x.value), func() {
|
||||
setfin++
|
||||
}).Release()
|
||||
for j, y := range cases {
|
||||
h := c.Get(0, y.key, nil)
|
||||
if j <= i {
|
||||
// should hit
|
||||
if h == nil {
|
||||
t.Errorf("case '%d' iteration '%d' is miss", i, j)
|
||||
} else {
|
||||
if x := h.Value().(releaserFunc).value.(string); x != y.value {
|
||||
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// should miss
|
||||
if h != nil {
|
||||
t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
|
||||
}
|
||||
}
|
||||
if h != nil {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, x := range cases {
|
||||
finalizerOk := false
|
||||
c.Delete(0, x.key, func() {
|
||||
finalizerOk = true
|
||||
})
|
||||
|
||||
if !finalizerOk {
|
||||
t.Errorf("case %d delete finalizer not executed", i)
|
||||
}
|
||||
|
||||
for j, y := range cases {
|
||||
h := c.Get(0, y.key, nil)
|
||||
if j > i {
|
||||
// should hit
|
||||
if h == nil {
|
||||
t.Errorf("case '%d' iteration '%d' is miss", i, j)
|
||||
} else {
|
||||
if x := h.Value().(releaserFunc).value.(string); x != y.value {
|
||||
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// should miss
|
||||
if h != nil {
|
||||
t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
|
||||
}
|
||||
}
|
||||
if h != nil {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if setfin != len(cases) {
|
||||
t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Eviction(t *testing.T) {
|
||||
c := NewCache(NewLRU(12))
|
||||
o1 := set(c, 0, 1, 1, 1, nil)
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
set(c, 0, 3, 3, 1, nil).Release()
|
||||
set(c, 0, 4, 4, 1, nil).Release()
|
||||
set(c, 0, 5, 5, 1, nil).Release()
|
||||
if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
|
||||
h.Release()
|
||||
}
|
||||
set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
|
||||
|
||||
for _, key := range []uint64{9, 2, 5, 1} {
|
||||
h := c.Get(0, key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
o1.Release()
|
||||
for _, key := range []uint64{1, 2, 5} {
|
||||
h := c.Get(0, key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
for _, key := range []uint64{3, 4, 9} {
|
||||
h := c.Get(0, key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Evict(t *testing.T) {
|
||||
c := NewCache(NewLRU(6))
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
set(c, 1, 1, 4, 1, nil).Release()
|
||||
set(c, 1, 2, 5, 1, nil).Release()
|
||||
set(c, 2, 1, 6, 1, nil).Release()
|
||||
set(c, 2, 2, 7, 1, nil).Release()
|
||||
|
||||
for ns := 0; ns < 3; ns++ {
|
||||
for key := 1; key < 3; key++ {
|
||||
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
|
||||
h.Release()
|
||||
} else {
|
||||
t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ok := c.Evict(0, 1); !ok {
|
||||
t.Error("first Cache.Evict on #0.1 return false")
|
||||
}
|
||||
if ok := c.Evict(0, 1); ok {
|
||||
t.Error("second Cache.Evict on #0.1 return true")
|
||||
}
|
||||
if h := c.Get(0, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
c.EvictNS(1)
|
||||
if h := c.Get(1, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
|
||||
}
|
||||
if h := c.Get(1, 2, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
c.EvictAll()
|
||||
for ns := 0; ns < 3; ns++ {
|
||||
for key := 1; key < 3; key++ {
|
||||
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
|
||||
t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Delete(t *testing.T) {
|
||||
delFuncCalled := 0
|
||||
delFunc := func() {
|
||||
delFuncCalled++
|
||||
}
|
||||
|
||||
c := NewCache(NewLRU(2))
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
|
||||
if ok := c.Delete(0, 1, delFunc); !ok {
|
||||
t.Error("Cache.Delete on #1 return false")
|
||||
}
|
||||
if h := c.Get(0, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
|
||||
}
|
||||
if ok := c.Delete(0, 1, delFunc); ok {
|
||||
t.Error("Cache.Delete on #1 return true")
|
||||
}
|
||||
|
||||
h2 := c.Get(0, 2, nil)
|
||||
if h2 == nil {
|
||||
t.Error("Cache.Get on #2 return nil")
|
||||
}
|
||||
if ok := c.Delete(0, 2, delFunc); !ok {
|
||||
t.Error("(1) Cache.Delete on #2 return false")
|
||||
}
|
||||
if ok := c.Delete(0, 2, delFunc); !ok {
|
||||
t.Error("(2) Cache.Delete on #2 return false")
|
||||
}
|
||||
|
||||
set(c, 0, 3, 3, 1, nil).Release()
|
||||
set(c, 0, 4, 4, 1, nil).Release()
|
||||
c.Get(0, 2, nil).Release()
|
||||
|
||||
for key := 2; key <= 4; key++ {
|
||||
if h := c.Get(0, uint64(key), nil); h != nil {
|
||||
h.Release()
|
||||
} else {
|
||||
t.Errorf("Cache.Get on #%d return nil", key)
|
||||
}
|
||||
}
|
||||
|
||||
h2.Release()
|
||||
if h := c.Get(0, 2, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
if delFuncCalled != 4 {
|
||||
t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Close(t *testing.T) {
|
||||
relFuncCalled := 0
|
||||
relFunc := func() {
|
||||
relFuncCalled++
|
||||
}
|
||||
delFuncCalled := 0
|
||||
delFunc := func() {
|
||||
delFuncCalled++
|
||||
}
|
||||
|
||||
c := NewCache(NewLRU(2))
|
||||
set(c, 0, 1, 1, 1, relFunc).Release()
|
||||
set(c, 0, 2, 2, 1, relFunc).Release()
|
||||
|
||||
h3 := set(c, 0, 3, 3, 1, relFunc)
|
||||
if h3 == nil {
|
||||
t.Error("Cache.Get on #3 return nil")
|
||||
}
|
||||
if ok := c.Delete(0, 3, delFunc); !ok {
|
||||
t.Error("Cache.Delete on #3 return false")
|
||||
}
|
||||
|
||||
c.Close()
|
||||
|
||||
if relFuncCalled != 3 {
|
||||
t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
|
||||
}
|
||||
if delFuncCalled != 1 {
|
||||
t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
|
||||
}
|
||||
}
|
||||
@@ -6,7 +6,7 @@
 
 package leveldb
 
-import "github.com/syndtr/goleveldb/leveldb/comparer"
+import "github.com/pingcap/goleveldb/leveldb/comparer"
 
 type iComparer struct {
 	ucmp comparer.Comparer
@@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string {
 }
 
 func (icmp *iComparer) Compare(a, b []byte) int {
-	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
 	if x == 0 {
-		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+		if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
 			x = -1
 		} else if m < n {
 			x = 1
@@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int {
 }
 
 func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
-	ua, ub := iKey(a).ukey(), iKey(b).ukey()
+	ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
 	dst = icmp.ucmp.Separator(dst, ua, ub)
 	if dst == nil {
 		return nil
 	}
 	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
-		dst = append(dst, kMaxNumBytes...)
+		dst = append(dst, keyMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
 		dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
 }
 
 func (icmp *iComparer) Successor(dst, b []byte) []byte {
-	ub := iKey(b).ukey()
+	ub := internalKey(b).ukey()
 	dst = icmp.ucmp.Successor(dst, ub)
 	if dst == nil {
 		return nil
 	}
 	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
-		dst = append(dst, kMaxNumBytes...)
+		dst = append(dst, keyMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
 		dst = append(dst, b[len(b)-8:]...)
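The iKey-to-internalKey rename does not alter the ordering rule visible in the Compare hunk: user keys are ordered by the wrapped comparer, and for equal user keys the entry with the larger sequence number sorts first. A standalone sketch of just that tie-break, not taken from the vendored code:

```go
package main

import "fmt"

// seqTieBreak mirrors the branch in iComparer.Compare above: for equal user
// keys, a larger sequence number sorts earlier (returns -1).
func seqTieBreak(m, n uint64) int {
	switch {
	case m > n:
		return -1
	case m < n:
		return 1
	default:
		return 0
	}
}

func main() {
	// The entry written at sequence 10 is ordered before the older one at 3.
	fmt.Println(seqTieBreak(10, 3)) // -1
}
```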
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/corrupt_test.go (generated, vendored, new file), 496 lines

@@ -0,0 +1,496 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/filter"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
const ctValSize = 1000
|
||||
|
||||
type dbCorruptHarness struct {
|
||||
dbHarness
|
||||
}
|
||||
|
||||
func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
|
||||
h := new(dbCorruptHarness)
|
||||
h.init(t, o)
|
||||
return h
|
||||
}
|
||||
|
||||
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
|
||||
return newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
BlockCacheCapacity: 100,
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) recover() {
|
||||
p := &h.dbHarness
|
||||
t := p.t
|
||||
|
||||
var err error
|
||||
p.db, err = Recover(h.stor, h.o)
|
||||
if err != nil {
|
||||
t.Fatal("Repair: got error: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) build(n int) {
|
||||
p := &h.dbHarness
|
||||
t := p.t
|
||||
db := p.db
|
||||
|
||||
batch := new(Batch)
|
||||
for i := 0; i < n; i++ {
|
||||
batch.Reset()
|
||||
batch.Put(tkey(i), tval(i, ctValSize))
|
||||
err := db.Write(batch, p.wo)
|
||||
if err != nil {
|
||||
t.Fatal("write error: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) {
|
||||
p := &h.dbHarness
|
||||
t := p.t
|
||||
db := p.db
|
||||
|
||||
batch := new(Batch)
|
||||
for i := range rnd.Perm(n) {
|
||||
batch.Reset()
|
||||
batch.Put(tkey(i), tval(i, ctValSize))
|
||||
err := db.Write(batch, p.wo)
|
||||
if err != nil {
|
||||
t.Fatal("write error: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
|
||||
p := &h.dbHarness
|
||||
t := p.t
|
||||
db := p.db
|
||||
|
||||
batch := new(Batch)
|
||||
for i := 0; i < n; i++ {
|
||||
batch.Reset()
|
||||
batch.Delete(tkey(rnd.Intn(max)))
|
||||
err := db.Write(batch, p.wo)
|
||||
if err != nil {
|
||||
t.Fatal("write error: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
|
||||
p := &h.dbHarness
|
||||
t := p.t
|
||||
|
||||
fds, _ := p.stor.List(ft)
|
||||
sortFds(fds)
|
||||
if fi < 0 {
|
||||
fi = len(fds) - 1
|
||||
}
|
||||
if fi >= len(fds) {
|
||||
t.Fatalf("no such file with type %q with index %d", ft, fi)
|
||||
}
|
||||
|
||||
fd := fds[fi]
|
||||
r, err := h.stor.Open(fd)
|
||||
if err != nil {
|
||||
t.Fatal("cannot open file: ", err)
|
||||
}
|
||||
x, err := r.Seek(0, 2)
|
||||
if err != nil {
|
||||
t.Fatal("cannot query file size: ", err)
|
||||
}
|
||||
m := int(x)
|
||||
if _, err := r.Seek(0, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
if -offset > m {
|
||||
offset = 0
|
||||
} else {
|
||||
offset = m + offset
|
||||
}
|
||||
}
|
||||
if offset > m {
|
||||
offset = m
|
||||
}
|
||||
if offset+n > m {
|
||||
n = m - offset
|
||||
}
|
||||
|
||||
buf := make([]byte, m)
|
||||
_, err = io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
t.Fatal("cannot read file: ", err)
|
||||
}
|
||||
r.Close()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
buf[offset+i] ^= 0x80
|
||||
}
|
||||
|
||||
err = h.stor.Remove(fd)
|
||||
if err != nil {
|
||||
t.Fatal("cannot remove old file: ", err)
|
||||
}
|
||||
w, err := h.stor.Create(fd)
|
||||
if err != nil {
|
||||
t.Fatal("cannot create new file: ", err)
|
||||
}
|
||||
_, err = w.Write(buf)
|
||||
if err != nil {
|
||||
t.Fatal("cannot write new file: ", err)
|
||||
}
|
||||
w.Close()
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
|
||||
fds, err := h.stor.List(ft)
|
||||
if err != nil {
|
||||
h.t.Fatal("get files: ", err)
|
||||
}
|
||||
for _, fd := range fds {
|
||||
if err := h.stor.Remove(fd); err != nil {
|
||||
h.t.Error("remove file: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) {
|
||||
fds, err := h.stor.List(ft)
|
||||
if err != nil {
|
||||
h.t.Fatal("get files: ", err)
|
||||
}
|
||||
for _, fd := range fds {
|
||||
if err := h.stor.ForceRemove(fd); err != nil {
|
||||
h.t.Error("remove file: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
|
||||
fds, err := h.stor.List(ft)
|
||||
if err != nil {
|
||||
h.t.Fatal("get files: ", err)
|
||||
}
|
||||
fd := fds[rand.Intn(len(fds))]
|
||||
h.t.Logf("removing file @%d", fd.Num)
|
||||
if err := h.stor.Remove(fd); err != nil {
|
||||
h.t.Error("remove file: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *dbCorruptHarness) check(min, max int) {
|
||||
p := &h.dbHarness
|
||||
t := p.t
|
||||
db := p.db
|
||||
|
||||
var n, badk, badv, missed, good int
|
||||
iter := db.NewIterator(nil, p.ro)
|
||||
for iter.Next() {
|
||||
k := 0
|
||||
fmt.Sscanf(string(iter.Key()), "%d", &k)
|
||||
if k < n {
|
||||
badk++
|
||||
continue
|
||||
}
|
||||
missed += k - n
|
||||
n = k + 1
|
||||
if !bytes.Equal(iter.Value(), tval(k, ctValSize)) {
|
||||
badv++
|
||||
} else {
|
||||
good++
|
||||
}
|
||||
}
|
||||
err := iter.Error()
|
||||
iter.Release()
|
||||
t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v",
|
||||
min, max, good, badk, badv, missed, err)
|
||||
if good < min || good > max {
|
||||
t.Errorf("good entries number not in range")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorruptDB_Journal(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.build(100)
|
||||
h.check(100, 100)
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeJournal, -1, 19, 1)
|
||||
h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)
|
||||
|
||||
h.openDB()
|
||||
h.check(36, 36)
|
||||
}
|
||||
|
||||
func TestCorruptDB_Table(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.build(100)
|
||||
h.compactMem()
|
||||
h.compactRangeAt(0, "", "")
|
||||
h.compactRangeAt(1, "", "")
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeTable, -1, 100, 1)
|
||||
|
||||
h.openDB()
|
||||
h.check(99, 99)
|
||||
}
|
||||
|
||||
func TestCorruptDB_TableIndex(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.build(10000)
|
||||
h.compactMem()
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeTable, -1, -2000, 500)
|
||||
|
||||
h.openDB()
|
||||
h.check(5000, 9999)
|
||||
}
|
||||
|
||||
func TestCorruptDB_MissingManifest(t *testing.T) {
|
||||
rnd := rand.New(rand.NewSource(0x0badda7a))
|
||||
h := newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
BlockCacheCapacity: 100,
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
WriteBuffer: 1000 * 60,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
h.build(1000)
|
||||
h.compactMem()
|
||||
h.buildShuffled(1000, rnd)
|
||||
h.compactMem()
|
||||
h.deleteRand(500, 1000, rnd)
|
||||
h.compactMem()
|
||||
h.buildShuffled(1000, rnd)
|
||||
h.compactMem()
|
||||
h.deleteRand(500, 1000, rnd)
|
||||
h.compactMem()
|
||||
h.buildShuffled(1000, rnd)
|
||||
h.compactMem()
|
||||
h.closeDB()
|
||||
|
||||
h.forceRemoveAll(storage.TypeManifest)
|
||||
h.openAssert(false)
|
||||
|
||||
h.recover()
|
||||
h.check(1000, 1000)
|
||||
h.build(1000)
|
||||
h.compactMem()
|
||||
h.compactRange("", "")
|
||||
h.closeDB()
|
||||
|
||||
h.recover()
|
||||
h.check(1000, 1000)
|
||||
}
|
||||
|
||||
func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.put("foo", "v1")
|
||||
h.put("foo", "v2")
|
||||
h.put("foo", "v3")
|
||||
h.put("foo", "v4")
|
||||
h.put("foo", "v5")
|
||||
h.closeDB()
|
||||
|
||||
h.recover()
|
||||
h.getVal("foo", "v5")
|
||||
h.put("foo", "v6")
|
||||
h.getVal("foo", "v6")
|
||||
|
||||
h.reopenDB()
|
||||
h.getVal("foo", "v6")
|
||||
}
|
||||
|
||||
func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.put("foo", "v1")
|
||||
h.put("foo", "v2")
|
||||
h.put("foo", "v3")
|
||||
h.compactMem()
|
||||
h.put("foo", "v4")
|
||||
h.put("foo", "v5")
|
||||
h.compactMem()
|
||||
h.closeDB()
|
||||
|
||||
h.recover()
|
||||
h.getVal("foo", "v5")
|
||||
h.put("foo", "v6")
|
||||
h.getVal("foo", "v6")
|
||||
|
||||
h.reopenDB()
|
||||
h.getVal("foo", "v6")
|
||||
}
|
||||
|
||||
func TestCorruptDB_CorruptedManifest(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.put("foo", "hello")
|
||||
h.compactMem()
|
||||
h.compactRange("", "")
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeManifest, -1, 0, 1000)
|
||||
h.openAssert(false)
|
||||
|
||||
h.recover()
|
||||
h.getVal("foo", "hello")
|
||||
}
|
||||
|
||||
func TestCorruptDB_CompactionInputError(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.build(10)
|
||||
h.compactMem()
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeTable, -1, 100, 1)
|
||||
|
||||
h.openDB()
|
||||
h.check(9, 9)
|
||||
|
||||
h.build(10000)
|
||||
h.check(10000, 10000)
|
||||
}
|
||||
|
||||
func TestCorruptDB_UnrelatedKeys(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.build(10)
|
||||
h.compactMem()
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeTable, -1, 100, 1)
|
||||
|
||||
h.openDB()
|
||||
h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
|
||||
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
|
||||
h.compactMem()
|
||||
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
|
||||
}
|
||||
|
||||
func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.put("a", "v1")
|
||||
h.put("b", "v1")
|
||||
h.compactMem()
|
||||
h.put("a", "v2")
|
||||
h.put("b", "v2")
|
||||
h.compactMem()
|
||||
h.put("a", "v3")
|
||||
h.put("b", "v3")
|
||||
h.compactMem()
|
||||
h.put("c", "v0")
|
||||
h.put("d", "v0")
|
||||
h.compactMem()
|
||||
h.compactRangeAt(1, "", "")
|
||||
h.closeDB()
|
||||
|
||||
h.recover()
|
||||
h.getVal("a", "v3")
|
||||
h.getVal("b", "v3")
|
||||
h.getVal("c", "v0")
|
||||
h.getVal("d", "v0")
|
||||
}
|
||||
|
||||
func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.put("a", "v1")
|
||||
h.put("b", "v1")
|
||||
h.compactMem()
|
||||
h.put("a", "v2")
|
||||
h.put("b", "v2")
|
||||
h.compactMem()
|
||||
h.put("a", "v3")
|
||||
h.put("b", "v3")
|
||||
h.compactMem()
|
||||
h.put("c", "v0")
|
||||
h.put("d", "v0")
|
||||
h.compactMem()
|
||||
h.compactRangeAt(0, "", "")
|
||||
h.closeDB()
|
||||
|
||||
h.recover()
|
||||
h.getVal("a", "v3")
|
||||
h.getVal("b", "v3")
|
||||
h.getVal("c", "v0")
|
||||
h.getVal("d", "v0")
|
||||
}
|
||||
|
||||
func TestCorruptDB_MissingTableFiles(t *testing.T) {
|
||||
h := newDbCorruptHarness(t)
|
||||
defer h.close()
|
||||
|
||||
h.put("a", "v1")
|
||||
h.put("b", "v1")
|
||||
h.compactMem()
|
||||
h.put("c", "v2")
|
||||
h.put("d", "v2")
|
||||
h.compactMem()
|
||||
h.put("e", "v3")
|
||||
h.put("f", "v3")
|
||||
h.closeDB()
|
||||
|
||||
h.removeOne(storage.TypeTable)
|
||||
h.openAssert(false)
|
||||
}
|
||||
|
||||
func TestCorruptDB_RecoverTable(t *testing.T) {
|
||||
h := newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 112 * opt.KiB,
|
||||
CompactionTableSize: 90 * opt.KiB,
|
||||
Filter: filter.NewBloomFilter(10),
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
h.build(1000)
|
||||
h.compactMem()
|
||||
h.compactRangeAt(0, "", "")
|
||||
h.compactRangeAt(1, "", "")
|
||||
seq := h.db.seq
|
||||
h.closeDB()
|
||||
h.corrupt(storage.TypeTable, 0, 1000, 1)
|
||||
h.corrupt(storage.TypeTable, 3, 10000, 1)
|
||||
// Corrupted filter shouldn't affect recovery.
|
||||
h.corrupt(storage.TypeTable, 3, 113888, 10)
|
||||
h.corrupt(storage.TypeTable, -1, 20000, 1)
|
||||
|
||||
h.recover()
|
||||
if h.db.seq != seq {
|
||||
t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
|
||||
}
|
||||
h.check(985, 985)
|
||||
}
|
||||
@ -17,14 +17,14 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/table"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/journal"
|
||||
"github.com/pingcap/goleveldb/leveldb/memdb"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/table"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// DB is a LevelDB database.
|
||||
@ -36,14 +36,14 @@ type DB struct {
|
||||
s *session
|
||||
|
||||
// MemDB.
|
||||
memMu sync.RWMutex
|
||||
memPool chan *memdb.DB
|
||||
mem, frozenMem *memDB
|
||||
journal *journal.Writer
|
||||
journalWriter storage.Writer
|
||||
journalFile storage.File
|
||||
frozenJournalFile storage.File
|
||||
frozenSeq uint64
|
||||
memMu sync.RWMutex
|
||||
memPool chan *memdb.DB
|
||||
mem, frozenMem *memDB
|
||||
journal *journal.Writer
|
||||
journalWriter storage.Writer
|
||||
journalFd storage.FileDesc
|
||||
frozenJournalFd storage.FileDesc
|
||||
frozenSeq uint64
|
||||
|
||||
// Snapshot.
|
||||
snapsMu sync.Mutex
|
||||
@ -61,8 +61,10 @@ type DB struct {
|
||||
writeDelayN int
|
||||
journalC chan *Batch
|
||||
journalAckC chan error
|
||||
tr *Transaction
|
||||
|
||||
// Compaction.
|
||||
compCommitLk sync.Mutex
|
||||
tcompCmdC chan cCmd
|
||||
tcompPauseC chan chan<- struct{}
|
||||
mcompCmdC chan cCmd
|
||||
@ -70,7 +72,8 @@ type DB struct {
|
||||
compPerErrC chan error
|
||||
compErrSetC chan error
|
||||
compWriteLocking bool
|
||||
compStats []cStats
|
||||
compStats cStats
|
||||
memdbMaxLevel int // For testing.
|
||||
|
||||
// Close.
|
||||
closeW sync.WaitGroup
|
||||
@ -104,7 +107,6 @@ func openDB(s *session) (*DB, error) {
|
||||
compErrC: make(chan error),
|
||||
compPerErrC: make(chan error),
|
||||
compErrSetC: make(chan error),
|
||||
compStats: make([]cStats, s.o.GetNumLevel()),
|
||||
// Close
|
||||
closeC: make(chan struct{}),
|
||||
}
|
||||
@ -209,7 +211,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
// The returned DB instance is goroutine-safe.
|
||||
// The DB must be closed after use, by calling Close method.
|
||||
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
|
||||
stor, err := storage.OpenFile(path)
|
||||
stor, err := storage.OpenFile(path, o.GetReadOnly())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -259,7 +261,7 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
// The returned DB instance is goroutine-safe.
|
||||
// The DB must be closed after use, by calling Close method.
|
||||
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
|
||||
stor, err := storage.OpenFile(path)
|
||||
stor, err := storage.OpenFile(path, false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -278,12 +280,11 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
o.Strict &= ^opt.StrictReader
|
||||
|
||||
// Get all tables and sort them by file number.
|
||||
tableFiles_, err := s.getFiles(storage.TypeTable)
|
||||
fds, err := s.stor.List(storage.TypeTable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tableFiles := files(tableFiles_)
|
||||
tableFiles.sort()
|
||||
sortFds(fds)
|
||||
|
||||
var (
|
||||
maxSeq uint64
|
||||
@ -296,17 +297,17 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
rec = &sessionRecord{}
|
||||
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
|
||||
)
|
||||
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
|
||||
tmp = s.newTemp()
|
||||
writer, err := tmp.Create()
|
||||
buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
|
||||
tmpFd = s.newTemp()
|
||||
writer, err := s.stor.Create(tmpFd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
writer.Close()
|
||||
if err != nil {
|
||||
tmp.Remove()
|
||||
tmp = nil
|
||||
s.stor.Remove(tmpFd)
|
||||
tmpFd = storage.FileDesc{}
|
||||
}
|
||||
}()
|
||||
|
||||
@ -314,7 +315,7 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
tw := table.NewWriter(writer, o)
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
if validIkey(key) {
|
||||
if validInternalKey(key) {
|
||||
err = tw.Append(key, iter.Value())
|
||||
if err != nil {
|
||||
return
|
||||
@ -338,9 +339,9 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
size = int64(tw.BytesLen())
|
||||
return
|
||||
}
|
||||
recoverTable := func(file storage.File) error {
|
||||
s.logf("table@recovery recovering @%d", file.Num())
|
||||
reader, err := file.Open()
|
||||
recoverTable := func(fd storage.FileDesc) error {
|
||||
s.logf("table@recovery recovering @%d", fd.Num)
|
||||
reader, err := s.stor.Open(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -362,7 +363,7 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
tgoodKey, tcorruptedKey, tcorruptedBlock int
|
||||
imin, imax []byte
|
||||
)
|
||||
tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
|
||||
tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -370,7 +371,7 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
|
||||
itererr.SetErrorCallback(func(err error) {
|
||||
if errors.IsCorrupted(err) {
|
||||
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
|
||||
s.logf("table@recovery block corruption @%d %q", fd.Num, err)
|
||||
tcorruptedBlock++
|
||||
}
|
||||
})
|
||||
@ -379,7 +380,7 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
// Scan the table.
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
_, seq, _, kerr := parseIkey(key)
|
||||
_, seq, _, kerr := parseInternalKey(key)
|
||||
if kerr != nil {
|
||||
tcorruptedKey++
|
||||
continue
|
||||
@ -405,23 +406,23 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
|
||||
if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
|
||||
droppedTable++
|
||||
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
|
||||
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
|
||||
return nil
|
||||
}
|
||||
|
||||
if tgoodKey > 0 {
|
||||
if tcorruptedKey > 0 || tcorruptedBlock > 0 {
|
||||
// Rebuild the table.
|
||||
s.logf("table@recovery rebuilding @%d", file.Num())
|
||||
s.logf("table@recovery rebuilding @%d", fd.Num)
|
||||
iter := tr.NewIterator(nil, nil)
|
||||
tmp, newSize, err := buildTable(iter)
|
||||
tmpFd, newSize, err := buildTable(iter)
|
||||
iter.Release()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
closed = true
|
||||
reader.Close()
|
||||
if err := file.Replace(tmp); err != nil {
|
||||
if err := s.stor.Rename(tmpFd, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
size = newSize
|
||||
@ -431,30 +432,30 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
}
|
||||
recoveredKey += tgoodKey
|
||||
// Add table to level 0.
|
||||
rec.addTable(0, file.Num(), uint64(size), imin, imax)
|
||||
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
|
||||
rec.addTable(0, fd.Num, size, imin, imax)
|
||||
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
|
||||
} else {
|
||||
droppedTable++
|
||||
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
|
||||
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recover all tables.
|
||||
if len(tableFiles) > 0 {
|
||||
s.logf("table@recovery F·%d", len(tableFiles))
|
||||
if len(fds) > 0 {
|
||||
s.logf("table@recovery F·%d", len(fds))
|
||||
|
||||
// Mark file number as used.
|
||||
s.markFileNum(tableFiles[len(tableFiles)-1].Num())
|
||||
s.markFileNum(fds[len(fds)-1].Num)
|
||||
|
||||
for _, file := range tableFiles {
|
||||
if err := recoverTable(file); err != nil {
|
||||
for _, fd := range fds {
|
||||
if err := recoverTable(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
|
||||
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
|
||||
}
|
||||
|
||||
// Set sequence number.
|
||||
@ -471,31 +472,31 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
|
||||
func (db *DB) recoverJournal() error {
|
||||
// Get all journals and sort them by file number.
|
||||
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
|
||||
rawFds, err := db.s.stor.List(storage.TypeJournal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files(allJournalFiles).sort()
|
||||
sortFds(rawFds)
|
||||
|
||||
// Journals that will be recovered.
|
||||
var recJournalFiles []storage.File
|
||||
for _, jf := range allJournalFiles {
|
||||
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
|
||||
recJournalFiles = append(recJournalFiles, jf)
|
||||
var fds []storage.FileDesc
|
||||
for _, fd := range rawFds {
|
||||
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
|
||||
fds = append(fds, fd)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
of storage.File // Obsolete file.
|
||||
ofd storage.FileDesc // Obsolete file.
|
||||
rec = &sessionRecord{}
|
||||
)
|
||||
|
||||
// Recover journals.
|
||||
if len(recJournalFiles) > 0 {
|
||||
db.logf("journal@recovery F·%d", len(recJournalFiles))
|
||||
if len(fds) > 0 {
|
||||
db.logf("journal@recovery F·%d", len(fds))
|
||||
|
||||
// Mark file number as used.
|
||||
db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
|
||||
db.s.markFileNum(fds[len(fds)-1].Num)
|
||||
|
||||
var (
|
||||
// Options.
|
||||
@ -509,31 +510,31 @@ func (db *DB) recoverJournal() error {
|
||||
batch = &Batch{}
|
||||
)
|
||||
|
||||
for _, jf := range recJournalFiles {
|
||||
db.logf("journal@recovery recovering @%d", jf.Num())
|
||||
for _, fd := range fds {
|
||||
db.logf("journal@recovery recovering @%d", fd.Num)
|
||||
|
||||
fr, err := jf.Open()
|
||||
fr, err := db.s.stor.Open(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create or reset journal reader instance.
|
||||
if jr == nil {
|
||||
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
|
||||
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
|
||||
} else {
|
||||
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
|
||||
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
|
||||
}
|
||||
|
||||
// Flush memdb and remove obsolete journal file.
|
||||
if of != nil {
|
||||
if !ofd.Nil() {
|
||||
if mdb.Len() > 0 {
|
||||
if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
|
||||
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
|
||||
fr.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rec.setJournalNum(jf.Num())
|
||||
rec.setJournalNum(fd.Num)
|
||||
rec.setSeqNum(db.seq)
|
||||
if err := db.s.commit(rec); err != nil {
|
||||
fr.Close()
|
||||
@ -541,8 +542,8 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
rec.resetAddedTables()
|
||||
|
||||
of.Remove()
|
||||
of = nil
|
||||
db.s.stor.Remove(ofd)
|
||||
ofd = storage.FileDesc{}
|
||||
}
|
||||
|
||||
// Replay journal to memdb.
|
||||
@ -555,7 +556,7 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
@ -566,7 +567,7 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
|
||||
if !strict && errors.IsCorrupted(err) {
|
||||
@ -576,7 +577,7 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
|
||||
// Save sequence number.
|
||||
@ -594,7 +595,7 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
of = jf
|
||||
ofd = fd
|
||||
}
|
||||
|
||||
// Flush the last memdb.
|
||||
@ -611,7 +612,7 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
// Commit.
|
||||
rec.setJournalNum(db.journalFile.Num())
|
||||
rec.setJournalNum(db.journalFd.Num)
|
||||
rec.setSeqNum(db.seq)
|
||||
if err := db.s.commit(rec); err != nil {
|
||||
// Close journal on error.
|
||||
@ -623,8 +624,8 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
// Remove the last obsolete journal file.
|
||||
if of != nil {
|
||||
of.Remove()
|
||||
if !ofd.Nil() {
|
||||
db.s.stor.Remove(ofd)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -632,17 +633,17 @@ func (db *DB) recoverJournal() error {
|
||||
|
||||
func (db *DB) recoverJournalRO() error {
|
||||
// Get all journals and sort them by file number.
|
||||
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
|
||||
rawFds, err := db.s.stor.List(storage.TypeJournal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files(allJournalFiles).sort()
|
||||
sortFds(rawFds)
|
||||
|
||||
// Journals that will be recovered.
|
||||
var recJournalFiles []storage.File
|
||||
for _, jf := range allJournalFiles {
|
||||
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
|
||||
recJournalFiles = append(recJournalFiles, jf)
|
||||
var fds []storage.FileDesc
|
||||
for _, fd := range rawFds {
|
||||
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
|
||||
fds = append(fds, fd)
|
||||
}
|
||||
}
|
||||
|
||||
@ -656,8 +657,8 @@ func (db *DB) recoverJournalRO() error {
|
||||
)
|
||||
|
||||
// Recover journals.
|
||||
if len(recJournalFiles) > 0 {
|
||||
db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
|
||||
if len(fds) > 0 {
|
||||
db.logf("journal@recovery RO·Mode F·%d", len(fds))
|
||||
|
||||
var (
|
||||
jr *journal.Reader
|
||||
@ -665,19 +666,19 @@ func (db *DB) recoverJournalRO() error {
|
||||
batch = &Batch{}
|
||||
)
|
||||
|
||||
for _, jf := range recJournalFiles {
|
||||
db.logf("journal@recovery recovering @%d", jf.Num())
|
||||
for _, fd := range fds {
|
||||
db.logf("journal@recovery recovering @%d", fd.Num)
|
||||
|
||||
fr, err := jf.Open()
|
||||
fr, err := db.s.stor.Open(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create or reset journal reader instance.
|
||||
if jr == nil {
|
||||
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
|
||||
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
|
||||
} else {
|
||||
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
|
||||
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
|
||||
}
|
||||
|
||||
// Replay journal to memdb.
|
||||
@ -689,7 +690,7 @@ func (db *DB) recoverJournalRO() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
@ -700,7 +701,7 @@ func (db *DB) recoverJournalRO() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
|
||||
if !strict && errors.IsCorrupted(err) {
|
||||
@ -710,7 +711,7 @@ func (db *DB) recoverJournalRO() error {
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
|
||||
// Save sequence number.
|
||||
@ -727,46 +728,35 @@ func (db *DB) recoverJournalRO() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
ikey := newIkey(key, seq, ktSeek)
|
||||
|
||||
em, fm := db.getMems()
|
||||
for _, m := range [...]*memDB{em, fm} {
|
||||
if m == nil {
|
||||
continue
|
||||
func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
|
||||
mk, mv, err := mdb.Find(ikey)
|
||||
if err == nil {
|
||||
ukey, _, kt, kerr := parseInternalKey(mk)
|
||||
if kerr != nil {
|
||||
// Shouldn't have had happen.
|
||||
panic(kerr)
|
||||
}
|
||||
defer m.decref()
|
||||
if icmp.uCompare(ukey, ikey.ukey()) == 0 {
|
||||
if kt == keyTypeDel {
|
||||
return true, nil, ErrNotFound
|
||||
}
|
||||
return true, mv, nil
|
||||
|
||||
mk, mv, me := m.Find(ikey)
|
||||
if me == nil {
|
||||
ukey, _, kt, kerr := parseIkey(mk)
|
||||
if kerr != nil {
|
||||
// Shouldn't have had happen.
|
||||
panic(kerr)
|
||||
}
|
||||
if db.s.icmp.uCompare(ukey, key) == 0 {
|
||||
if kt == ktDel {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return append([]byte{}, mv...), nil
|
||||
}
|
||||
} else if me != ErrNotFound {
|
||||
return nil, me
|
||||
}
|
||||
}
|
||||
|
||||
v := db.s.version()
|
||||
value, cSched, err := v.get(ikey, ro, false)
|
||||
v.release()
|
||||
if cSched {
|
||||
// Trigger table compaction.
|
||||
db.compSendTrigger(db.tcompCmdC)
|
||||
} else if err != ErrNotFound {
|
||||
return true, nil, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
|
||||
ikey := newIkey(key, seq, ktSeek)
|
||||
func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
|
||||
|
||||
if auxm != nil {
|
||||
if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
|
||||
return append([]byte{}, mv...), me
|
||||
}
|
||||
}
|
||||
|
||||
em, fm := db.getMems()
|
||||
for _, m := range [...]*memDB{em, fm} {
|
||||
@ -775,30 +765,55 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err er
|
||||
}
|
||||
defer m.decref()
|
||||
|
||||
mk, _, me := m.Find(ikey)
|
||||
if me == nil {
|
||||
ukey, _, kt, kerr := parseIkey(mk)
|
||||
if kerr != nil {
|
||||
// Shouldn't have had happen.
|
||||
panic(kerr)
|
||||
}
|
||||
if db.s.icmp.uCompare(ukey, key) == 0 {
|
||||
if kt == ktDel {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
} else if me != ErrNotFound {
|
||||
return false, me
|
||||
if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
|
||||
return append([]byte{}, mv...), me
|
||||
}
|
||||
}
|
||||
|
||||
v := db.s.version()
|
||||
_, cSched, err := v.get(ikey, ro, true)
|
||||
value, cSched, err := v.get(auxt, ikey, ro, false)
|
||||
v.release()
|
||||
if cSched {
|
||||
// Trigger table compaction.
|
||||
db.compSendTrigger(db.tcompCmdC)
|
||||
db.compTrigger(db.tcompCmdC)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nilIfNotFound(err error) error {
|
||||
if err == ErrNotFound {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
|
||||
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
|
||||
|
||||
if auxm != nil {
|
||||
if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
|
||||
return me == nil, nilIfNotFound(me)
|
||||
}
|
||||
}
|
||||
|
||||
em, fm := db.getMems()
|
||||
for _, m := range [...]*memDB{em, fm} {
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
defer m.decref()
|
||||
|
||||
if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
|
||||
return me == nil, nilIfNotFound(me)
|
||||
}
|
||||
}
|
||||
|
||||
v := db.s.version()
|
||||
_, cSched, err := v.get(auxt, ikey, ro, true)
|
||||
v.release()
|
||||
if cSched {
|
||||
// Trigger table compaction.
|
||||
db.compTrigger(db.tcompCmdC)
|
||||
}
|
||||
if err == nil {
|
||||
ret = true
|
||||
@ -822,7 +837,7 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
|
||||
se := db.acquireSnapshot()
|
||||
defer db.releaseSnapshot(se)
|
||||
return db.get(key, se.seq, ro)
|
||||
return db.get(nil, nil, key, se.seq, ro)
|
||||
}
|
||||
|
||||
// Has returns true if the DB does contain the given key.
|
||||
@ -836,11 +851,11 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
|
||||
|
||||
se := db.acquireSnapshot()
|
||||
defer db.releaseSnapshot(se)
|
||||
return db.has(key, se.seq, ro)
|
||||
return db.has(nil, nil, key, se.seq, ro)
|
||||
}
|
||||
|
||||
// NewIterator returns an iterator for the latest snapshot of the
|
||||
// uderlying DB.
|
||||
// underlying DB.
|
||||
// The returned iterator is not goroutine-safe, but it is safe to use
|
||||
// multiple iterators concurrently, with each in a dedicated goroutine.
|
||||
// It is also safe to use an iterator concurrently with modifying its
|
||||
@ -864,7 +879,7 @@ func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Itera
|
||||
defer db.releaseSnapshot(se)
|
||||
// Iterator holds 'version' lock, 'version' is immutable so snapshot
|
||||
// can be released after iterator created.
|
||||
return db.newIterator(se.seq, slice, ro)
|
||||
return db.newIterator(nil, nil, se.seq, slice, ro)
|
||||
}
|
||||
|
||||
// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
|
||||
@ -920,7 +935,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
var level uint
|
||||
var rest string
|
||||
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
|
||||
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
|
||||
if n != 1 {
|
||||
err = ErrNotFound
|
||||
} else {
|
||||
value = fmt.Sprint(v.tLen(int(level)))
|
||||
@ -929,8 +944,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
value = "Compactions\n" +
|
||||
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
|
||||
"-------+------------+---------------+---------------+---------------+---------------\n"
|
||||
for level, tables := range v.tables {
|
||||
duration, read, write := db.compStats[level].get()
|
||||
for level, tables := range v.levels {
|
||||
duration, read, write := db.compStats.getStat(level)
|
||||
if len(tables) == 0 && duration == 0 {
|
||||
continue
|
||||
}
|
||||
@ -939,10 +954,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
float64(read)/1048576.0, float64(write)/1048576.0)
|
||||
}
|
||||
case p == "sstables":
|
||||
for level, tables := range v.tables {
|
||||
for level, tables := range v.levels {
|
||||
value += fmt.Sprintf("--- level %d ---\n", level)
|
||||
for _, t := range tables {
|
||||
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
|
||||
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
|
||||
}
|
||||
}
|
||||
case p == "blockpool":
|
||||
@ -982,8 +997,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
|
||||
|
||||
sizes := make(Sizes, 0, len(ranges))
|
||||
for _, r := range ranges {
|
||||
imin := newIkey(r.Start, kMaxSeq, ktSeek)
|
||||
imax := newIkey(r.Limit, kMaxSeq, ktSeek)
|
||||
imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
|
||||
imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
|
||||
start, err := v.offsetOf(imin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -992,7 +1007,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var size uint64
|
||||
var size int64
|
||||
if limit >= start {
|
||||
size = limit - start
|
||||
}
|
||||
@ -1002,8 +1017,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
|
||||
return sizes, nil
|
||||
}
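SizeOf converts each user-key range into internal seek keys at the maximum sequence number and reports the approximate on-disk bytes between their offsets. A hedged usage sketch; the database path and key bounds are made up for illustration, and Sizes.Sum is assumed to be the aggregate helper this package provides:

package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb"
	"github.com/pingcap/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Approximate on-disk size of all keys in ["a", "z").
	sizes, err := db.SizeOf([]util.Range{{Start: []byte("a"), Limit: []byte("z")}})
	if err != nil {
		panic(err)
	}
	fmt.Println("approx bytes:", sizes.Sum())
}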
|
||||
|
||||
// Close closes the DB. This will also releases any outstanding snapshot and
|
||||
// abort any in-flight compaction.
|
||||
// Close closes the DB. This will also release any outstanding snapshot,
|
||||
// abort any in-flight compaction and discard any open transaction.
|
||||
//
|
||||
// It is not safe to close a DB until all outstanding iterators are released.
|
||||
// It is valid to call Close multiple times. Other methods should not be
|
||||
@ -1032,11 +1047,18 @@ func (db *DB) Close() error {
|
||||
// Signal all goroutines.
|
||||
close(db.closeC)
|
||||
|
||||
// Discard open transaction.
|
||||
if db.tr != nil {
|
||||
db.tr.Discard()
|
||||
}
|
||||
|
||||
// Acquire writer lock.
|
||||
db.writeLockC <- struct{}{}
|
||||
|
||||
// Wait for all goroutines to exit.
|
||||
db.closeW.Wait()
|
||||
|
||||
// Lock writer and closes journal.
|
||||
db.writeLockC <- struct{}{}
|
||||
// Closes journal.
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
db.journalWriter.Close()
|
||||
@ -1063,8 +1085,6 @@ func (db *DB) Close() error {
|
||||
db.frozenMem = nil
|
||||
db.journal = nil
|
||||
db.journalWriter = nil
|
||||
db.journalFile = nil
|
||||
db.frozenJournalFile = nil
|
||||
db.closer = nil
|
||||
|
||||
return err
|
||||
@ -10,57 +10,78 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
|
||||
)
|
||||
|
||||
type cStats struct {
|
||||
sync.Mutex
|
||||
type cStat struct {
|
||||
duration time.Duration
|
||||
read uint64
|
||||
write uint64
|
||||
read int64
|
||||
write int64
|
||||
}
|
||||
|
||||
func (p *cStats) add(n *cStatsStaging) {
|
||||
p.Lock()
|
||||
func (p *cStat) add(n *cStatStaging) {
|
||||
p.duration += n.duration
|
||||
p.read += n.read
|
||||
p.write += n.write
|
||||
p.Unlock()
|
||||
}
|
||||
|
||||
func (p *cStats) get() (duration time.Duration, read, write uint64) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
func (p *cStat) get() (duration time.Duration, read, write int64) {
|
||||
return p.duration, p.read, p.write
|
||||
}
|
||||
|
||||
type cStatsStaging struct {
|
||||
type cStatStaging struct {
|
||||
start time.Time
|
||||
duration time.Duration
|
||||
on bool
|
||||
read uint64
|
||||
write uint64
|
||||
read int64
|
||||
write int64
|
||||
}
|
||||
|
||||
func (p *cStatsStaging) startTimer() {
|
||||
func (p *cStatStaging) startTimer() {
|
||||
if !p.on {
|
||||
p.start = time.Now()
|
||||
p.on = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *cStatsStaging) stopTimer() {
|
||||
func (p *cStatStaging) stopTimer() {
|
||||
if p.on {
|
||||
p.duration += time.Since(p.start)
|
||||
p.on = false
|
||||
}
|
||||
}
|
||||
|
||||
type cStats struct {
|
||||
lk sync.Mutex
|
||||
stats []cStat
|
||||
}
|
||||
|
||||
func (p *cStats) addStat(level int, n *cStatStaging) {
|
||||
p.lk.Lock()
|
||||
if level >= len(p.stats) {
|
||||
newStats := make([]cStat, level+1)
|
||||
copy(newStats, p.stats)
|
||||
p.stats = newStats
|
||||
}
|
||||
p.stats[level].add(n)
|
||||
p.lk.Unlock()
|
||||
}
|
||||
|
||||
func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
|
||||
p.lk.Lock()
|
||||
defer p.lk.Unlock()
|
||||
if level < len(p.stats) {
|
||||
return p.stats[level].get()
|
||||
}
|
||||
return
|
||||
}
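The reworked cStats keeps one cStat per level in a slice that is grown on demand under a mutex, instead of a fixed array sized from the option's level count. A small standalone sketch of the same grow-on-demand pattern; the type and field names here are simplified stand-ins, not the package's:

package main

import (
	"fmt"
	"sync"
)

type levelStat struct{ read, write int64 }

type levelStats struct {
	mu    sync.Mutex
	stats []levelStat
}

// add records bytes read/written for a level, growing the slice if that
// level has not been seen before.
func (s *levelStats) add(level int, read, write int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if level >= len(s.stats) {
		grown := make([]levelStat, level+1)
		copy(grown, s.stats)
		s.stats = grown
	}
	s.stats[level].read += read
	s.stats[level].write += write
}

// get returns the recorded totals, or zeros for a level never seen.
func (s *levelStats) get(level int) (read, write int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if level < len(s.stats) {
		return s.stats[level].read, s.stats[level].write
	}
	return 0, 0
}

func main() {
	var s levelStats
	s.add(3, 100, 200) // level 3 appears first; the slice grows to length 4
	r, w := s.get(3)
	fmt.Println(r, w) // 100 200
}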
|
||||
|
||||
func (db *DB) compactionError() {
|
||||
var err error
|
||||
noerr:
|
||||
@ -151,7 +172,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
|
||||
disableBackoff = db.s.o.GetDisableCompactionBackoff()
|
||||
)
|
||||
for n := 0; ; n++ {
|
||||
// Check wether the DB is closed.
|
||||
// Check whether the DB is closed.
|
||||
if db.isClosed() {
|
||||
db.logf("%s exiting", name)
|
||||
db.compactionExitTransact()
|
||||
@ -235,6 +256,14 @@ func (db *DB) compactionExitTransact() {
|
||||
panic(errCompactionTransactExiting)
|
||||
}
|
||||
|
||||
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
|
||||
db.compCommitLk.Lock()
|
||||
defer db.compCommitLk.Unlock() // Defer is necessary.
|
||||
db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
|
||||
return db.s.commit(rec)
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (db *DB) memCompaction() {
|
||||
mdb := db.getFrozenMem()
|
||||
if mdb == nil {
|
||||
@ -265,41 +294,40 @@ func (db *DB) memCompaction() {
|
||||
|
||||
var (
|
||||
rec = &sessionRecord{}
|
||||
stats = &cStatsStaging{}
|
||||
stats = &cStatStaging{}
|
||||
flushLevel int
|
||||
)
|
||||
|
||||
// Generate tables.
|
||||
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
|
||||
stats.startTimer()
|
||||
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
|
||||
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
|
||||
stats.stopTimer()
|
||||
return
|
||||
}, func() error {
|
||||
for _, r := range rec.addedTables {
|
||||
db.logf("memdb@flush revert @%d", r.num)
|
||||
f := db.s.getTableFile(r.num)
|
||||
if err := f.Remove(); err != nil {
|
||||
if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
|
||||
stats.startTimer()
|
||||
rec.setJournalNum(db.journalFile.Num())
|
||||
rec.setSeqNum(db.frozenSeq)
|
||||
err = db.s.commit(rec)
|
||||
stats.stopTimer()
|
||||
return
|
||||
}, nil)
|
||||
rec.setJournalNum(db.journalFd.Num)
|
||||
rec.setSeqNum(db.frozenSeq)
|
||||
|
||||
// Commit.
|
||||
stats.startTimer()
|
||||
db.compactionCommit("memdb", rec)
|
||||
stats.stopTimer()
|
||||
|
||||
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
|
||||
|
||||
for _, r := range rec.addedTables {
|
||||
stats.write += r.size
|
||||
}
|
||||
db.compStats[flushLevel].add(stats)
|
||||
db.compStats.addStat(flushLevel, stats)
|
||||
|
||||
// Drop frozen memdb.
|
||||
db.dropFrozenMem()
|
||||
@ -315,7 +343,7 @@ func (db *DB) memCompaction() {
|
||||
}
|
||||
|
||||
// Trigger table compaction.
|
||||
db.compSendTrigger(db.tcompCmdC)
|
||||
db.compTrigger(db.tcompCmdC)
|
||||
}
|
||||
|
||||
type tableCompactionBuilder struct {
|
||||
@ -323,7 +351,7 @@ type tableCompactionBuilder struct {
|
||||
s *session
|
||||
c *compaction
|
||||
rec *sessionRecord
|
||||
stat0, stat1 *cStatsStaging
|
||||
stat0, stat1 *cStatStaging
|
||||
|
||||
snapHasLastUkey bool
|
||||
snapLastUkey []byte
|
||||
@ -377,9 +405,9 @@ func (b *tableCompactionBuilder) flush() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.rec.addTableFile(b.c.level+1, t)
|
||||
b.rec.addTableFile(b.c.sourceLevel+1, t)
|
||||
b.stat1.write += t.size
|
||||
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
|
||||
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
|
||||
b.tw = nil
|
||||
return nil
|
||||
}
|
||||
@ -424,7 +452,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
|
||||
}
|
||||
|
||||
ikey := iter.Key()
|
||||
ukey, seq, kt, kerr := parseIkey(ikey)
|
||||
ukey, seq, kt, kerr := parseInternalKey(ikey)
|
||||
|
||||
if kerr == nil {
|
||||
shouldStop := !resumed && b.c.shouldStopBefore(ikey)
|
||||
@ -450,14 +478,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
|
||||
|
||||
hasLastUkey = true
|
||||
lastUkey = append(lastUkey[:0], ukey...)
|
||||
lastSeq = kMaxSeq
|
||||
lastSeq = keyMaxSeq
|
||||
}
|
||||
|
||||
switch {
|
||||
case lastSeq <= b.minSeq:
|
||||
// Dropped because newer entry for same user key exist
|
||||
fallthrough // (A)
|
||||
case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
|
||||
case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
|
||||
// For this user key:
|
||||
// (1) there is no data in higher levels
|
||||
// (2) data in lower levels will have larger seq numbers
|
||||
@ -479,7 +507,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
|
||||
// Don't drop corrupted keys.
|
||||
hasLastUkey = false
|
||||
lastUkey = lastUkey[:0]
|
||||
lastSeq = kMaxSeq
|
||||
lastSeq = keyMaxSeq
|
||||
b.kerrCnt++
|
||||
}
|
||||
|
||||
@ -502,8 +530,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
|
||||
func (b *tableCompactionBuilder) revert() error {
|
||||
for _, at := range b.rec.addedTables {
|
||||
b.s.logf("table@build revert @%d", at.num)
|
||||
f := b.s.getTableFile(at.num)
|
||||
if err := f.Remove(); err != nil {
|
||||
if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -514,30 +541,28 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
|
||||
defer c.release()
|
||||
|
||||
rec := &sessionRecord{}
|
||||
rec.addCompPtr(c.level, c.imax)
|
||||
rec.addCompPtr(c.sourceLevel, c.imax)
|
||||
|
||||
if !noTrivial && c.trivial() {
|
||||
t := c.tables[0][0]
|
||||
db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
|
||||
rec.delTable(c.level, t.file.Num())
|
||||
rec.addTableFile(c.level+1, t)
|
||||
db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
|
||||
return db.s.commit(rec)
|
||||
}, nil)
|
||||
t := c.levels[0][0]
|
||||
db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
|
||||
rec.delTable(c.sourceLevel, t.fd.Num)
|
||||
rec.addTableFile(c.sourceLevel+1, t)
|
||||
db.compactionCommit("table-move", rec)
|
||||
return
|
||||
}
|
||||
|
||||
var stats [2]cStatsStaging
|
||||
for i, tables := range c.tables {
|
||||
var stats [2]cStatStaging
|
||||
for i, tables := range c.levels {
|
||||
for _, t := range tables {
|
||||
stats[i].read += t.size
|
||||
// Insert deleted tables into record
|
||||
rec.delTable(c.level+i, t.file.Num())
|
||||
rec.delTable(c.sourceLevel+i, t.fd.Num)
|
||||
}
|
||||
}
|
||||
sourceSize := int(stats[0].read + stats[1].read)
|
||||
minSeq := db.minSeq()
|
||||
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
|
||||
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
|
||||
|
||||
b := &tableCompactionBuilder{
|
||||
db: db,
|
||||
@ -547,49 +572,60 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
|
||||
stat1: &stats[1],
|
||||
minSeq: minSeq,
|
||||
strict: db.s.o.GetStrict(opt.StrictCompaction),
|
||||
tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
|
||||
tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
|
||||
}
|
||||
db.compactionTransact("table@build", b)
|
||||
|
||||
// Commit changes
|
||||
db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
|
||||
stats[1].startTimer()
|
||||
defer stats[1].stopTimer()
|
||||
return db.s.commit(rec)
|
||||
}, nil)
|
||||
// Commit.
|
||||
stats[1].startTimer()
|
||||
db.compactionCommit("table", rec)
|
||||
stats[1].stopTimer()
|
||||
|
||||
resultSize := int(stats[1].write)
|
||||
db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
|
||||
|
||||
// Save compaction stats
|
||||
for i := range stats {
|
||||
db.compStats[c.level+1].add(&stats[i])
|
||||
db.compStats.addStat(c.sourceLevel+1, &stats[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
|
||||
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
|
||||
db.logf("table@compaction range L%d %q:%q", level, umin, umax)
|
||||
|
||||
if level >= 0 {
|
||||
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
|
||||
if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
|
||||
db.tableCompaction(c, true)
|
||||
}
|
||||
} else {
|
||||
v := db.s.version()
|
||||
m := 1
|
||||
for i, t := range v.tables[1:] {
|
||||
if t.overlaps(db.s.icmp, umin, umax, false) {
|
||||
m = i + 1
|
||||
}
|
||||
}
|
||||
v.release()
|
||||
// Retry until nothing to compact.
|
||||
for {
|
||||
compacted := false
|
||||
|
||||
for level := 0; level < m; level++ {
|
||||
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
|
||||
db.tableCompaction(c, true)
|
||||
// Scan for maximum level with overlapped tables.
|
||||
v := db.s.version()
|
||||
m := 1
|
||||
for i := m; i < len(v.levels); i++ {
|
||||
tables := v.levels[i]
|
||||
if tables.overlaps(db.s.icmp, umin, umax, false) {
|
||||
m = i
|
||||
}
|
||||
}
|
||||
v.release()
|
||||
|
||||
for level := 0; level < m; level++ {
|
||||
if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
|
||||
db.tableCompaction(c, true)
|
||||
compacted = true
|
||||
}
|
||||
}
|
||||
|
||||
if !compacted {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) tableAutoCompaction() {
|
||||
@ -616,11 +652,11 @@ type cCmd interface {
|
||||
ack(err error)
|
||||
}
|
||||
|
||||
type cIdle struct {
|
||||
type cAuto struct {
|
||||
ackC chan<- error
|
||||
}
|
||||
|
||||
func (r cIdle) ack(err error) {
|
||||
func (r cAuto) ack(err error) {
|
||||
if r.ackC != nil {
|
||||
defer func() {
|
||||
recover()
|
||||
@ -644,13 +680,21 @@ func (r cRange) ack(err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// This will trigger auto compation and/or wait for all compaction to be done.
|
||||
func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
|
||||
// This will trigger auto compaction but will not wait for it.
|
||||
func (db *DB) compTrigger(compC chan<- cCmd) {
|
||||
select {
|
||||
case compC <- cAuto{}:
|
||||
default:
|
||||
}
|
||||
}
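compTrigger uses a select with a default branch so the caller never blocks: when the compaction goroutine is busy or a trigger is already pending, the command is simply dropped, while compTriggerWait below adds an ack channel for callers that need to wait. A minimal standalone sketch of that non-blocking trigger pattern; the worker and channel here are hypothetical, not the package's:

package main

import (
	"fmt"
	"time"
)

// trigger asks the worker to run but never blocks the caller; when the
// buffer already holds a pending request, the send is skipped.
func trigger(ch chan<- struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	work := make(chan struct{}, 1)

	go func() {
		for range work {
			fmt.Println("compaction pass")
			time.Sleep(10 * time.Millisecond)
		}
	}()

	for i := 0; i < 5; i++ {
		trigger(work) // extra triggers while busy are coalesced or dropped
	}
	time.Sleep(100 * time.Millisecond)
}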
|
||||
|
||||
// This will trigger auto compaction and/or wait for all compaction to be done.
|
||||
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
|
||||
ch := make(chan error)
|
||||
defer close(ch)
|
||||
// Send cmd.
|
||||
select {
|
||||
case compC <- cIdle{ch}:
|
||||
case compC <- cAuto{ch}:
|
||||
case err = <-db.compErrC:
|
||||
return
|
||||
case _, _ = <-db.closeC:
|
||||
@ -666,16 +710,8 @@ func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// This will trigger auto compaction but will not wait for it.
|
||||
func (db *DB) compSendTrigger(compC chan<- cCmd) {
|
||||
select {
|
||||
case compC <- cIdle{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Send range compaction request.
|
||||
func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
|
||||
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
|
||||
ch := make(chan error)
|
||||
defer close(ch)
|
||||
// Send cmd.
|
||||
@ -715,7 +751,7 @@ func (db *DB) mCompaction() {
|
||||
select {
|
||||
case x = <-db.mcompCmdC:
|
||||
switch x.(type) {
|
||||
case cIdle:
|
||||
case cAuto:
|
||||
db.memCompaction()
|
||||
x.ack(nil)
|
||||
x = nil
|
||||
@ -776,11 +812,10 @@ func (db *DB) tCompaction() {
|
||||
}
|
||||
if x != nil {
|
||||
switch cmd := x.(type) {
|
||||
case cIdle:
|
||||
case cAuto:
|
||||
ackQ = append(ackQ, x)
|
||||
case cRange:
|
||||
db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
|
||||
x.ack(nil)
|
||||
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
|
||||
default:
|
||||
panic("leveldb: unknown command")
|
||||
}
|
||||
@ -13,13 +13,13 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
|
||||
errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
|
||||
)
|
||||
|
||||
type memdbReleaser struct {
|
||||
@ -33,40 +33,50 @@ func (mr *memdbReleaser) Release() {
|
||||
})
|
||||
}
|
||||
|
||||
func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
|
||||
em, fm := db.getMems()
|
||||
v := db.s.version()
|
||||
|
||||
ti := v.getIterators(slice, ro)
|
||||
n := len(ti) + 2
|
||||
i := make([]iterator.Iterator, 0, n)
|
||||
tableIts := v.getIterators(slice, ro)
|
||||
n := len(tableIts) + len(auxt) + 3
|
||||
its := make([]iterator.Iterator, 0, n)
|
||||
|
||||
if auxm != nil {
|
||||
ami := auxm.NewIterator(slice)
|
||||
ami.SetReleaser(&memdbReleaser{m: auxm})
|
||||
its = append(its, ami)
|
||||
}
|
||||
for _, t := range auxt {
|
||||
its = append(its, v.s.tops.newIterator(t, slice, ro))
|
||||
}
|
||||
|
||||
emi := em.NewIterator(slice)
|
||||
emi.SetReleaser(&memdbReleaser{m: em})
|
||||
i = append(i, emi)
|
||||
its = append(its, emi)
|
||||
if fm != nil {
|
||||
fmi := fm.NewIterator(slice)
|
||||
fmi.SetReleaser(&memdbReleaser{m: fm})
|
||||
i = append(i, fmi)
|
||||
its = append(its, fmi)
|
||||
}
|
||||
i = append(i, ti...)
|
||||
strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
|
||||
mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
|
||||
its = append(its, tableIts...)
|
||||
mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
|
||||
mi.SetReleaser(&versionReleaser{v: v})
|
||||
return mi
|
||||
}
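newRawIterator stacks its sources in recency order: the optional auxiliary memdb and auxiliary tables first, then the active and frozen memdbs, then the version's per-level table iterators, all merged under the internal-key comparer with strictness taken from StrictReader. A hedged sketch of merging independent iterators with this package's merged iterator; the two in-memory sources below are stand-ins for the real memdb and table iterators:

package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/comparer"
	"github.com/pingcap/goleveldb/leveldb/iterator"
	"github.com/pingcap/goleveldb/leveldb/memdb"
)

func main() {
	cmp := comparer.DefaultComparer

	// Two independent in-memory sources standing in for memdb/table iterators.
	a := memdb.New(cmp, 1<<10)
	b := memdb.New(cmp, 1<<10)
	a.Put([]byte("apple"), []byte("1"))
	a.Put([]byte("cherry"), []byte("3"))
	b.Put([]byte("banana"), []byte("2"))

	its := []iterator.Iterator{a.NewIterator(nil), b.NewIterator(nil)}
	merged := iterator.NewMergedIterator(its, cmp, false /* not strict */)
	defer merged.Release()

	// Keys come out in comparer order across all source iterators.
	for merged.Next() {
		fmt.Printf("%s=%s\n", merged.Key(), merged.Value()) // apple, banana, cherry
	}
}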
|
||||
|
||||
func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
|
||||
func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
|
||||
var islice *util.Range
|
||||
if slice != nil {
|
||||
islice = &util.Range{}
|
||||
if slice.Start != nil {
|
||||
islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
|
||||
islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
|
||||
}
|
||||
if slice.Limit != nil {
|
||||
islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
|
||||
islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
|
||||
}
|
||||
}
|
||||
rawIter := db.newRawIterator(islice, ro)
|
||||
rawIter := db.newRawIterator(auxm, auxt, islice, ro)
|
||||
iter := &dbIter{
|
||||
db: db,
|
||||
icmp: db.s.icmp,
|
||||
@ -177,7 +187,7 @@ func (i *dbIter) Seek(key []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
ikey := newIkey(key, i.seq, ktSeek)
|
||||
ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
|
||||
if i.iter.Seek(ikey) {
|
||||
i.dir = dirSOI
|
||||
return i.next()
|
||||
@ -189,15 +199,15 @@ func (i *dbIter) Seek(key []byte) bool {
|
||||
|
||||
func (i *dbIter) next() bool {
|
||||
for {
|
||||
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||
if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
|
||||
i.sampleSeek()
|
||||
if seq <= i.seq {
|
||||
switch kt {
|
||||
case ktDel:
|
||||
case keyTypeDel:
|
||||
// Skip deleted key.
|
||||
i.key = append(i.key[:0], ukey...)
|
||||
i.dir = dirForward
|
||||
case ktVal:
|
||||
case keyTypeVal:
|
||||
if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
|
||||
i.key = append(i.key[:0], ukey...)
|
||||
i.value = append(i.value[:0], i.iter.Value()...)
|
||||
@ -240,13 +250,13 @@ func (i *dbIter) prev() bool {
|
||||
del := true
|
||||
if i.iter.Valid() {
|
||||
for {
|
||||
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||
if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
|
||||
i.sampleSeek()
|
||||
if seq <= i.seq {
|
||||
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
|
||||
return true
|
||||
}
|
||||
del = (kt == ktDel)
|
||||
del = (kt == keyTypeDel)
|
||||
if !del {
|
||||
i.key = append(i.key[:0], ukey...)
|
||||
i.value = append(i.value[:0], i.iter.Value()...)
|
||||
@ -282,7 +292,7 @@ func (i *dbIter) Prev() bool {
|
||||
return i.Last()
|
||||
case dirForward:
|
||||
for i.iter.Prev() {
|
||||
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||
if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
|
||||
i.sampleSeek()
|
||||
if i.icmp.uCompare(ukey, i.key) < 0 {
|
||||
goto cont
|
||||
@ -13,9 +13,9 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type snapshotElement struct {
|
||||
@ -110,7 +110,7 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er
|
||||
err = ErrSnapshotReleased
|
||||
return
|
||||
}
|
||||
return snap.db.get(key, snap.elem.seq, ro)
|
||||
return snap.db.get(nil, nil, key, snap.elem.seq, ro)
|
||||
}
|
||||
|
||||
// Has returns true if the DB does contains the given key.
|
||||
@ -127,10 +127,10 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error)
|
||||
err = ErrSnapshotReleased
|
||||
return
|
||||
}
|
||||
return snap.db.has(key, snap.elem.seq, ro)
|
||||
return snap.db.has(nil, nil, key, snap.elem.seq, ro)
|
||||
}
|
||||
|
||||
// NewIterator returns an iterator for the snapshot of the uderlying DB.
|
||||
// NewIterator returns an iterator for the snapshot of the underlying DB.
|
||||
// The returned iterator is not goroutine-safe, but it is safe to use
|
||||
// multiple iterators concurrently, with each in a dedicated goroutine.
|
||||
// It is also safe to use an iterator concurrently with modifying its
|
||||
@ -158,7 +158,7 @@ func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterat
|
||||
}
|
||||
// Since iterator already hold version ref, it doesn't need to
|
||||
// hold snapshot ref.
|
||||
return snap.db.newIterator(snap.elem.seq, slice, ro)
|
||||
return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
|
||||
}
|
||||
|
||||
// Release releases the snapshot. This will not release any returned
|
||||
@ -10,8 +10,9 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/pingcap/goleveldb/leveldb/journal"
|
||||
"github.com/pingcap/goleveldb/leveldb/memdb"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
type memDB struct {
|
||||
@ -20,6 +21,10 @@ type memDB struct {
|
||||
ref int32
|
||||
}
|
||||
|
||||
func (m *memDB) getref() int32 {
|
||||
return atomic.LoadInt32(&m.ref)
|
||||
}
|
||||
|
||||
func (m *memDB) incref() {
|
||||
atomic.AddInt32(&m.ref, 1)
|
||||
}
|
||||
@ -48,11 +53,15 @@ func (db *DB) addSeq(delta uint64) {
|
||||
atomic.AddUint64(&db.seq, delta)
|
||||
}
|
||||
|
||||
func (db *DB) sampleSeek(ikey iKey) {
|
||||
func (db *DB) setSeq(seq uint64) {
|
||||
atomic.StoreUint64(&db.seq, seq)
|
||||
}
|
||||
|
||||
func (db *DB) sampleSeek(ikey internalKey) {
|
||||
v := db.s.version()
|
||||
if v.sampleSeek(ikey) {
|
||||
// Trigger table compaction.
|
||||
db.compSendTrigger(db.tcompCmdC)
|
||||
db.compTrigger(db.tcompCmdC)
|
||||
}
|
||||
v.release()
|
||||
}
|
||||
@ -67,12 +76,18 @@ func (db *DB) mpoolPut(mem *memdb.DB) {
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) mpoolGet() *memdb.DB {
|
||||
func (db *DB) mpoolGet(n int) *memDB {
|
||||
var mdb *memdb.DB
|
||||
select {
|
||||
case mem := <-db.memPool:
|
||||
return mem
|
||||
case mdb = <-db.memPool:
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
if mdb == nil || mdb.Capacity() < n {
|
||||
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
|
||||
}
|
||||
return &memDB{
|
||||
db: db,
|
||||
DB: mdb,
|
||||
}
|
||||
}
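mpoolGet now owns the whole "reuse or allocate" decision: it tries to pull a memdb from the pool channel without blocking, allocates a fresh one when the pooled instance is missing or too small, and wraps it in a memDB handle. A small standalone sketch of the same buffered-channel pool pattern; the byteBuf type and sizes are illustrative, not from this package:

package main

import "fmt"

type byteBuf struct{ b []byte }

// pool is a non-blocking free list built on a buffered channel.
var pool = make(chan *byteBuf, 4)

// get reuses a pooled buffer when one is available and big enough,
// otherwise it allocates a new one with capacity of at least n bytes.
func get(n int) *byteBuf {
	var buf *byteBuf
	select {
	case buf = <-pool:
	default:
	}
	if buf == nil || cap(buf.b) < n {
		buf = &byteBuf{b: make([]byte, 0, n)}
	}
	return buf
}

// put returns a buffer to the pool, dropping it when the pool is full.
func put(buf *byteBuf) {
	select {
	case pool <- buf:
	default:
	}
}

func main() {
	b := get(1 << 10)
	put(b)
	fmt.Println(cap(get(512).b)) // 1024: the pooled buffer was reused
}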
|
||||
|
||||
@ -95,11 +110,10 @@ func (db *DB) mpoolDrain() {
|
||||
// Create a new memdb and freeze the old one; needs external synchronization.
|
||||
// newMem is only called synchronously by the writer.
|
||||
func (db *DB) newMem(n int) (mem *memDB, err error) {
|
||||
num := db.s.allocFileNum()
|
||||
file := db.s.getJournalFile(num)
|
||||
w, err := file.Create()
|
||||
fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
|
||||
w, err := db.s.stor.Create(fd)
|
||||
if err != nil {
|
||||
db.s.reuseFileNum(num)
|
||||
db.s.reuseFileNum(fd.Num)
|
||||
return
|
||||
}
|
||||
|
||||
@ -115,20 +129,14 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
|
||||
} else {
|
||||
db.journal.Reset(w)
|
||||
db.journalWriter.Close()
|
||||
db.frozenJournalFile = db.journalFile
|
||||
db.frozenJournalFd = db.journalFd
|
||||
}
|
||||
db.journalWriter = w
|
||||
db.journalFile = file
|
||||
db.journalFd = fd
|
||||
db.frozenMem = db.mem
|
||||
mdb := db.mpoolGet()
|
||||
if mdb == nil || mdb.Capacity() < n {
|
||||
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
|
||||
}
|
||||
mem = &memDB{
|
||||
db: db,
|
||||
DB: mdb,
|
||||
ref: 2,
|
||||
}
|
||||
mem = db.mpoolGet(n)
|
||||
mem.incref() // for self
|
||||
mem.incref() // for caller
|
||||
db.mem = mem
|
||||
// The seq only incremented by the writer. And whoever called newMem
|
||||
// should hold write lock, so no need additional synchronization here.
|
||||
@ -181,12 +189,12 @@ func (db *DB) getFrozenMem() *memDB {
|
||||
// Drop frozen memdb; assume that frozen memdb isn't nil.
|
||||
func (db *DB) dropFrozenMem() {
|
||||
db.memMu.Lock()
|
||||
if err := db.frozenJournalFile.Remove(); err != nil {
|
||||
db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
|
||||
if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
|
||||
db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
|
||||
} else {
|
||||
db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
|
||||
db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
|
||||
}
|
||||
db.frozenJournalFile = nil
|
||||
db.frozenJournalFd = storage.FileDesc{}
|
||||
db.frozenMem.decref()
|
||||
db.frozenMem = nil
|
||||
db.memMu.Unlock()
|
||||
2829
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/db_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
289
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/db_transaction.go
generated
vendored
Normal file
@ -0,0 +1,289 @@
|
||||
// Copyright (c) 2016, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var errTransactionDone = errors.New("leveldb: transaction already closed")
|
||||
|
||||
// Transaction is the transaction handle.
|
||||
type Transaction struct {
|
||||
db *DB
|
||||
lk sync.RWMutex
|
||||
seq uint64
|
||||
mem *memDB
|
||||
tables tFiles
|
||||
ikScratch []byte
|
||||
rec sessionRecord
|
||||
stats cStatStaging
|
||||
closed bool
|
||||
}
|
||||
|
||||
// Get gets the value for the given key. It returns ErrNotFound if the
|
||||
// DB does not contains the key.
|
||||
//
|
||||
// The returned slice is its own copy, it is safe to modify the contents
|
||||
// of the returned slice.
|
||||
// It is safe to modify the contents of the argument after Get returns.
|
||||
func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
|
||||
tr.lk.RLock()
|
||||
defer tr.lk.RUnlock()
|
||||
if tr.closed {
|
||||
return nil, errTransactionDone
|
||||
}
|
||||
return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
|
||||
}
|
||||
|
||||
// Has returns true if the DB does contains the given key.
|
||||
//
|
||||
// It is safe to modify the contents of the argument after Has returns.
|
||||
func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
|
||||
tr.lk.RLock()
|
||||
defer tr.lk.RUnlock()
|
||||
if tr.closed {
|
||||
return false, errTransactionDone
|
||||
}
|
||||
return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
|
||||
}
|
||||
|
||||
// NewIterator returns an iterator for the latest snapshot of the transaction.
|
||||
// The returned iterator is not goroutine-safe, but it is safe to use multiple
|
||||
// iterators concurrently, with each in a dedicated goroutine.
|
||||
// It is also safe to use an iterator concurrently while writes to the
|
||||
// transaction. The resultant key/value pairs are guaranteed to be consistent.
|
||||
//
|
||||
// Slice allows slicing the iterator to only contains keys in the given
|
||||
// range. A nil Range.Start is treated as a key before all keys in the
|
||||
// DB. And a nil Range.Limit is treated as a key after all keys in
|
||||
// the DB.
|
||||
//
|
||||
// The iterator must be released after use, by calling Release method.
|
||||
//
|
||||
// Also read Iterator documentation of the leveldb/iterator package.
|
||||
func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
tr.lk.RLock()
|
||||
defer tr.lk.RUnlock()
|
||||
if tr.closed {
|
||||
return iterator.NewEmptyIterator(errTransactionDone)
|
||||
}
|
||||
tr.mem.incref()
|
||||
return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
|
||||
}
|
||||
|
||||
func (tr *Transaction) flush() error {
|
||||
// Flush memdb.
|
||||
if tr.mem.Len() != 0 {
|
||||
tr.stats.startTimer()
|
||||
iter := tr.mem.NewIterator(nil)
|
||||
t, n, err := tr.db.s.tops.createFrom(iter)
|
||||
iter.Release()
|
||||
tr.stats.stopTimer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tr.mem.getref() == 1 {
|
||||
tr.mem.Reset()
|
||||
} else {
|
||||
tr.mem.decref()
|
||||
tr.mem = tr.db.mpoolGet(0)
|
||||
tr.mem.incref()
|
||||
}
|
||||
tr.tables = append(tr.tables, t)
|
||||
tr.rec.addTableFile(0, t)
|
||||
tr.stats.write += t.size
|
||||
tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *Transaction) put(kt keyType, key, value []byte) error {
|
||||
tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
|
||||
if tr.mem.Free() < len(tr.ikScratch)+len(value) {
|
||||
if err := tr.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tr.mem.Put(tr.ikScratch, value); err != nil {
|
||||
return err
|
||||
}
|
||||
tr.seq++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put sets the value for the given key. It overwrites any previous value
|
||||
// for that key; a DB is not a multi-map.
|
||||
// Please note that the transaction is not compacted until committed, so if you
|
||||
// writes 10 same keys, then those 10 same keys are in the transaction.
|
||||
//
|
||||
// It is safe to modify the contents of the arguments after Put returns.
|
||||
func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
|
||||
tr.lk.Lock()
|
||||
defer tr.lk.Unlock()
|
||||
if tr.closed {
|
||||
return errTransactionDone
|
||||
}
|
||||
return tr.put(keyTypeVal, key, value)
|
||||
}
|
||||
|
||||
// Delete deletes the value for the given key.
|
||||
// Please note that the transaction is not compacted until committed, so if you
|
||||
// writes 10 same keys, then those 10 same keys are in the transaction.
|
||||
//
|
||||
// It is safe to modify the contents of the arguments after Delete returns.
|
||||
func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
|
||||
tr.lk.Lock()
|
||||
defer tr.lk.Unlock()
|
||||
if tr.closed {
|
||||
return errTransactionDone
|
||||
}
|
||||
return tr.put(keyTypeDel, key, nil)
|
||||
}
|
||||
|
||||
// Write apply the given batch to the transaction. The batch will be applied
|
||||
// sequentially.
|
||||
// Please note that the transaction is not compacted until committed, so if you
|
||||
// writes 10 same keys, then those 10 same keys are in the transaction.
|
||||
//
|
||||
// It is safe to modify the contents of the arguments after Write returns.
|
||||
func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
|
||||
if b == nil || b.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tr.lk.Lock()
|
||||
defer tr.lk.Unlock()
|
||||
if tr.closed {
|
||||
return errTransactionDone
|
||||
}
|
||||
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
|
||||
return tr.put(kt, key, value)
|
||||
})
|
||||
}
|
||||
|
||||
func (tr *Transaction) setDone() {
|
||||
tr.closed = true
|
||||
tr.db.tr = nil
|
||||
tr.mem.decref()
|
||||
<-tr.db.writeLockC
|
||||
}
|
||||
|
||||
// Commit commits the transaction.
|
||||
//
|
||||
// Other methods should not be called after transaction has been committed.
|
||||
func (tr *Transaction) Commit() error {
|
||||
if err := tr.db.ok(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr.lk.Lock()
|
||||
defer tr.lk.Unlock()
|
||||
if tr.closed {
|
||||
return errTransactionDone
|
||||
}
|
||||
defer tr.setDone()
|
||||
if err := tr.flush(); err != nil {
|
||||
tr.discard()
|
||||
return err
|
||||
}
|
||||
if len(tr.tables) != 0 {
|
||||
// Committing transaction.
|
||||
tr.rec.setSeqNum(tr.seq)
|
||||
tr.db.compCommitLk.Lock()
|
||||
defer tr.db.compCommitLk.Unlock()
|
||||
for retry := 0; retry < 3; retry++ {
|
||||
if err := tr.db.s.commit(&tr.rec); err != nil {
|
||||
tr.db.logf("transaction@commit error R·%d %q", retry, err)
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case _, _ = <-tr.db.closeC:
|
||||
tr.db.logf("transaction@commit exiting")
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Success. Set db.seq.
|
||||
tr.db.setSeq(tr.seq)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Trigger table auto-compaction.
|
||||
tr.db.compTrigger(tr.db.tcompCmdC)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *Transaction) discard() {
|
||||
// Discard transaction.
|
||||
for _, t := range tr.tables {
|
||||
tr.db.logf("transaction@discard @%d", t.fd.Num)
|
||||
if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
|
||||
tr.db.s.reuseFileNum(t.fd.Num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Discard discards the transaction.
|
||||
//
|
||||
// Other methods should not be called after transaction has been discarded.
|
||||
func (tr *Transaction) Discard() {
|
||||
tr.lk.Lock()
|
||||
if !tr.closed {
|
||||
tr.discard()
|
||||
tr.setDone()
|
||||
}
|
||||
tr.lk.Unlock()
|
||||
}
|
||||
|
||||
// OpenTransaction opens an atomic DB transaction. Only one transaction can be
|
||||
// opened at a time. Write will be blocked until the transaction is committed or
|
||||
// discarded.
|
||||
// The returned transaction handle is goroutine-safe.
|
||||
//
|
||||
// The transaction must be closed once done, either by committing or discarding
|
||||
// the transaction.
|
||||
// Closing the DB will discard open transaction.
|
||||
func (db *DB) OpenTransaction() (*Transaction, error) {
|
||||
if err := db.ok(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The write happen synchronously.
|
||||
select {
|
||||
case db.writeLockC <- struct{}{}:
|
||||
case err := <-db.compPerErrC:
|
||||
return nil, err
|
||||
case _, _ = <-db.closeC:
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
if db.tr != nil {
|
||||
panic("leveldb: has open transaction")
|
||||
}
|
||||
|
||||
// Flush current memdb.
|
||||
if db.mem != nil && db.mem.Len() != 0 {
|
||||
if _, err := db.rotateMem(0, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tr := &Transaction{
|
||||
db: db,
|
||||
seq: db.seq,
|
||||
mem: db.mpoolGet(0),
|
||||
}
|
||||
tr.mem.incref()
|
||||
db.tr = tr
|
||||
return tr, nil
|
||||
}
|
||||
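db_transaction.go introduces the OpenTransaction / Commit / Discard API shown above. A minimal usage sketch against the public package follows; the database path and keys are made up and error handling is kept short, so treat it as an illustration rather than the canonical way to use the fork.

package main

import (
	"log"

	"github.com/pingcap/goleveldb/leveldb"
)

func main() {
	// Open (or create) a database; the path is an example only.
	db, err := leveldb.OpenFile("/tmp/example-db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Only one transaction may be open at a time; other writes block
	// until it is committed or discarded.
	tr, err := db.OpenTransaction()
	if err != nil {
		log.Fatal(err)
	}
	if err := tr.Put([]byte("k1"), []byte("v1"), nil); err != nil {
		tr.Discard()
		log.Fatal(err)
	}
	if err := tr.Commit(); err != nil {
		log.Fatal(err)
	}
}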
102
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/db_util.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// Reader is the interface that wraps basic Get and NewIterator methods.
|
||||
// This interface implemented by both DB and Snapshot.
|
||||
type Reader interface {
|
||||
Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
|
||||
NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
|
||||
}
|
||||
|
||||
// Sizes is list of size.
|
||||
type Sizes []int64
|
||||
|
||||
// Sum returns sum of the sizes.
|
||||
func (sizes Sizes) Sum() int64 {
|
||||
var sum int64
|
||||
for _, size := range sizes {
|
||||
sum += size
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// Logging.
|
||||
func (db *DB) log(v ...interface{}) { db.s.log(v...) }
|
||||
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
|
||||
|
||||
// Check and clean files.
|
||||
func (db *DB) checkAndCleanFiles() error {
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
|
||||
tmap := make(map[int64]bool)
|
||||
for _, tables := range v.levels {
|
||||
for _, t := range tables {
|
||||
tmap[t.fd.Num] = false
|
||||
}
|
||||
}
|
||||
|
||||
fds, err := db.s.stor.List(storage.TypeAll)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var nt int
|
||||
var rem []storage.FileDesc
|
||||
for _, fd := range fds {
|
||||
keep := true
|
||||
switch fd.Type {
|
||||
case storage.TypeManifest:
|
||||
keep = fd.Num >= db.s.manifestFd.Num
|
||||
case storage.TypeJournal:
|
||||
if !db.frozenJournalFd.Nil() {
|
||||
keep = fd.Num >= db.frozenJournalFd.Num
|
||||
} else {
|
||||
keep = fd.Num >= db.journalFd.Num
|
||||
}
|
||||
case storage.TypeTable:
|
||||
_, keep = tmap[fd.Num]
|
||||
if keep {
|
||||
tmap[fd.Num] = true
|
||||
nt++
|
||||
}
|
||||
}
|
||||
|
||||
if !keep {
|
||||
rem = append(rem, fd)
|
||||
}
|
||||
}
|
||||
|
||||
if nt != len(tmap) {
|
||||
var mfds []storage.FileDesc
|
||||
for num, present := range tmap {
|
||||
if !present {
|
||||
mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
|
||||
db.logf("db@janitor table missing @%d", num)
|
||||
}
|
||||
}
|
||||
return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
|
||||
}
|
||||
|
||||
db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
|
||||
for _, fd := range rem {
|
||||
db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
|
||||
if err := db.s.stor.Remove(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -9,9 +9,9 @@ package leveldb
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/memdb"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func (db *DB) writeJournal(b *Batch) error {
|
||||
@ -45,9 +45,9 @@ func (db *DB) jWriter() {
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) rotateMem(n int) (mem *memDB, err error) {
|
||||
func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
|
||||
// Wait for pending memdb compaction.
|
||||
err = db.compSendIdle(db.mcompCmdC)
|
||||
err = db.compTriggerWait(db.mcompCmdC)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -59,7 +59,11 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) {
|
||||
}
|
||||
|
||||
// Schedule memdb compaction.
|
||||
db.compSendTrigger(db.mcompCmdC)
|
||||
if wait {
|
||||
err = db.compTriggerWait(db.mcompCmdC)
|
||||
} else {
|
||||
db.compTrigger(db.mcompCmdC)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -84,7 +88,7 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
|
||||
return false
|
||||
case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
|
||||
delayed = true
|
||||
err = db.compSendIdle(db.tcompCmdC)
|
||||
err = db.compTriggerWait(db.tcompCmdC)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
@ -94,7 +98,7 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
|
||||
mdbFree = n
|
||||
} else {
|
||||
mdb.decref()
|
||||
mdb, err = db.rotateMem(n)
|
||||
mdb, err = db.rotateMem(n, false)
|
||||
if err == nil {
|
||||
mdbFree = mdb.Free()
|
||||
} else {
|
||||
@ -131,12 +135,27 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
|
||||
|
||||
b.init(wo.GetSync() && !db.s.o.GetNoSync())
|
||||
|
||||
if b.size() > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
|
||||
// Writes using transaction.
|
||||
tr, err1 := db.OpenTransaction()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
if err1 := tr.Write(b, wo); err1 != nil {
|
||||
tr.Discard()
|
||||
return err1
|
||||
}
|
||||
return tr.Commit()
|
||||
}
|
||||
|
||||
// The write happen synchronously.
|
||||
select {
|
||||
case db.writeC <- b:
|
||||
if <-db.writeMergedC {
|
||||
return <-db.writeAckC
|
||||
}
|
||||
// Continue, the write lock already acquired by previous writer
|
||||
// and handed out to us.
|
||||
case db.writeLockC <- struct{}{}:
|
||||
case err = <-db.compPerErrC:
|
||||
return
|
||||
@ -147,14 +166,15 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
|
||||
merged := 0
|
||||
danglingMerge := false
|
||||
defer func() {
|
||||
for i := 0; i < merged; i++ {
|
||||
db.writeAckC <- err
|
||||
}
|
||||
if danglingMerge {
|
||||
// Only one dangling merge at most, so this is safe.
|
||||
db.writeMergedC <- false
|
||||
} else {
|
||||
<-db.writeLockC
|
||||
}
|
||||
for i := 0; i < merged; i++ {
|
||||
db.writeAckC <- err
|
||||
}
|
||||
}()
|
||||
|
||||
mdb, mdbFree, err := db.flush(b.size())
|
||||
@ -234,7 +254,7 @@ drain:
|
||||
db.addSeq(uint64(b.Len()))
|
||||
|
||||
if b.size() >= mdbFree {
|
||||
db.rotateMem(0)
|
||||
db.rotateMem(0, false)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -261,8 +281,8 @@ func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
|
||||
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
|
||||
iter := mem.NewIterator(nil)
|
||||
defer iter.Release()
|
||||
return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
|
||||
(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
|
||||
return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
|
||||
(min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
|
||||
}
|
||||
|
||||
// CompactRange compacts the underlying DB for the given key range.
|
||||
@ -293,12 +313,12 @@ func (db *DB) CompactRange(r util.Range) error {
|
||||
defer mdb.decref()
|
||||
if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
|
||||
// Memdb compaction.
|
||||
if _, err := db.rotateMem(0); err != nil {
|
||||
if _, err := db.rotateMem(0, false); err != nil {
|
||||
<-db.writeLockC
|
||||
return err
|
||||
}
|
||||
<-db.writeLockC
|
||||
if err := db.compSendIdle(db.mcompCmdC); err != nil {
|
||||
if err := db.compTriggerWait(db.mcompCmdC); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@ -306,7 +326,7 @@ func (db *DB) CompactRange(r util.Range) error {
|
||||
}
|
||||
|
||||
// Table compaction.
|
||||
return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
|
||||
return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
|
||||
}
|
||||
|
||||
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
|
||||
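The Write change above routes any batch larger than the write buffer through an internal transaction unless that behaviour is disabled. Assuming the option behind GetDisableLargeBatchTransaction is the opt.Options field DisableLargeBatchTransaction (inferred from the getter, not shown in this diff), a caller could opt out as in this illustrative sketch:

package main

import (
	"log"

	"github.com/pingcap/goleveldb/leveldb"
	"github.com/pingcap/goleveldb/leveldb/opt"
)

func main() {
	// Values below are examples only.
	o := &opt.Options{
		WriteBuffer:                  4 * 1024 * 1024,
		DisableLargeBatchTransaction: true,
	}
	db, err := leveldb.OpenFile("/tmp/example-db", o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	batch := new(leveldb.Batch)
	batch.Put([]byte("k1"), []byte("v1"))
	batch.Delete([]byte("k2"))
	if err := db.Write(batch, nil); err != nil {
		log.Fatal(err)
	}
}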
@ -7,7 +7,7 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -11,8 +11,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -29,21 +29,21 @@ func New(text string) error {
|
||||
// ErrCorrupted is the type that wraps errors that indicate corruption in
|
||||
// the database.
|
||||
type ErrCorrupted struct {
|
||||
File *storage.FileInfo
|
||||
Err error
|
||||
Fd storage.FileDesc
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ErrCorrupted) Error() string {
|
||||
if e.File != nil {
|
||||
return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
|
||||
if !e.Fd.Nil() {
|
||||
return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
|
||||
} else {
|
||||
return e.Err.Error()
|
||||
}
|
||||
}
|
||||
|
||||
// NewErrCorrupted creates new ErrCorrupted error.
|
||||
func NewErrCorrupted(f storage.File, err error) error {
|
||||
return &ErrCorrupted{storage.NewFileInfo(f), err}
|
||||
func NewErrCorrupted(fd storage.FileDesc, err error) error {
|
||||
return &ErrCorrupted{fd, err}
|
||||
}
|
||||
|
||||
// IsCorrupted returns a boolean indicating whether the error is indicating
|
||||
@ -61,17 +61,17 @@ func IsCorrupted(err error) bool {
|
||||
// ErrMissingFiles is the type that indicating a corruption due to missing
|
||||
// files. ErrMissingFiles always wrapped with ErrCorrupted.
|
||||
type ErrMissingFiles struct {
|
||||
Files []*storage.FileInfo
|
||||
Fds []storage.FileDesc
|
||||
}
|
||||
|
||||
func (e *ErrMissingFiles) Error() string { return "file missing" }
|
||||
|
||||
// SetFile sets 'file info' of the given error with the given file.
|
||||
// SetFd sets 'file info' of the given error with the given file.
|
||||
// Currently only ErrCorrupted is supported, otherwise will do nothing.
|
||||
func SetFile(err error, f storage.File) error {
|
||||
func SetFd(err error, fd storage.FileDesc) error {
|
||||
switch x := err.(type) {
|
||||
case *ErrCorrupted:
|
||||
x.File = storage.NewFileInfo(f)
|
||||
x.Fd = fd
|
||||
return x
|
||||
}
|
||||
return err
|
||||
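errors.ErrCorrupted is now keyed by a storage.FileDesc rather than a *storage.FileInfo. A small illustrative sketch, not part of the diff, of building and detecting such an error with the package's exported helpers:

package main

import (
	"fmt"

	lerrors "github.com/pingcap/goleveldb/leveldb/errors"
	"github.com/pingcap/goleveldb/leveldb/storage"
)

func main() {
	// Wrap an underlying error with the file descriptor it belongs to.
	fd := storage.FileDesc{Type: storage.TypeTable, Num: 5}
	err := lerrors.NewErrCorrupted(fd, lerrors.New("checksum mismatch"))

	// IsCorrupted reports whether an error indicates corruption.
	fmt.Println(lerrors.IsCorrupted(err)) // true
	fmt.Println(err)                      // prints the wrapped error with its file descriptor
}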
117
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/external_test.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
)
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Leveldb external", func() {
|
||||
o := &opt.Options{
|
||||
DisableBlockCache: true,
|
||||
BlockRestartInterval: 5,
|
||||
BlockSize: 80,
|
||||
Compression: opt.NoCompression,
|
||||
OpenFilesCacheCapacity: -1,
|
||||
Strict: opt.StrictAll,
|
||||
WriteBuffer: 1000,
|
||||
CompactionTableSize: 2000,
|
||||
}
|
||||
|
||||
Describe("write test", func() {
|
||||
It("should do write correctly", func(done Done) {
|
||||
db := newTestingDB(o, nil, nil)
|
||||
t := testutil.DBTesting{
|
||||
DB: db,
|
||||
Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(),
|
||||
}
|
||||
testutil.DoDBTesting(&t)
|
||||
db.TestClose()
|
||||
done <- true
|
||||
}, 20.0)
|
||||
})
|
||||
|
||||
Describe("read test", func() {
|
||||
testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
|
||||
// Building the DB.
|
||||
db := newTestingDB(o, nil, nil)
|
||||
kv.IterateShuffled(nil, func(i int, key, value []byte) {
|
||||
err := db.TestPut(key, value)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
return db
|
||||
}, func(db testutil.DB) {
|
||||
db.(*testingDB).TestClose()
|
||||
})
|
||||
})
|
||||
|
||||
Describe("transaction test", func() {
|
||||
It("should do transaction correctly", func(done Done) {
|
||||
db := newTestingDB(o, nil, nil)
|
||||
|
||||
By("creating first transaction")
|
||||
var err error
|
||||
tr := &testingTransaction{}
|
||||
tr.Transaction, err = db.OpenTransaction()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
t0 := &testutil.DBTesting{
|
||||
DB: tr,
|
||||
Deleted: testutil.KeyValue_Generate(nil, 200, 1, 50, 5, 5).Clone(),
|
||||
}
|
||||
testutil.DoDBTesting(t0)
|
||||
testutil.TestGet(tr, t0.Present)
|
||||
testutil.TestHas(tr, t0.Present)
|
||||
|
||||
By("committing first transaction")
|
||||
err = tr.Commit()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
testutil.TestIter(db, nil, t0.Present)
|
||||
testutil.TestGet(db, t0.Present)
|
||||
testutil.TestHas(db, t0.Present)
|
||||
|
||||
By("manipulating DB without transaction")
|
||||
t0.DB = db
|
||||
testutil.DoDBTesting(t0)
|
||||
|
||||
By("creating second transaction")
|
||||
tr.Transaction, err = db.OpenTransaction()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
t1 := &testutil.DBTesting{
|
||||
DB: tr,
|
||||
Deleted: t0.Deleted.Clone(),
|
||||
Present: t0.Present.Clone(),
|
||||
}
|
||||
testutil.DoDBTesting(t1)
|
||||
testutil.TestIter(db, nil, t0.Present)
|
||||
|
||||
By("discarding second transaction")
|
||||
tr.Discard()
|
||||
testutil.TestIter(db, nil, t0.Present)
|
||||
|
||||
By("creating third transaction")
|
||||
tr.Transaction, err = db.OpenTransaction()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
t0.DB = tr
|
||||
testutil.DoDBTesting(t0)
|
||||
|
||||
By("committing third transaction")
|
||||
err = tr.Commit()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
testutil.TestIter(db, nil, t0.Present)
|
||||
|
||||
db.TestClose()
|
||||
done <- true
|
||||
}, 30.0)
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -7,7 +7,7 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/pingcap/goleveldb/leveldb/filter"
|
||||
)
|
||||
|
||||
type iFilter struct {
|
||||
@ -15,7 +15,7 @@ type iFilter struct {
|
||||
}
|
||||
|
||||
func (f iFilter) Contains(filter, key []byte) bool {
|
||||
return f.Filter.Contains(filter, iKey(key).ukey())
|
||||
return f.Filter.Contains(filter, internalKey(key).ukey())
|
||||
}
|
||||
|
||||
func (f iFilter) NewGenerator() filter.FilterGenerator {
|
||||
@ -27,5 +27,5 @@ type iFilterGenerator struct {
|
||||
}
|
||||
|
||||
func (g iFilterGenerator) Add(key []byte) {
|
||||
g.FilterGenerator.Add(iKey(key).ukey())
|
||||
g.FilterGenerator.Add(internalKey(key).ukey())
|
||||
}
|
||||
@ -7,7 +7,7 @@
|
||||
package filter
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func bloomHash(key []byte) uint32 {
|
||||
142
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/filter/bloom_test.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package filter
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type harness struct {
|
||||
t *testing.T
|
||||
|
||||
bloom Filter
|
||||
generator FilterGenerator
|
||||
filter []byte
|
||||
}
|
||||
|
||||
func newHarness(t *testing.T) *harness {
|
||||
bloom := NewBloomFilter(10)
|
||||
return &harness{
|
||||
t: t,
|
||||
bloom: bloom,
|
||||
generator: bloom.NewGenerator(),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *harness) add(key []byte) {
|
||||
h.generator.Add(key)
|
||||
}
|
||||
|
||||
func (h *harness) addNum(key uint32) {
|
||||
var b [4]byte
|
||||
binary.LittleEndian.PutUint32(b[:], key)
|
||||
h.add(b[:])
|
||||
}
|
||||
|
||||
func (h *harness) build() {
|
||||
b := &util.Buffer{}
|
||||
h.generator.Generate(b)
|
||||
h.filter = b.Bytes()
|
||||
}
|
||||
|
||||
func (h *harness) reset() {
|
||||
h.filter = nil
|
||||
}
|
||||
|
||||
func (h *harness) filterLen() int {
|
||||
return len(h.filter)
|
||||
}
|
||||
|
||||
func (h *harness) assert(key []byte, want, silent bool) bool {
|
||||
got := h.bloom.Contains(h.filter, key)
|
||||
if !silent && got != want {
|
||||
h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want)
|
||||
}
|
||||
return got
|
||||
}
|
||||
|
||||
func (h *harness) assertNum(key uint32, want, silent bool) bool {
|
||||
var b [4]byte
|
||||
binary.LittleEndian.PutUint32(b[:], key)
|
||||
return h.assert(b[:], want, silent)
|
||||
}
|
||||
|
||||
func TestBloomFilter_Empty(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
h.build()
|
||||
h.assert([]byte("hello"), false, false)
|
||||
h.assert([]byte("world"), false, false)
|
||||
}
|
||||
|
||||
func TestBloomFilter_Small(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
h.add([]byte("hello"))
|
||||
h.add([]byte("world"))
|
||||
h.build()
|
||||
h.assert([]byte("hello"), true, false)
|
||||
h.assert([]byte("world"), true, false)
|
||||
h.assert([]byte("x"), false, false)
|
||||
h.assert([]byte("foo"), false, false)
|
||||
}
|
||||
|
||||
func nextN(n int) int {
|
||||
switch {
|
||||
case n < 10:
|
||||
n += 1
|
||||
case n < 100:
|
||||
n += 10
|
||||
case n < 1000:
|
||||
n += 100
|
||||
default:
|
||||
n += 1000
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func TestBloomFilter_VaryingLengths(t *testing.T) {
|
||||
h := newHarness(t)
|
||||
var mediocre, good int
|
||||
for n := 1; n < 10000; n = nextN(n) {
|
||||
h.reset()
|
||||
for i := 0; i < n; i++ {
|
||||
h.addNum(uint32(i))
|
||||
}
|
||||
h.build()
|
||||
|
||||
got := h.filterLen()
|
||||
want := (n * 10 / 8) + 40
|
||||
if got > want {
|
||||
t.Errorf("filter len test failed, '%d' > '%d'", got, want)
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
h.assertNum(uint32(i), true, false)
|
||||
}
|
||||
|
||||
var rate float32
|
||||
for i := 0; i < 10000; i++ {
|
||||
if h.assertNum(uint32(i+1000000000), true, true) {
|
||||
rate++
|
||||
}
|
||||
}
|
||||
rate /= 10000
|
||||
if rate > 0.02 {
|
||||
t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n)
|
||||
}
|
||||
if rate > 0.0125 {
|
||||
mediocre++
|
||||
} else {
|
||||
good++
|
||||
}
|
||||
}
|
||||
t.Logf("false positive rate: %d good, %d mediocre", good, mediocre)
|
||||
if mediocre > good/5 {
|
||||
t.Error("mediocre false positive rate is more than expected")
|
||||
}
|
||||
}
|
||||
@ -7,7 +7,7 @@
|
||||
package iterator
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// BasicArray is the interface that wraps basic Len and Search method.
|
||||
30
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/iterator/array_iter_test.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package iterator_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
. "github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
)
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Array iterator", func() {
|
||||
It("Should iterates and seeks correctly", func() {
|
||||
// Build key/value.
|
||||
kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3)
|
||||
|
||||
// Test the iterator.
|
||||
t := testutil.IteratorTesting{
|
||||
KeyValue: kv.Clone(),
|
||||
Iter: NewArrayIterator(kv),
|
||||
}
|
||||
testutil.DoIteratorTesting(&t)
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -7,8 +7,8 @@
|
||||
package iterator
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// IteratorIndexer is the interface that wraps CommonIterator and basic Get
|
||||
83
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/iterator/indexed_iter_test.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package iterator_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
. "github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
)
|
||||
|
||||
type keyValue struct {
|
||||
key []byte
|
||||
testutil.KeyValue
|
||||
}
|
||||
|
||||
type keyValueIndex []keyValue
|
||||
|
||||
func (x keyValueIndex) Search(key []byte) int {
|
||||
return sort.Search(x.Len(), func(i int) bool {
|
||||
return comparer.DefaultComparer.Compare(x[i].key, key) >= 0
|
||||
})
|
||||
}
|
||||
|
||||
func (x keyValueIndex) Len() int { return len(x) }
|
||||
func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil }
|
||||
func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) }
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Indexed iterator", func() {
|
||||
Test := func(n ...int) func() {
|
||||
if len(n) == 0 {
|
||||
rnd := testutil.NewRand()
|
||||
n = make([]int, rnd.Intn(17)+3)
|
||||
for i := range n {
|
||||
n[i] = rnd.Intn(19) + 1
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
It("Should iterates and seeks correctly", func(done Done) {
|
||||
// Build key/value.
|
||||
index := make(keyValueIndex, len(n))
|
||||
sum := 0
|
||||
for _, x := range n {
|
||||
sum += x
|
||||
}
|
||||
kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4)
|
||||
for i, j := 0, 0; i < len(n); i++ {
|
||||
for x := n[i]; x > 0; x-- {
|
||||
key, value := kv.Index(j)
|
||||
index[i].key = key
|
||||
index[i].Put(key, value)
|
||||
j++
|
||||
}
|
||||
}
|
||||
|
||||
// Test the iterator.
|
||||
t := testutil.IteratorTesting{
|
||||
KeyValue: kv.Clone(),
|
||||
Iter: NewIndexedIterator(NewArrayIndexer(index), true),
|
||||
}
|
||||
testutil.DoIteratorTesting(&t)
|
||||
done <- true
|
||||
}, 1.5)
|
||||
}
|
||||
}
|
||||
|
||||
Describe("with 100 keys", Test(100))
|
||||
Describe("with 50-50 keys", Test(50, 50))
|
||||
Describe("with 50-1 keys", Test(50, 1))
|
||||
Describe("with 50-1-50 keys", Test(50, 1, 50))
|
||||
Describe("with 1-50 keys", Test(1, 50))
|
||||
Describe("with random N-keys", Test())
|
||||
})
|
||||
})
|
||||
@ -11,7 +11,7 @@ package iterator
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
11
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/iterator/iter_suite_test.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
package iterator_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
)
|
||||
|
||||
func TestIterator(t *testing.T) {
|
||||
testutil.RunSuite(t, "Iterator Suite")
|
||||
}
|
||||
@ -7,9 +7,9 @@
|
||||
package iterator
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type dir int
|
||||
60
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/iterator/merged_iter_test.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package iterator_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
. "github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
)
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Merged iterator", func() {
|
||||
Test := func(filled int, empty int) func() {
|
||||
return func() {
|
||||
It("Should iterates and seeks correctly", func(done Done) {
|
||||
rnd := testutil.NewRand()
|
||||
|
||||
// Build key/value.
|
||||
filledKV := make([]testutil.KeyValue, filled)
|
||||
kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4)
|
||||
kv.Iterate(func(i int, key, value []byte) {
|
||||
filledKV[rnd.Intn(filled)].Put(key, value)
|
||||
})
|
||||
|
||||
// Create itearators.
|
||||
iters := make([]Iterator, filled+empty)
|
||||
for i := range iters {
|
||||
if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
|
||||
filled--
|
||||
Expect(filledKV[filled].Len()).ShouldNot(BeZero())
|
||||
iters[i] = NewArrayIterator(filledKV[filled])
|
||||
} else {
|
||||
empty--
|
||||
iters[i] = NewEmptyIterator(nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Test the iterator.
|
||||
t := testutil.IteratorTesting{
|
||||
KeyValue: kv.Clone(),
|
||||
Iter: NewMergedIterator(iters, comparer.DefaultComparer, true),
|
||||
}
|
||||
testutil.DoIteratorTesting(&t)
|
||||
done <- true
|
||||
}, 1.5)
|
||||
}
|
||||
}
|
||||
|
||||
Describe("with three, all filled iterators", Test(3, 0))
|
||||
Describe("with one filled, one empty iterators", Test(1, 1))
|
||||
Describe("with one filled, two empty iterators", Test(1, 2))
|
||||
})
|
||||
})
|
||||
@ -82,8 +82,9 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// These constants are part of the wire format and should not be changed.
|
||||
@ -165,7 +166,7 @@ func (r *Reader) corrupt(n int, reason string, skip bool) error {
|
||||
r.dropper.Drop(&ErrCorrupted{n, reason})
|
||||
}
|
||||
if r.strict && !skip {
|
||||
r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
|
||||
r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
|
||||
return r.err
|
||||
}
|
||||
return errSkip
|
||||
818
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/journal/journal_test.go
generated
vendored
Normal file
@ -0,0 +1,818 @@
|
||||
// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135
|
||||
// License, authors and contributors informations can be found at bellow URLs respectively:
|
||||
// https://code.google.com/p/leveldb-go/source/browse/LICENSE
|
||||
// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
|
||||
// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
|
||||
|
||||
package journal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type dropper struct {
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (d dropper) Drop(err error) {
|
||||
d.t.Log(err)
|
||||
}
|
||||
|
||||
func short(s string) string {
|
||||
if len(s) < 64 {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
|
||||
}
|
||||
|
||||
// big returns a string of length n, composed of repetitions of partial.
|
||||
func big(partial string, n int) string {
|
||||
return strings.Repeat(partial, n/len(partial)+1)[:n]
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
r := NewReader(buf, dropper{t}, true, true)
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("got %v, want %v", err, io.EOF)
|
||||
}
|
||||
}
|
||||
|
||||
func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
reset()
|
||||
w := NewWriter(buf)
|
||||
for {
|
||||
s, ok := gen()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write([]byte(s)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
reset()
|
||||
r := NewReader(buf, dropper{t}, true, true)
|
||||
for {
|
||||
s, ok := gen()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
x, err := ioutil.ReadAll(rr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(x) != s {
|
||||
t.Fatalf("got %q, want %q", short(string(x)), short(s))
|
||||
}
|
||||
}
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("got %v, want %v", err, io.EOF)
|
||||
}
|
||||
}
|
||||
|
||||
func testLiterals(t *testing.T, s []string) {
|
||||
var i int
|
||||
reset := func() {
|
||||
i = 0
|
||||
}
|
||||
gen := func() (string, bool) {
|
||||
if i == len(s) {
|
||||
return "", false
|
||||
}
|
||||
i++
|
||||
return s[i-1], true
|
||||
}
|
||||
testGenerator(t, reset, gen)
|
||||
}
|
||||
|
||||
func TestMany(t *testing.T) {
|
||||
const n = 1e5
|
||||
var i int
|
||||
reset := func() {
|
||||
i = 0
|
||||
}
|
||||
gen := func() (string, bool) {
|
||||
if i == n {
|
||||
return "", false
|
||||
}
|
||||
i++
|
||||
return fmt.Sprintf("%d.", i-1), true
|
||||
}
|
||||
testGenerator(t, reset, gen)
|
||||
}
|
||||
|
||||
func TestRandom(t *testing.T) {
|
||||
const n = 1e2
|
||||
var (
|
||||
i int
|
||||
r *rand.Rand
|
||||
)
|
||||
reset := func() {
|
||||
i, r = 0, rand.New(rand.NewSource(0))
|
||||
}
|
||||
gen := func() (string, bool) {
|
||||
if i == n {
|
||||
return "", false
|
||||
}
|
||||
i++
|
||||
return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true
|
||||
}
|
||||
testGenerator(t, reset, gen)
|
||||
}
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
testLiterals(t, []string{
|
||||
strings.Repeat("a", 1000),
|
||||
strings.Repeat("b", 97270),
|
||||
strings.Repeat("c", 8000),
|
||||
})
|
||||
}
|
||||
|
||||
func TestBoundary(t *testing.T) {
|
||||
for i := blockSize - 16; i < blockSize+16; i++ {
|
||||
s0 := big("abcd", i)
|
||||
for j := blockSize - 16; j < blockSize+16; j++ {
|
||||
s1 := big("ABCDE", j)
|
||||
testLiterals(t, []string{s0, s1})
|
||||
testLiterals(t, []string{s0, "", s1})
|
||||
testLiterals(t, []string{s0, "x", s1})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlush(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
// Write a couple of records. Everything should still be held
|
||||
// in the record.Writer buffer, so that buf.Len should be 0.
|
||||
w0, _ := w.Next()
|
||||
w0.Write([]byte("0"))
|
||||
w1, _ := w.Next()
|
||||
w1.Write([]byte("11"))
|
||||
if got, want := buf.Len(), 0; got != want {
|
||||
t.Fatalf("buffer length #0: got %d want %d", got, want)
|
||||
}
|
||||
// Flush the record.Writer buffer, which should yield 17 bytes.
|
||||
// 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes.
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := buf.Len(), 17; got != want {
|
||||
t.Fatalf("buffer length #1: got %d want %d", got, want)
|
||||
}
|
||||
// Do another write, one that isn't large enough to complete the block.
|
||||
// The write should not have flowed through to buf.
|
||||
w2, _ := w.Next()
|
||||
w2.Write(bytes.Repeat([]byte("2"), 10000))
|
||||
if got, want := buf.Len(), 17; got != want {
|
||||
t.Fatalf("buffer length #2: got %d want %d", got, want)
|
||||
}
|
||||
// Flushing should get us up to 10024 bytes written.
|
||||
// 10024 = 17 + 7 + 10000.
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := buf.Len(), 10024; got != want {
|
||||
t.Fatalf("buffer length #3: got %d want %d", got, want)
|
||||
}
|
||||
// Do a bigger write, one that completes the current block.
|
||||
// We should now have 32768 bytes (a complete block), without
|
||||
// an explicit flush.
|
||||
w3, _ := w.Next()
|
||||
w3.Write(bytes.Repeat([]byte("3"), 40000))
|
||||
if got, want := buf.Len(), 32768; got != want {
|
||||
t.Fatalf("buffer length #4: got %d want %d", got, want)
|
||||
}
|
||||
// Flushing should get us up to 50038 bytes written.
|
||||
// 50038 = 10024 + 2*7 + 40000. There are two headers because
|
||||
// the one record was split into two chunks.
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := buf.Len(), 50038; got != want {
|
||||
t.Fatalf("buffer length #5: got %d want %d", got, want)
|
||||
}
|
||||
// Check that reading those records give the right lengths.
|
||||
r := NewReader(buf, dropper{t}, true, true)
|
||||
wants := []int64{1, 2, 10000, 40000}
|
||||
for i, want := range wants {
|
||||
rr, _ := r.Next()
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #%d: %v", i, err)
|
||||
}
|
||||
if n != want {
|
||||
t.Fatalf("read #%d: got %d bytes want %d", i, n, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNonExhaustiveRead(t *testing.T) {
|
||||
const n = 100
|
||||
buf := new(bytes.Buffer)
|
||||
p := make([]byte, 10)
|
||||
rnd := rand.New(rand.NewSource(1))
|
||||
|
||||
w := NewWriter(buf)
|
||||
for i := 0; i < n; i++ {
|
||||
length := len(p) + rnd.Intn(3*blockSize)
|
||||
s := string(uint8(i)) + "123456789abcdefgh"
|
||||
ww, _ := w.Next()
|
||||
ww.Write([]byte(big(s, length)))
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := NewReader(buf, dropper{t}, true, true)
|
||||
for i := 0; i < n; i++ {
|
||||
rr, _ := r.Next()
|
||||
_, err := io.ReadFull(rr, p)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := string(uint8(i)) + "123456789"
|
||||
if got := string(p); got != want {
|
||||
t.Fatalf("read #%d: got %q want %q", i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStaleReader(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
w0, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w0.Write([]byte("0"))
|
||||
w1, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w1.Write([]byte("11"))
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := NewReader(buf, dropper{t}, true, true)
|
||||
r0, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r1, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p := make([]byte, 1)
|
||||
if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") {
|
||||
t.Fatalf("stale read #0: unexpected error: %v", err)
|
||||
}
|
||||
if _, err := r1.Read(p); err != nil {
|
||||
t.Fatalf("fresh read #1: got %v want nil error", err)
|
||||
}
|
||||
if p[0] != '1' {
|
||||
t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestStaleWriter(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
w0, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w1, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
|
||||
t.Fatalf("stale write #0: unexpected error: %v", err)
|
||||
}
|
||||
if _, err := w1.Write([]byte("11")); err != nil {
|
||||
t.Fatalf("fresh write #1: got %v want nil error", err)
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatalf("flush: %v", err)
|
||||
}
|
||||
if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") {
|
||||
t.Fatalf("stale write #1: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_MissingLastBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Cut the last block.
|
||||
b := buf.Bytes()[:blockSize]
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read.
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if n != blockSize-1024 {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
|
||||
}
|
||||
|
||||
// Second read.
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Fourth record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
|
||||
t.Fatalf("write #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting block #0.
|
||||
for i := 0; i < 1024; i++ {
|
||||
b[i] = '1'
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (third record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 1; n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (fourth record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #1: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 2; n != want {
|
||||
t.Fatalf("read #1: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Fourth record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
|
||||
t.Fatalf("write #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting block #1.
|
||||
for i := 0; i < 1024; i++ {
|
||||
b[blockSize+i] = '1'
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (second record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third read (fourth record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #2: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 2; n != want {
|
||||
t.Fatalf("read #2: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_CorruptedLastBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Fourth record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
|
||||
t.Fatalf("write #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting block #3.
|
||||
for i := len(b) - 1; i > len(b)-1024; i-- {
|
||||
b[i] = '1'
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (second record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #1: %v", err)
|
||||
}
|
||||
if want := int64(blockSize - headerSize); n != want {
|
||||
t.Fatalf("read #1: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Third read (third record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #2: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 1; n != want {
|
||||
t.Fatalf("read #2: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Fourth read (fourth record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting record #1.
|
||||
x := blockSize
|
||||
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (second record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting record #1.
|
||||
x := blockSize/2 + headerSize
|
||||
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (third record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #1: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 1; n != want {
|
||||
t.Fatalf("read #1: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
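
The corruption tests above drive the journal Writer/Reader pair directly: records are appended through Writer.Next and read back through Reader.Next until io.EOF, with a Dropper receiving any corrupted chunks. A minimal round-trip sketch of that API, assuming the vendored pingcap import path and reusing the strict/checksum flags the tests pass:

// Sketch only: write two journal records and read them back.
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/pingcap/goleveldb/leveldb/journal"
)

// logDropper satisfies the journal Dropper interface; corrupted chunks
// are reported here instead of aborting the read.
type logDropper struct{}

func (logDropper) Drop(err error) { log.Printf("journal: dropped: %v", err) }

func main() {
	buf := new(bytes.Buffer)

	// Write side: one io.Writer per record via Next, then Close.
	w := journal.NewWriter(buf)
	for _, rec := range []string{"first record", "second record"} {
		ww, err := w.Next()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := ww.Write([]byte(rec)); err != nil {
			log.Fatal(err)
		}
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Read side: same strict=false, checksum=true flags as the tests above.
	r := journal.NewReader(bytes.NewReader(buf.Bytes()), logDropper{}, false, true)
	for {
		rr, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		data, err := ioutil.ReadAll(rr)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%q\n", data)
	}
}
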
147
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/key.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
// ErrInternalKeyCorrupted records internal key corruption.
|
||||
type ErrInternalKeyCorrupted struct {
|
||||
Ikey []byte
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e *ErrInternalKeyCorrupted) Error() string {
|
||||
return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
|
||||
}
|
||||
|
||||
func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
|
||||
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
|
||||
}
|
||||
|
||||
type keyType uint
|
||||
|
||||
func (kt keyType) String() string {
|
||||
switch kt {
|
||||
case keyTypeDel:
|
||||
return "d"
|
||||
case keyTypeVal:
|
||||
return "v"
|
||||
}
|
||||
return "x"
|
||||
}
|
||||
|
||||
// Value types encoded as the last component of internal keys.
|
||||
// Don't modify; these values are saved to disk.
|
||||
const (
|
||||
keyTypeDel keyType = iota
|
||||
keyTypeVal
|
||||
)
|
||||
|
||||
// keyTypeSeek defines the keyType that should be passed when constructing an
|
||||
// internal key for seeking to a particular sequence number (since we
|
||||
// sort sequence numbers in decreasing order and the value type is
|
||||
// embedded as the low 8 bits in the sequence number in internal keys,
|
||||
// we need to use the highest-numbered ValueType, not the lowest).
|
||||
const keyTypeSeek = keyTypeVal
|
||||
|
||||
const (
|
||||
// Maximum value possible for a sequence number; the low 8 bits are
// used by the value type, so both can be packed together into a single
// 64-bit integer.
|
||||
keyMaxSeq = (uint64(1) << 56) - 1
|
||||
// Maximum value possible for packed sequence number and type.
|
||||
keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
|
||||
)
|
||||
|
||||
// Maximum number encoded in bytes.
|
||||
var keyMaxNumBytes = make([]byte, 8)
|
||||
|
||||
func init() {
|
||||
binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
|
||||
}
|
||||
|
||||
type internalKey []byte
|
||||
|
||||
func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
|
||||
if seq > keyMaxSeq {
|
||||
panic("leveldb: invalid sequence number")
|
||||
} else if kt > keyTypeVal {
|
||||
panic("leveldb: invalid type")
|
||||
}
|
||||
|
||||
if n := len(ukey) + 8; cap(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
} else {
|
||||
dst = dst[:n]
|
||||
}
|
||||
copy(dst, ukey)
|
||||
binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
|
||||
return internalKey(dst)
|
||||
}
|
||||
|
||||
func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
|
||||
if len(ik) < 8 {
|
||||
return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
|
||||
}
|
||||
num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
|
||||
seq, kt = uint64(num>>8), keyType(num&0xff)
|
||||
if kt > keyTypeVal {
|
||||
return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
|
||||
}
|
||||
ukey = ik[:len(ik)-8]
|
||||
return
|
||||
}
|
||||
|
||||
func validInternalKey(ik []byte) bool {
|
||||
_, _, _, err := parseInternalKey(ik)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (ik internalKey) assert() {
|
||||
if ik == nil {
|
||||
panic("leveldb: nil internalKey")
|
||||
}
|
||||
if len(ik) < 8 {
|
||||
panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
|
||||
}
|
||||
}
|
||||
|
||||
func (ik internalKey) ukey() []byte {
|
||||
ik.assert()
|
||||
return ik[:len(ik)-8]
|
||||
}
|
||||
|
||||
func (ik internalKey) num() uint64 {
|
||||
ik.assert()
|
||||
return binary.LittleEndian.Uint64(ik[len(ik)-8:])
|
||||
}
|
||||
|
||||
func (ik internalKey) parseNum() (seq uint64, kt keyType) {
|
||||
num := ik.num()
|
||||
seq, kt = uint64(num>>8), keyType(num&0xff)
|
||||
if kt > keyTypeVal {
|
||||
panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ik internalKey) String() string {
|
||||
if ik == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
|
||||
if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
|
||||
return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
|
||||
}
|
||||
return "<invalid>"
|
||||
}
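
For reference, the layout that makeInternalKey and parseInternalKey implement above is simply the user key followed by a little-endian uint64 holding (seq<<8)|keyType. A standalone sketch of that arithmetic (not part of the vendored file; the constants only mirror the ones above):

// Standalone illustration of the internal-key layout:
// ukey | little-endian uint64 of (seq<<8 | keyType).
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	keyTypeDel = 0 // mirrors keyTypeDel above
	keyTypeVal = 1 // mirrors keyTypeVal above
)

func main() {
	ukey := []byte("foo")
	seq := uint64(42)

	// Encode: append 8 bytes holding the packed sequence number and type.
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(keyTypeVal))

	// Decode: the last 8 bytes hold seq in the high 56 bits, type in the low 8.
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	fmt.Printf("ukey=%q seq=%d type=%d\n", ik[:len(ik)-8], num>>8, num&0xff)
}
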
133
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/key_test.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
)
|
||||
|
||||
var defaultIComparer = &iComparer{comparer.DefaultComparer}
|
||||
|
||||
func ikey(key string, seq uint64, kt keyType) internalKey {
|
||||
return makeInternalKey(nil, []byte(key), uint64(seq), kt)
|
||||
}
|
||||
|
||||
func shortSep(a, b []byte) []byte {
|
||||
dst := make([]byte, len(a))
|
||||
dst = defaultIComparer.Separator(dst[:0], a, b)
|
||||
if dst == nil {
|
||||
return a
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func shortSuccessor(b []byte) []byte {
|
||||
dst := make([]byte, len(b))
|
||||
dst = defaultIComparer.Successor(dst[:0], b)
|
||||
if dst == nil {
|
||||
return b
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func testSingleKey(t *testing.T, key string, seq uint64, kt keyType) {
|
||||
ik := ikey(key, seq, kt)
|
||||
|
||||
if !bytes.Equal(ik.ukey(), []byte(key)) {
|
||||
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
|
||||
}
|
||||
|
||||
rseq, rt := ik.parseNum()
|
||||
if rseq != seq {
|
||||
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
|
||||
}
|
||||
if rt != kt {
|
||||
t.Errorf("type does not equal, got %v, want %v", rt, kt)
|
||||
}
|
||||
|
||||
if rukey, rseq, rt, kerr := parseInternalKey(ik); kerr == nil {
|
||||
if !bytes.Equal(rukey, []byte(key)) {
|
||||
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
|
||||
}
|
||||
if rseq != seq {
|
||||
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
|
||||
}
|
||||
if rt != kt {
|
||||
t.Errorf("type does not equal, got %v, want %v", rt, kt)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("key error: %v", kerr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalKey_EncodeDecode(t *testing.T) {
|
||||
keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
|
||||
seqs := []uint64{
|
||||
1, 2, 3,
|
||||
(1 << 8) - 1, 1 << 8, (1 << 8) + 1,
|
||||
(1 << 16) - 1, 1 << 16, (1 << 16) + 1,
|
||||
(1 << 32) - 1, 1 << 32, (1 << 32) + 1,
|
||||
}
|
||||
for _, key := range keys {
|
||||
for _, seq := range seqs {
|
||||
testSingleKey(t, key, seq, keyTypeVal)
|
||||
testSingleKey(t, "hello", 1, keyTypeDel)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertBytes(t *testing.T, want, got []byte) {
|
||||
if !bytes.Equal(got, want) {
|
||||
t.Errorf("assert failed, got %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInternalKeyShortSeparator(t *testing.T) {
|
||||
// When user keys are same
|
||||
assertBytes(t, ikey("foo", 100, keyTypeVal),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("foo", 99, keyTypeVal)))
|
||||
assertBytes(t, ikey("foo", 100, keyTypeVal),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("foo", 101, keyTypeVal)))
|
||||
assertBytes(t, ikey("foo", 100, keyTypeVal),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("foo", 100, keyTypeVal)))
|
||||
assertBytes(t, ikey("foo", 100, keyTypeVal),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("foo", 100, keyTypeDel)))
|
||||
|
||||
// When user keys are misordered
|
||||
assertBytes(t, ikey("foo", 100, keyTypeVal),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("bar", 99, keyTypeVal)))
|
||||
|
||||
// When user keys are different, but correctly ordered
|
||||
assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("hello", 200, keyTypeVal)))
|
||||
|
||||
// When start user key is prefix of limit user key
|
||||
assertBytes(t, ikey("foo", 100, keyTypeVal),
|
||||
shortSep(ikey("foo", 100, keyTypeVal),
|
||||
ikey("foobar", 200, keyTypeVal)))
|
||||
|
||||
// When limit user key is prefix of start user key
|
||||
assertBytes(t, ikey("foobar", 100, keyTypeVal),
|
||||
shortSep(ikey("foobar", 100, keyTypeVal),
|
||||
ikey("foo", 200, keyTypeVal)))
|
||||
}
|
||||
|
||||
func TestInternalKeyShortestSuccessor(t *testing.T) {
|
||||
assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek),
|
||||
shortSuccessor(ikey("foo", 100, keyTypeVal)))
|
||||
assertBytes(t, ikey("\xff\xff", 100, keyTypeVal),
|
||||
shortSuccessor(ikey("\xff\xff", 100, keyTypeVal)))
|
||||
}
|
||||
11
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/leveldb_suite_test.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
package leveldb

import (
	"testing"

	"github.com/pingcap/goleveldb/leveldb/testutil"
)

func TestLevelDB(t *testing.T) {
	testutil.RunSuite(t, "LevelDB Suite")
}
75
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/memdb/bench_test.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package memdb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
)
|
||||
|
||||
func BenchmarkPut(b *testing.B) {
|
||||
buf := make([][4]byte, b.N)
|
||||
for i := range buf {
|
||||
binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
p := New(comparer.DefaultComparer, 0)
|
||||
for i := range buf {
|
||||
p.Put(buf[i][:], nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPutRandom(b *testing.B) {
|
||||
buf := make([][4]byte, b.N)
|
||||
for i := range buf {
|
||||
binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int()))
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
p := New(comparer.DefaultComparer, 0)
|
||||
for i := range buf {
|
||||
p.Put(buf[i][:], nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGet(b *testing.B) {
|
||||
buf := make([][4]byte, b.N)
|
||||
for i := range buf {
|
||||
binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
|
||||
}
|
||||
|
||||
p := New(comparer.DefaultComparer, 0)
|
||||
for i := range buf {
|
||||
p.Put(buf[i][:], nil)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := range buf {
|
||||
p.Get(buf[i][:])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGetRandom(b *testing.B) {
|
||||
buf := make([][4]byte, b.N)
|
||||
for i := range buf {
|
||||
binary.LittleEndian.PutUint32(buf[i][:], uint32(i))
|
||||
}
|
||||
|
||||
p := New(comparer.DefaultComparer, 0)
|
||||
for i := range buf {
|
||||
p.Put(buf[i][:], nil)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
p.Get(buf[rand.Int()%b.N][:])
|
||||
}
|
||||
}
|
||||
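
The benchmarks above cover the memdb entry points this commit touches: New with a comparer and an initial buffer capacity, then Put and Get. A minimal standalone usage sketch, assuming the vendored import paths:

// Minimal memdb usage mirroring the benchmarks above: build an
// in-memory sorted table and read a key back.
package main

import (
	"fmt"
	"log"

	"github.com/pingcap/goleveldb/leveldb/comparer"
	"github.com/pingcap/goleveldb/leveldb/memdb"
)

func main() {
	// The second argument is the initial key/value buffer capacity;
	// the benchmarks above pass 0 and let the buffer grow.
	db := memdb.New(comparer.DefaultComparer, 0)

	if err := db.Put([]byte("k1"), []byte("v1")); err != nil {
		log.Fatal(err)
	}
	if err := db.Put([]byte("k2"), []byte("v2")); err != nil {
		log.Fatal(err)
	}

	v, err := db.Get([]byte("k1"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("k1=%s len=%d size=%d\n", v, db.Len(), db.Size())
}
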
@ -11,10 +11,10 @@ import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -436,7 +436,6 @@ func (p *DB) Len() int {
|
||||
// Reset resets the DB to initial empty state. Allows reuse the buffer.
|
||||
func (p *DB) Reset() {
|
||||
p.mu.Lock()
|
||||
p.rnd = rand.New(rand.NewSource(0xdeadbeef))
|
||||
p.maxHeight = 1
|
||||
p.n = 0
|
||||
p.kvSize = 0
|
||||
11
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/memdb/memdb_suite_test.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
package memdb

import (
	"testing"

	"github.com/pingcap/goleveldb/leveldb/testutil"
)

func TestMemDB(t *testing.T) {
	testutil.RunSuite(t, "MemDB Suite")
}
135
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/memdb/memdb_test.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package memdb
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) {
|
||||
p.mu.RLock()
|
||||
if node := p.findLT(key); node != 0 {
|
||||
n := p.nodeData[node]
|
||||
m := n + p.nodeData[node+nKey]
|
||||
rkey = p.kvData[n:m]
|
||||
value = p.kvData[m : m+p.nodeData[node+nVal]]
|
||||
} else {
|
||||
err = ErrNotFound
|
||||
}
|
||||
p.mu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *DB) TestFindLast() (rkey, value []byte, err error) {
|
||||
p.mu.RLock()
|
||||
if node := p.findLast(); node != 0 {
|
||||
n := p.nodeData[node]
|
||||
m := n + p.nodeData[node+nKey]
|
||||
rkey = p.kvData[n:m]
|
||||
value = p.kvData[m : m+p.nodeData[node+nVal]]
|
||||
} else {
|
||||
err = ErrNotFound
|
||||
}
|
||||
p.mu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *DB) TestPut(key []byte, value []byte) error {
|
||||
p.Put(key, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *DB) TestDelete(key []byte) error {
|
||||
p.Delete(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) {
|
||||
return p.Find(key)
|
||||
}
|
||||
|
||||
func (p *DB) TestGet(key []byte) (value []byte, err error) {
|
||||
return p.Get(key)
|
||||
}
|
||||
|
||||
func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return p.NewIterator(slice)
|
||||
}
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Memdb", func() {
|
||||
Describe("write test", func() {
|
||||
It("should do write correctly", func() {
|
||||
db := New(comparer.DefaultComparer, 0)
|
||||
t := testutil.DBTesting{
|
||||
DB: db,
|
||||
Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(),
|
||||
PostFn: func(t *testutil.DBTesting) {
|
||||
Expect(db.Len()).Should(Equal(t.Present.Len()))
|
||||
Expect(db.Size()).Should(Equal(t.Present.Size()))
|
||||
switch t.Act {
|
||||
case testutil.DBPut, testutil.DBOverwrite:
|
||||
Expect(db.Contains(t.ActKey)).Should(BeTrue())
|
||||
default:
|
||||
Expect(db.Contains(t.ActKey)).Should(BeFalse())
|
||||
}
|
||||
},
|
||||
}
|
||||
testutil.DoDBTesting(&t)
|
||||
})
|
||||
})
|
||||
|
||||
Describe("read test", func() {
|
||||
testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
|
||||
// Building the DB.
|
||||
db := New(comparer.DefaultComparer, 0)
|
||||
kv.IterateShuffled(nil, func(i int, key, value []byte) {
|
||||
db.Put(key, value)
|
||||
})
|
||||
|
||||
if kv.Len() > 1 {
|
||||
It("Should find correct keys with findLT", func() {
|
||||
testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) {
|
||||
key_, key, _ := kv.IndexInexact(i + 1)
|
||||
expectedKey, expectedValue := kv.Index(i)
|
||||
|
||||
// Using key that exist.
|
||||
rkey, rvalue, err := db.TestFindLT(key)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey)
|
||||
Expect(rkey).Should(Equal(expectedKey), "Key")
|
||||
Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey)
|
||||
|
||||
// Using key that doesn't exist.
|
||||
rkey, rvalue, err = db.TestFindLT(key_)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey)
|
||||
Expect(rkey).Should(Equal(expectedKey))
|
||||
Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
if kv.Len() > 0 {
|
||||
It("Should find last key with findLast", func() {
|
||||
key, value := kv.Index(kv.Len() - 1)
|
||||
rkey, rvalue, err := db.TestFindLast()
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(rkey).Should(Equal(key))
|
||||
Expect(rvalue).Should(Equal(value))
|
||||
})
|
||||
}
|
||||
|
||||
return db
|
||||
}, nil, nil)
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -8,10 +8,11 @@
|
||||
package opt
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/cache"
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"math"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/cache"
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
"github.com/pingcap/goleveldb/leveldb/filter"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -35,8 +36,6 @@ var (
|
||||
DefaultCompactionTotalSizeMultiplier = 10.0
|
||||
DefaultCompressionType = SnappyCompression
|
||||
DefaultIteratorSamplingRate = 1 * MiB
|
||||
DefaultMaxMemCompationLevel = 2
|
||||
DefaultNumLevel = 7
|
||||
DefaultOpenFilesCacher = LRUCacher
|
||||
DefaultOpenFilesCacheCapacity = 500
|
||||
DefaultWriteBuffer = 4 * MiB
|
||||
@ -266,6 +265,13 @@ type Options struct {
|
||||
// The default value is false.
|
||||
DisableCompactionBackoff bool
|
||||
|
||||
// DisableLargeBatchTransaction allows disabling switch-to-transaction mode
// on large batch writes. When not disabled, batch writes larger than
// WriteBuffer are applied using a transaction.
|
||||
//
|
||||
// The default is false.
|
||||
DisableLargeBatchTransaction bool
|
||||
|
||||
// ErrorIfExist defines whether an error should be returned if the DB already
// exists.
|
||||
//
|
||||
@ -301,24 +307,11 @@ type Options struct {
|
||||
// The default is 1MiB.
|
||||
IteratorSamplingRate int
|
||||
|
||||
// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
|
||||
// will be pushed into if doesn't creates overlap. This should less than
|
||||
// NumLevel. Use -1 for level-0.
|
||||
//
|
||||
// The default is 2.
|
||||
MaxMemCompationLevel int
|
||||
|
||||
// NoSync allows completely disable fsync.
|
||||
//
|
||||
// The default is false.
|
||||
NoSync bool
|
||||
|
||||
// NumLevel defines number of database level. The level shouldn't changed
|
||||
// between opens, or the database will panic.
|
||||
//
|
||||
// The default is 7.
|
||||
NumLevel int
|
||||
|
||||
// OpenFilesCacher provides cache algorithm for open files caching.
|
||||
// Specify NoCacher to disable caching algorithm.
|
||||
//
|
||||
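
The fields touched in this hunk are plain struct members resolved through Get* accessors that fall back to the defaults listed earlier; DisableLargeBatchTransaction is new, while MaxMemCompationLevel and NumLevel are dropped. A hedged sketch of how a caller might set and read them (standalone, assuming the vendored opt import path):

// Sketch of how these options are consumed: zero values fall back to
// the documented defaults via the Get* accessors.
package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/opt"
)

func main() {
	o := &opt.Options{
		WriteBuffer:                  8 * opt.MiB, // override the 4 MiB default
		DisableLargeBatchTransaction: true,        // field added in this change
	}

	fmt.Println(o.GetWriteBuffer())                  // 8388608
	fmt.Println(o.GetDisableLargeBatchTransaction()) // true
	fmt.Println(o.GetCompactionTableSize(0))         // DefaultCompactionTableSize
}
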
@ -440,7 +433,7 @@ func (o *Options) GetCompactionTableSize(level int) int {
|
||||
if o.CompactionTableSize > 0 {
|
||||
base = o.CompactionTableSize
|
||||
}
|
||||
if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
|
||||
if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
|
||||
mult = o.CompactionTableSizeMultiplierPerLevel[level]
|
||||
} else if o.CompactionTableSizeMultiplier > 0 {
|
||||
mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
|
||||
@ -461,7 +454,7 @@ func (o *Options) GetCompactionTotalSize(level int) int64 {
|
||||
if o.CompactionTotalSize > 0 {
|
||||
base = o.CompactionTotalSize
|
||||
}
|
||||
if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
|
||||
if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
|
||||
mult = o.CompactionTotalSizeMultiplierPerLevel[level]
|
||||
} else if o.CompactionTotalSizeMultiplier > 0 {
|
||||
mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
|
||||
@ -508,6 +501,13 @@ func (o *Options) GetDisableCompactionBackoff() bool {
|
||||
return o.DisableCompactionBackoff
|
||||
}
|
||||
|
||||
func (o *Options) GetDisableLargeBatchTransaction() bool {
|
||||
if o == nil {
|
||||
return false
|
||||
}
|
||||
return o.DisableLargeBatchTransaction
|
||||
}
|
||||
|
||||
func (o *Options) GetErrorIfExist() bool {
|
||||
if o == nil {
|
||||
return false
|
||||
@ -536,21 +536,6 @@ func (o *Options) GetIteratorSamplingRate() int {
|
||||
return o.IteratorSamplingRate
|
||||
}
|
||||
|
||||
func (o *Options) GetMaxMemCompationLevel() int {
|
||||
level := DefaultMaxMemCompationLevel
|
||||
if o != nil {
|
||||
if o.MaxMemCompationLevel > 0 {
|
||||
level = o.MaxMemCompationLevel
|
||||
} else if o.MaxMemCompationLevel < 0 {
|
||||
level = 0
|
||||
}
|
||||
}
|
||||
if level >= o.GetNumLevel() {
|
||||
return o.GetNumLevel() - 1
|
||||
}
|
||||
return level
|
||||
}
|
||||
|
||||
func (o *Options) GetNoSync() bool {
|
||||
if o == nil {
|
||||
return false
|
||||
@ -558,13 +543,6 @@ func (o *Options) GetNoSync() bool {
|
||||
return o.NoSync
|
||||
}
|
||||
|
||||
func (o *Options) GetNumLevel() int {
|
||||
if o == nil || o.NumLevel <= 0 {
|
||||
return DefaultNumLevel
|
||||
}
|
||||
return o.NumLevel
|
||||
}
|
||||
|
||||
func (o *Options) GetOpenFilesCacher() Cacher {
|
||||
if o == nil || o.OpenFilesCacher == nil {
|
||||
return DefaultOpenFilesCacher
|
||||
@ -7,8 +7,8 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/filter"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func dupOptions(o *opt.Options) *opt.Options {
|
||||
@ -43,6 +43,8 @@ func (s *session) setOptions(o *opt.Options) {
|
||||
s.o.cache()
|
||||
}
|
||||
|
||||
const optCachedLevel = 7
|
||||
|
||||
type cachedOptions struct {
|
||||
*opt.Options
|
||||
|
||||
@ -54,15 +56,13 @@ type cachedOptions struct {
|
||||
}
|
||||
|
||||
func (co *cachedOptions) cache() {
|
||||
numLevel := co.Options.GetNumLevel()
|
||||
co.compactionExpandLimit = make([]int, optCachedLevel)
|
||||
co.compactionGPOverlaps = make([]int, optCachedLevel)
|
||||
co.compactionSourceLimit = make([]int, optCachedLevel)
|
||||
co.compactionTableSize = make([]int, optCachedLevel)
|
||||
co.compactionTotalSize = make([]int64, optCachedLevel)
|
||||
|
||||
co.compactionExpandLimit = make([]int, numLevel)
|
||||
co.compactionGPOverlaps = make([]int, numLevel)
|
||||
co.compactionSourceLimit = make([]int, numLevel)
|
||||
co.compactionTableSize = make([]int, numLevel)
|
||||
co.compactionTotalSize = make([]int64, numLevel)
|
||||
|
||||
for level := 0; level < numLevel; level++ {
|
||||
for level := 0; level < optCachedLevel; level++ {
|
||||
co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
|
||||
co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
|
||||
co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
|
||||
@ -72,21 +72,36 @@ func (co *cachedOptions) cache() {
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
|
||||
return co.compactionExpandLimit[level]
|
||||
if level < optCachedLevel {
|
||||
return co.compactionExpandLimit[level]
|
||||
}
|
||||
return co.Options.GetCompactionExpandLimit(level)
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
|
||||
return co.compactionGPOverlaps[level]
|
||||
if level < optCachedLevel {
|
||||
return co.compactionGPOverlaps[level]
|
||||
}
|
||||
return co.Options.GetCompactionGPOverlaps(level)
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
|
||||
return co.compactionSourceLimit[level]
|
||||
if level < optCachedLevel {
|
||||
return co.compactionSourceLimit[level]
|
||||
}
|
||||
return co.Options.GetCompactionSourceLimit(level)
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionTableSize(level int) int {
|
||||
return co.compactionTableSize[level]
|
||||
if level < optCachedLevel {
|
||||
return co.compactionTableSize[level]
|
||||
}
|
||||
return co.Options.GetCompactionTableSize(level)
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
|
||||
return co.compactionTotalSize[level]
|
||||
if level < optCachedLevel {
|
||||
return co.compactionTotalSize[level]
|
||||
}
|
||||
return co.Options.GetCompactionTotalSize(level)
|
||||
}
|
||||
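
The cachedOptions change above precomputes per-level values only for the first optCachedLevel levels and falls back to the underlying accessor beyond that, now that the level count is no longer fixed by NumLevel. The same bounded-cache pattern in isolation, with illustrative names that are not part of the package:

// Generic form of the bounded cache used by cachedOptions: precompute
// the first N levels, compute anything deeper on demand.
package main

import "fmt"

const cachedLevels = 7 // mirrors optCachedLevel above

// expandLimit stands in for an expensive per-level computation.
func expandLimit(level int) int { return 10 << uint(level) }

type levelOptions struct {
	cached [cachedLevels]int
}

func newLevelOptions() *levelOptions {
	lo := &levelOptions{}
	for level := 0; level < cachedLevels; level++ {
		lo.cached[level] = expandLimit(level)
	}
	return lo
}

func (lo *levelOptions) get(level int) int {
	if level < cachedLevels {
		return lo.cached[level] // fast path: precomputed
	}
	return expandLimit(level) // rare deep levels: compute on demand
}

func main() {
	lo := newLevelOptions()
	fmt.Println(lo.get(2), lo.get(9))
}
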
@ -12,13 +12,13 @@ import (
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/journal"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
// ErrManifestCorrupted records manifest corruption.
|
||||
type ErrManifestCorrupted struct {
|
||||
Field string
|
||||
Reason string
|
||||
@ -28,31 +28,31 @@ func (e *ErrManifestCorrupted) Error() string {
|
||||
return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
|
||||
}
|
||||
|
||||
func newErrManifestCorrupted(f storage.File, field, reason string) error {
|
||||
return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
|
||||
func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error {
|
||||
return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason})
|
||||
}
|
||||
|
||||
// session represent a persistent database session.
|
||||
type session struct {
|
||||
// Need 64-bit alignment.
|
||||
stNextFileNum uint64 // current unused file number
|
||||
stJournalNum uint64 // current journal file number; need external synchronization
|
||||
stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
|
||||
stNextFileNum int64 // current unused file number
|
||||
stJournalNum int64 // current journal file number; need external synchronization
|
||||
stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb
|
||||
stTempFileNum int64
|
||||
stSeqNum uint64 // last mem compacted seq; need external synchronization
|
||||
stTempFileNum uint64
|
||||
|
||||
stor storage.Storage
|
||||
storLock util.Releaser
|
||||
storLock storage.Lock
|
||||
o *cachedOptions
|
||||
icmp *iComparer
|
||||
tops *tOps
|
||||
|
||||
manifest *journal.Writer
|
||||
manifestWriter storage.Writer
|
||||
manifestFile storage.File
|
||||
manifestFd storage.FileDesc
|
||||
|
||||
stCompPtrs []iKey // compaction pointers; need external synchronization
|
||||
stVersion *version // current version
|
||||
stCompPtrs []internalKey // compaction pointers; need external synchronization
|
||||
stVersion *version // current version
|
||||
vmu sync.Mutex
|
||||
}
|
||||
|
||||
@ -66,9 +66,8 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
|
||||
return
|
||||
}
|
||||
s = &session{
|
||||
stor: stor,
|
||||
storLock: storLock,
|
||||
stCompPtrs: make([]iKey, o.GetNumLevel()),
|
||||
stor: stor,
|
||||
storLock: storLock,
|
||||
}
|
||||
s.setOptions(o)
|
||||
s.tops = newTableOps(s)
|
||||
@ -88,7 +87,6 @@ func (s *session) close() {
|
||||
}
|
||||
s.manifest = nil
|
||||
s.manifestWriter = nil
|
||||
s.manifestFile = nil
|
||||
s.stVersion = nil
|
||||
}
|
||||
|
||||
@ -109,18 +107,18 @@ func (s *session) recover() (err error) {
|
||||
if os.IsNotExist(err) {
|
||||
// Don't return os.ErrNotExist if the underlying storage contains
|
||||
// other files that belong to LevelDB. So the DB won't get trashed.
|
||||
if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
|
||||
err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
|
||||
if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 {
|
||||
err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
m, err := s.stor.GetManifest()
|
||||
fd, err := s.stor.GetMeta()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
reader, err := m.Open()
|
||||
reader, err := s.stor.Open(fd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -128,10 +126,9 @@ func (s *session) recover() (err error) {
|
||||
|
||||
var (
|
||||
// Options.
|
||||
numLevel = s.o.GetNumLevel()
|
||||
strict = s.o.GetStrict(opt.StrictManifest)
|
||||
strict = s.o.GetStrict(opt.StrictManifest)
|
||||
|
||||
jr = journal.NewReader(reader, dropper{s, m}, strict, true)
|
||||
jr = journal.NewReader(reader, dropper{s, fd}, strict, true)
|
||||
rec = &sessionRecord{}
|
||||
staging = s.stVersion.newStaging()
|
||||
)
|
||||
@ -143,24 +140,23 @@ func (s *session) recover() (err error) {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
return errors.SetFile(err, m)
|
||||
return errors.SetFd(err, fd)
|
||||
}
|
||||
|
||||
err = rec.decode(r, numLevel)
|
||||
err = rec.decode(r)
|
||||
if err == nil {
|
||||
// save compact pointers
|
||||
for _, r := range rec.compPtrs {
|
||||
s.stCompPtrs[r.level] = iKey(r.ikey)
|
||||
s.setCompPtr(r.level, internalKey(r.ikey))
|
||||
}
|
||||
// commit record to version staging
|
||||
staging.commit(rec)
|
||||
} else {
|
||||
err = errors.SetFile(err, m)
|
||||
err = errors.SetFd(err, fd)
|
||||
if strict || !errors.IsCorrupted(err) {
|
||||
return
|
||||
} else {
|
||||
s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
|
||||
}
|
||||
s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
|
||||
}
|
||||
rec.resetCompPtrs()
|
||||
rec.resetAddedTables()
|
||||
@ -169,18 +165,18 @@ func (s *session) recover() (err error) {
|
||||
|
||||
switch {
|
||||
case !rec.has(recComparer):
|
||||
return newErrManifestCorrupted(m, "comparer", "missing")
|
||||
return newErrManifestCorrupted(fd, "comparer", "missing")
|
||||
case rec.comparer != s.icmp.uName():
|
||||
return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
|
||||
return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
|
||||
case !rec.has(recNextFileNum):
|
||||
return newErrManifestCorrupted(m, "next-file-num", "missing")
|
||||
return newErrManifestCorrupted(fd, "next-file-num", "missing")
|
||||
case !rec.has(recJournalNum):
|
||||
return newErrManifestCorrupted(m, "journal-file-num", "missing")
|
||||
return newErrManifestCorrupted(fd, "journal-file-num", "missing")
|
||||
case !rec.has(recSeqNum):
|
||||
return newErrManifestCorrupted(m, "seq-num", "missing")
|
||||
return newErrManifestCorrupted(fd, "seq-num", "missing")
|
||||
}
|
||||
|
||||
s.manifestFile = m
|
||||
s.manifestFd = fd
|
||||
s.setVersion(staging.finish())
|
||||
s.setNextFileNum(rec.nextFileNum)
|
||||
s.recordCommited(rec)
|
||||
@ -9,46 +9,51 @@ package leveldb
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/memdb"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func (s *session) pickMemdbLevel(umin, umax []byte) int {
|
||||
func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int {
|
||||
v := s.version()
|
||||
defer v.release()
|
||||
return v.pickMemdbLevel(umin, umax)
|
||||
return v.pickMemdbLevel(umin, umax, maxLevel)
|
||||
}
|
||||
|
||||
func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
|
||||
func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) {
|
||||
// Create sorted table.
|
||||
iter := mdb.NewIterator(nil)
|
||||
defer iter.Release()
|
||||
t, n, err := s.tops.createFrom(iter)
|
||||
if err != nil {
|
||||
return level, err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Pick level and add to record.
|
||||
if level < 0 {
|
||||
level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
|
||||
}
|
||||
rec.addTableFile(level, t)
|
||||
// Picking a level other than zero can cause compaction issues with large
// bulk inserts and deletes on a strictly incrementing key-space: the
// small deletion markers get trapped at a lower level while key/value
// entries keep growing at a higher level. Since the key-space is
// strictly incrementing it does not overlap with the higher level, so
// the maximum possible level is always picked and the overlapping
// deletion markers are pushed into a lower level.
// See: https://github.com/pingcap/goleveldb/issues/127.
|
||||
flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel)
|
||||
rec.addTableFile(flushLevel, t)
|
||||
|
||||
s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
|
||||
return level, nil
|
||||
s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
|
||||
return flushLevel, nil
|
||||
}
|
||||
|
||||
// Pick a compaction based on current state; need external synchronization.
|
||||
func (s *session) pickCompaction() *compaction {
|
||||
v := s.version()
|
||||
|
||||
var level int
|
||||
var sourceLevel int
|
||||
var t0 tFiles
|
||||
if v.cScore >= 1 {
|
||||
level = v.cLevel
|
||||
cptr := s.stCompPtrs[level]
|
||||
tables := v.tables[level]
|
||||
sourceLevel = v.cLevel
|
||||
cptr := s.getCompPtr(sourceLevel)
|
||||
tables := v.levels[sourceLevel]
|
||||
for _, t := range tables {
|
||||
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
|
||||
t0 = append(t0, t)
|
||||
@ -61,7 +66,7 @@ func (s *session) pickCompaction() *compaction {
|
||||
} else {
|
||||
if p := atomic.LoadPointer(&v.cSeek); p != nil {
|
||||
ts := (*tSet)(p)
|
||||
level = ts.level
|
||||
sourceLevel = ts.level
|
||||
t0 = append(t0, ts.table)
|
||||
} else {
|
||||
v.release()
|
||||
@ -69,14 +74,19 @@ func (s *session) pickCompaction() *compaction {
|
||||
}
|
||||
}
|
||||
|
||||
return newCompaction(s, v, level, t0)
|
||||
return newCompaction(s, v, sourceLevel, t0)
|
||||
}
|
||||
|
||||
// Create compaction from given level and range; need external synchronization.
|
||||
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
|
||||
func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction {
|
||||
v := s.version()
|
||||
|
||||
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
|
||||
if sourceLevel >= len(v.levels) {
|
||||
v.release()
|
||||
return nil
|
||||
}
|
||||
|
||||
t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0)
|
||||
if len(t0) == 0 {
|
||||
v.release()
|
||||
return nil
|
||||
@ -86,9 +96,9 @@ func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
|
||||
// But we cannot do this for level-0 since level-0 files can overlap
|
||||
// and we must not pick one file and drop another older file if the
|
||||
// two files overlap.
|
||||
if level > 0 {
|
||||
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
|
||||
total := uint64(0)
|
||||
if !noLimit && sourceLevel > 0 {
|
||||
limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel))
|
||||
total := int64(0)
|
||||
for i, t := range t0 {
|
||||
total += t.size
|
||||
if total >= limit {
|
||||
@ -99,17 +109,17 @@ func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
|
||||
}
|
||||
}
|
||||
|
||||
return newCompaction(s, v, level, t0)
|
||||
return newCompaction(s, v, sourceLevel, t0)
|
||||
}
|
||||
|
||||
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
|
||||
func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction {
|
||||
c := &compaction{
|
||||
s: s,
|
||||
v: v,
|
||||
level: level,
|
||||
tables: [2]tFiles{t0, nil},
|
||||
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
|
||||
tPtrs: make([]int, s.o.GetNumLevel()),
|
||||
sourceLevel: sourceLevel,
|
||||
levels: [2]tFiles{t0, nil},
|
||||
maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)),
|
||||
tPtrs: make([]int, len(v.levels)),
|
||||
}
|
||||
c.expand()
|
||||
c.save()
|
||||
@ -121,21 +131,21 @@ type compaction struct {
|
||||
s *session
|
||||
v *version
|
||||
|
||||
level int
|
||||
tables [2]tFiles
|
||||
maxGPOverlaps uint64
|
||||
sourceLevel int
|
||||
levels [2]tFiles
|
||||
maxGPOverlaps int64
|
||||
|
||||
gp tFiles
|
||||
gpi int
|
||||
seenKey bool
|
||||
gpOverlappedBytes uint64
|
||||
imin, imax iKey
|
||||
gpOverlappedBytes int64
|
||||
imin, imax internalKey
|
||||
tPtrs []int
|
||||
released bool
|
||||
|
||||
snapGPI int
|
||||
snapSeenKey bool
|
||||
snapGPOverlappedBytes uint64
|
||||
snapGPOverlappedBytes int64
|
||||
snapTPtrs []int
|
||||
}
|
||||
|
||||
@ -162,30 +172,34 @@ func (c *compaction) release() {
|
||||
|
||||
// Expand compacted tables; need external synchronization.
|
||||
func (c *compaction) expand() {
|
||||
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
|
||||
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
|
||||
limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel))
|
||||
vt0 := c.v.levels[c.sourceLevel]
|
||||
vt1 := tFiles{}
|
||||
if level := c.sourceLevel + 1; level < len(c.v.levels) {
|
||||
vt1 = c.v.levels[level]
|
||||
}
|
||||
|
||||
t0, t1 := c.tables[0], c.tables[1]
|
||||
t0, t1 := c.levels[0], c.levels[1]
|
||||
imin, imax := t0.getRange(c.s.icmp)
|
||||
// We expand t0 here just in case ukeys hop across tables.
|
||||
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
|
||||
if len(t0) != len(c.tables[0]) {
|
||||
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0)
|
||||
if len(t0) != len(c.levels[0]) {
|
||||
imin, imax = t0.getRange(c.s.icmp)
|
||||
}
|
||||
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
|
||||
// Get entire range covered by compaction.
|
||||
amin, amax := append(t0, t1...).getRange(c.s.icmp)
|
||||
|
||||
// See if we can grow the number of inputs in "level" without
|
||||
// changing the number of "level+1" files we pick up.
|
||||
// See if we can grow the number of inputs in "sourceLevel" without
|
||||
// changing the number of "sourceLevel+1" files we pick up.
|
||||
if len(t1) > 0 {
|
||||
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
|
||||
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0)
|
||||
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
|
||||
xmin, xmax := exp0.getRange(c.s.icmp)
|
||||
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
|
||||
if len(exp1) == len(t1) {
|
||||
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
|
||||
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
|
||||
c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
|
||||
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
|
||||
imin, imax = xmin, xmax
|
||||
t0, t1 = exp0, exp1
|
||||
@ -195,22 +209,23 @@ func (c *compaction) expand() {
|
||||
}
|
||||
|
||||
// Compute the set of grandparent files that overlap this compaction
|
||||
// (parent == level+1; grandparent == level+2)
|
||||
if c.level+2 < c.s.o.GetNumLevel() {
|
||||
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
|
||||
// (parent == sourceLevel+1; grandparent == sourceLevel+2)
|
||||
if level := c.sourceLevel + 2; level < len(c.v.levels) {
|
||||
c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
|
||||
}
|
||||
|
||||
c.tables[0], c.tables[1] = t0, t1
|
||||
c.levels[0], c.levels[1] = t0, t1
|
||||
c.imin, c.imax = imin, imax
|
||||
}
|
||||
|
||||
// Check whether compaction is trivial.
|
||||
func (c *compaction) trivial() bool {
|
||||
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
|
||||
return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
|
||||
}
|
||||
|
||||
func (c *compaction) baseLevelForKey(ukey []byte) bool {
|
||||
for level, tables := range c.v.tables[c.level+2:] {
|
||||
for level := c.sourceLevel + 2; level < len(c.v.levels); level++ {
|
||||
tables := c.v.levels[level]
|
||||
for c.tPtrs[level] < len(tables) {
|
||||
t := tables[c.tPtrs[level]]
|
||||
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
|
||||
@ -227,7 +242,7 @@ func (c *compaction) baseLevelForKey(ukey []byte) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *compaction) shouldStopBefore(ikey iKey) bool {
|
||||
func (c *compaction) shouldStopBefore(ikey internalKey) bool {
|
||||
for ; c.gpi < len(c.gp); c.gpi++ {
|
||||
gp := c.gp[c.gpi]
|
||||
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
|
||||
@ -250,10 +265,10 @@ func (c *compaction) shouldStopBefore(ikey iKey) bool {
|
||||
// Creates an iterator.
|
||||
func (c *compaction) newIterator() iterator.Iterator {
|
||||
// Creates iterator slice.
|
||||
icap := len(c.tables)
|
||||
if c.level == 0 {
|
||||
icap := len(c.levels)
|
||||
if c.sourceLevel == 0 {
|
||||
// Special case for level-0.
|
||||
icap = len(c.tables[0]) + 1
|
||||
icap = len(c.levels[0]) + 1
|
||||
}
|
||||
its := make([]iterator.Iterator, 0, icap)
|
||||
|
||||
@ -267,13 +282,13 @@ func (c *compaction) newIterator() iterator.Iterator {
|
||||
ro.Strict |= opt.StrictReader
|
||||
}
|
||||
|
||||
for i, tables := range c.tables {
|
||||
for i, tables := range c.levels {
|
||||
if len(tables) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Level-0 tables are not sorted and may overlap each other.
|
||||
if c.level+i == 0 {
|
||||
if c.sourceLevel+i == 0 {
|
||||
for _, t := range tables {
|
||||
its = append(its, c.s.tops.newIterator(t, nil, ro))
|
||||
}
|
||||
@ -12,7 +12,8 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
type byteReader interface {
|
||||
@ -35,28 +36,28 @@ const (
|
||||
|
||||
type cpRecord struct {
|
||||
level int
|
||||
ikey iKey
|
||||
ikey internalKey
|
||||
}
|
||||
|
||||
type atRecord struct {
|
||||
level int
|
||||
num uint64
|
||||
size uint64
|
||||
imin iKey
|
||||
imax iKey
|
||||
num int64
|
||||
size int64
|
||||
imin internalKey
|
||||
imax internalKey
|
||||
}
|
||||
|
||||
type dtRecord struct {
|
||||
level int
|
||||
num uint64
|
||||
num int64
|
||||
}
|
||||
|
||||
type sessionRecord struct {
|
||||
hasRec int
|
||||
comparer string
|
||||
journalNum uint64
|
||||
prevJournalNum uint64
|
||||
nextFileNum uint64
|
||||
journalNum int64
|
||||
prevJournalNum int64
|
||||
nextFileNum int64
|
||||
seqNum uint64
|
||||
compPtrs []cpRecord
|
||||
addedTables []atRecord
|
||||
@ -75,17 +76,17 @@ func (p *sessionRecord) setComparer(name string) {
|
||||
p.comparer = name
|
||||
}
|
||||
|
||||
func (p *sessionRecord) setJournalNum(num uint64) {
|
||||
func (p *sessionRecord) setJournalNum(num int64) {
|
||||
p.hasRec |= 1 << recJournalNum
|
||||
p.journalNum = num
|
||||
}
|
||||
|
||||
func (p *sessionRecord) setPrevJournalNum(num uint64) {
|
||||
func (p *sessionRecord) setPrevJournalNum(num int64) {
|
||||
p.hasRec |= 1 << recPrevJournalNum
|
||||
p.prevJournalNum = num
|
||||
}
|
||||
|
||||
func (p *sessionRecord) setNextFileNum(num uint64) {
|
||||
func (p *sessionRecord) setNextFileNum(num int64) {
|
||||
p.hasRec |= 1 << recNextFileNum
|
||||
p.nextFileNum = num
|
||||
}
|
||||
@ -95,7 +96,7 @@ func (p *sessionRecord) setSeqNum(num uint64) {
|
||||
p.seqNum = num
|
||||
}
|
||||
|
||||
func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
|
||||
func (p *sessionRecord) addCompPtr(level int, ikey internalKey) {
|
||||
p.hasRec |= 1 << recCompPtr
|
||||
p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
|
||||
}
|
||||
@ -105,13 +106,13 @@ func (p *sessionRecord) resetCompPtrs() {
|
||||
p.compPtrs = p.compPtrs[:0]
|
||||
}
|
||||
|
||||
func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
|
||||
func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) {
|
||||
p.hasRec |= 1 << recAddTable
|
||||
p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
|
||||
}
|
||||
|
||||
func (p *sessionRecord) addTableFile(level int, t *tFile) {
|
||||
p.addTable(level, t.file.Num(), t.size, t.imin, t.imax)
|
||||
p.addTable(level, t.fd.Num, t.size, t.imin, t.imax)
|
||||
}
|
||||
|
||||
func (p *sessionRecord) resetAddedTables() {
|
||||
@ -119,7 +120,7 @@ func (p *sessionRecord) resetAddedTables() {
|
||||
p.addedTables = p.addedTables[:0]
|
||||
}
|
||||
|
||||
func (p *sessionRecord) delTable(level int, num uint64) {
|
||||
func (p *sessionRecord) delTable(level int, num int64) {
|
||||
p.hasRec |= 1 << recDelTable
|
||||
p.deletedTables = append(p.deletedTables, dtRecord{level, num})
|
||||
}
|
||||
@ -137,6 +138,13 @@ func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
|
||||
_, p.err = w.Write(p.scratch[:n])
|
||||
}
|
||||
|
||||
func (p *sessionRecord) putVarint(w io.Writer, x int64) {
|
||||
if x < 0 {
|
||||
panic("invalid negative value")
|
||||
}
|
||||
p.putUvarint(w, uint64(x))
|
||||
}
|
||||
|
||||
func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
|
||||
if p.err != nil {
|
||||
return
|
||||
@ -156,11 +164,11 @@ func (p *sessionRecord) encode(w io.Writer) error {
|
||||
}
|
||||
if p.has(recJournalNum) {
|
||||
p.putUvarint(w, recJournalNum)
|
||||
p.putUvarint(w, p.journalNum)
|
||||
p.putVarint(w, p.journalNum)
|
||||
}
|
||||
if p.has(recNextFileNum) {
|
||||
p.putUvarint(w, recNextFileNum)
|
||||
p.putUvarint(w, p.nextFileNum)
|
||||
p.putVarint(w, p.nextFileNum)
|
||||
}
|
||||
if p.has(recSeqNum) {
|
||||
p.putUvarint(w, recSeqNum)
|
||||
@ -174,13 +182,13 @@ func (p *sessionRecord) encode(w io.Writer) error {
|
||||
for _, r := range p.deletedTables {
|
||||
p.putUvarint(w, recDelTable)
|
||||
p.putUvarint(w, uint64(r.level))
|
||||
p.putUvarint(w, r.num)
|
||||
p.putVarint(w, r.num)
|
||||
}
|
||||
for _, r := range p.addedTables {
|
||||
p.putUvarint(w, recAddTable)
|
||||
p.putUvarint(w, uint64(r.level))
|
||||
p.putUvarint(w, r.num)
|
||||
p.putUvarint(w, r.size)
|
||||
p.putVarint(w, r.num)
|
||||
p.putVarint(w, r.size)
|
||||
p.putBytes(w, r.imin)
|
||||
p.putBytes(w, r.imax)
|
||||
}
|
||||
@ -194,9 +202,9 @@ func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF
|
||||
x, err := binary.ReadUvarint(r)
if err != nil {
if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"})
} else if strings.HasPrefix(err.Error(), "binary:") {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, err.Error()})
} else {
p.err = err
}
@ -209,6 +217,14 @@ func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
return p.readUvarintMayEOF(field, r, false)
}

func (p *sessionRecord) readVarint(field string, r io.ByteReader) int64 {
x := int64(p.readUvarintMayEOF(field, r, false))
if x < 0 {
p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "invalid negative value"})
}
return x
}

func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
if p.err != nil {
return nil
@ -221,14 +237,14 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
_, p.err = io.ReadFull(r, x)
if p.err != nil {
if p.err == io.ErrUnexpectedEOF {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"})
}
return nil
}
return x
}

func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
if p.err != nil {
return 0
}
@ -236,14 +252,10 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) i
if p.err != nil {
return 0
}
if x >= uint64(numLevel) {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
return 0
}
return int(x)
}

func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
func (p *sessionRecord) decode(r io.Reader) error {
br, ok := r.(byteReader)
if !ok {
br = bufio.NewReader(r)
@ -264,17 +276,17 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
p.setComparer(string(x))
}
case recJournalNum:
x := p.readUvarint("journal-num", br)
x := p.readVarint("journal-num", br)
if p.err == nil {
p.setJournalNum(x)
}
case recPrevJournalNum:
x := p.readUvarint("prev-journal-num", br)
x := p.readVarint("prev-journal-num", br)
if p.err == nil {
p.setPrevJournalNum(x)
}
case recNextFileNum:
x := p.readUvarint("next-file-num", br)
x := p.readVarint("next-file-num", br)
if p.err == nil {
p.setNextFileNum(x)
}
@ -284,23 +296,23 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
p.setSeqNum(x)
}
case recCompPtr:
level := p.readLevel("comp-ptr.level", br, numLevel)
level := p.readLevel("comp-ptr.level", br)
ikey := p.readBytes("comp-ptr.ikey", br)
if p.err == nil {
p.addCompPtr(level, iKey(ikey))
p.addCompPtr(level, internalKey(ikey))
}
case recAddTable:
level := p.readLevel("add-table.level", br, numLevel)
num := p.readUvarint("add-table.num", br)
size := p.readUvarint("add-table.size", br)
level := p.readLevel("add-table.level", br)
num := p.readVarint("add-table.num", br)
size := p.readVarint("add-table.size", br)
imin := p.readBytes("add-table.imin", br)
imax := p.readBytes("add-table.imax", br)
if p.err == nil {
p.addTable(level, num, size, imin, imax)
}
case recDelTable:
level := p.readLevel("del-table.level", br, numLevel)
num := p.readUvarint("del-table.num", br)
level := p.readLevel("del-table.level", br)
num := p.readVarint("del-table.num", br)
if p.err == nil {
p.delTable(level, num)
}
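The hunks above switch the journal/file-number fields from readUvarint to the new readVarint helper, which decodes an unsigned varint and then rejects values that no longer fit in int64. A minimal standalone sketch of that check, with illustrative names rather than the vendored code itself:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// readVarintLike mirrors the readVarint helper added above: decode an
// unsigned varint, then treat anything that overflows int64 as corruption
// instead of letting it wrap around silently.
func readVarintLike(r *bytes.Reader) (int64, error) {
	u, err := binary.ReadUvarint(r)
	if err != nil {
		return 0, err
	}
	x := int64(u)
	if x < 0 {
		return 0, fmt.Errorf("manifest corrupted: invalid negative value")
	}
	return x, nil
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 1<<50) // e.g. a journal number
	v, err := readVarintLike(bytes.NewReader(buf[:n]))
	fmt.Println(v, err) // 1125899906842624 <nil>
}
```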
62
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/session_record_test.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
"bytes"
"testing"
)

func decodeEncode(v *sessionRecord) (res bool, err error) {
b := new(bytes.Buffer)
err = v.encode(b)
if err != nil {
return
}
v2 := &sessionRecord{}
err = v.decode(b)
if err != nil {
return
}
b2 := new(bytes.Buffer)
err = v2.encode(b2)
if err != nil {
return
}
return bytes.Equal(b.Bytes(), b2.Bytes()), nil
}

func TestSessionRecord_EncodeDecode(t *testing.T) {
big := int64(1) << 50
v := &sessionRecord{}
i := int64(0)
test := func() {
res, err := decodeEncode(v)
if err != nil {
t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
}
if !res {
t.Error("encode/decode test failed at iteration:", i)
}
}

for ; i < 4; i++ {
test()
v.addTable(3, big+300+i, big+400+i,
makeInternalKey(nil, []byte("foo"), uint64(big+500+1), keyTypeVal),
makeInternalKey(nil, []byte("zoo"), uint64(big+600+1), keyTypeDel))
v.delTable(4, big+700+i)
v.addCompPtr(int(i), makeInternalKey(nil, []byte("x"), uint64(big+900+1), keyTypeVal))
}

v.setComparer("foo")
v.setJournalNum(big + 100)
v.setPrevJournalNum(big + 99)
v.setNextFileNum(big + 200)
v.setSeqNum(uint64(big + 1000))
test()
}
@ -10,22 +10,22 @@ import (
"fmt"
"sync/atomic"

"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/pingcap/goleveldb/leveldb/journal"
"github.com/pingcap/goleveldb/leveldb/storage"
)

// Logging.

type dropper struct {
s *session
file storage.File
s *session
fd storage.FileDesc
}

func (d dropper) Drop(err error) {
if e, ok := err.(*journal.ErrCorrupted); ok {
d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
} else {
d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
}
}

@ -34,25 +34,9 @@ func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf

// File utils.

func (s *session) getJournalFile(num uint64) storage.File {
return s.stor.GetFile(num, storage.TypeJournal)
}

func (s *session) getTableFile(num uint64) storage.File {
return s.stor.GetFile(num, storage.TypeTable)
}

func (s *session) getFiles(t storage.FileType) ([]storage.File, error) {
return s.stor.GetFiles(t)
}

func (s *session) newTemp() storage.File {
num := atomic.AddUint64(&s.stTempFileNum, 1) - 1
return s.stor.GetFile(num, storage.TypeTemp)
}

func (s *session) tableFileFromRecord(r atRecord) *tFile {
return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
func (s *session) newTemp() storage.FileDesc {
num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
return storage.FileDesc{storage.TypeTemp, num}
}

// Session state.
@ -80,47 +64,65 @@ func (s *session) setVersion(v *version) {
}

// Get current unused file number.
func (s *session) nextFileNum() uint64 {
return atomic.LoadUint64(&s.stNextFileNum)
func (s *session) nextFileNum() int64 {
return atomic.LoadInt64(&s.stNextFileNum)
}

// Set current unused file number to num.
func (s *session) setNextFileNum(num uint64) {
atomic.StoreUint64(&s.stNextFileNum, num)
func (s *session) setNextFileNum(num int64) {
atomic.StoreInt64(&s.stNextFileNum, num)
}

// Mark file number as used.
func (s *session) markFileNum(num uint64) {
func (s *session) markFileNum(num int64) {
nextFileNum := num + 1
for {
old, x := s.stNextFileNum, nextFileNum
if old > x {
x = old
}
if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
break
}
}
}

// Allocate a file number.
func (s *session) allocFileNum() uint64 {
return atomic.AddUint64(&s.stNextFileNum, 1) - 1
func (s *session) allocFileNum() int64 {
return atomic.AddInt64(&s.stNextFileNum, 1) - 1
}

// Reuse given file number.
func (s *session) reuseFileNum(num uint64) {
func (s *session) reuseFileNum(num int64) {
for {
old, x := s.stNextFileNum, num
if old != x+1 {
x = old
}
if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
break
}
}
}

// Set compaction ptr at given level; need external synchronization.
func (s *session) setCompPtr(level int, ik internalKey) {
if level >= len(s.stCompPtrs) {
newCompPtrs := make([]internalKey, level+1)
copy(newCompPtrs, s.stCompPtrs)
s.stCompPtrs = newCompPtrs
}
s.stCompPtrs[level] = append(internalKey{}, ik...)
}

// Get compaction ptr at given level; need external synchronization.
func (s *session) getCompPtr(level int) internalKey {
if level >= len(s.stCompPtrs) {
return nil
}
return s.stCompPtrs[level]
}

// Manifest related utils.

// Fill given session record obj with current states; need external
@ -149,29 +151,28 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) {

// Mark if record has been committed, this will update session state;
// need external synchronization.
func (s *session) recordCommited(r *sessionRecord) {
if r.has(recJournalNum) {
s.stJournalNum = r.journalNum
func (s *session) recordCommited(rec *sessionRecord) {
if rec.has(recJournalNum) {
s.stJournalNum = rec.journalNum
}

if r.has(recPrevJournalNum) {
s.stPrevJournalNum = r.prevJournalNum
if rec.has(recPrevJournalNum) {
s.stPrevJournalNum = rec.prevJournalNum
}

if r.has(recSeqNum) {
s.stSeqNum = r.seqNum
if rec.has(recSeqNum) {
s.stSeqNum = rec.seqNum
}

for _, p := range r.compPtrs {
s.stCompPtrs[p.level] = iKey(p.ikey)
for _, r := range rec.compPtrs {
s.setCompPtr(r.level, internalKey(r.ikey))
}
}

// Create a new manifest file; need external synchronization.
func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
num := s.allocFileNum()
file := s.stor.GetFile(num, storage.TypeManifest)
writer, err := file.Create()
fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()}
writer, err := s.stor.Create(fd)
if err != nil {
return
}
@ -196,16 +197,16 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
if s.manifestWriter != nil {
s.manifestWriter.Close()
}
if s.manifestFile != nil {
s.manifestFile.Remove()
if !s.manifestFd.Nil() {
s.stor.Remove(s.manifestFd)
}
s.manifestFile = file
s.manifestFd = fd
s.manifestWriter = writer
s.manifest = jw
} else {
writer.Close()
file.Remove()
s.reuseFileNum(num)
s.stor.Remove(fd)
s.reuseFileNum(fd.Num)
}
}()

@ -221,7 +222,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
if err != nil {
return
}
err = s.stor.SetManifest(file)
err = s.stor.SetMeta(fd)
return
}
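The rewritten newManifest above builds a storage.FileDesc directly and goes through Storage.Create and SetMeta instead of File.Create and SetManifest. A rough sketch of that flow against the in-memory storage from this vendored package; the import path is the vendored one shown above, and error handling is trimmed for brevity:

```go
package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/storage"
)

func main() {
	stor := storage.NewMemStorage()
	defer stor.Close()

	// Allocate a descriptor for the new manifest, create it, then point
	// the meta (CURRENT) at it, mirroring session.newManifest above.
	fd := storage.FileDesc{Type: storage.TypeManifest, Num: 1}
	w, err := stor.Create(fd)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("manifest records would go here\n"))
	w.Close()

	if err := stor.SetMeta(fd); err != nil {
		panic(err)
	}
	cur, _ := stor.GetMeta()
	fmt.Println(cur) // MANIFEST-000001
}
```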
583
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/storage/file_storage.go
generated
vendored
Normal file
@ -0,0 +1,583 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reservefs.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
errFileOpen = errors.New("leveldb/storage: file still open")
|
||||
errReadOnly = errors.New("leveldb/storage: storage is read-only")
|
||||
)
|
||||
|
||||
type fileLock interface {
|
||||
release() error
|
||||
}
|
||||
|
||||
type fileStorageLock struct {
|
||||
fs *fileStorage
|
||||
}
|
||||
|
||||
func (lock *fileStorageLock) Release() {
|
||||
if lock.fs != nil {
|
||||
lock.fs.mu.Lock()
|
||||
defer lock.fs.mu.Unlock()
|
||||
if lock.fs.slock == lock {
|
||||
lock.fs.slock = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const logSizeThreshold = 1024 * 1024 // 1 MiB
|
||||
|
||||
// fileStorage is a file-system backed storage.
|
||||
type fileStorage struct {
|
||||
path string
|
||||
readOnly bool
|
||||
|
||||
mu sync.Mutex
|
||||
flock fileLock
|
||||
slock *fileStorageLock
|
||||
logw *os.File
|
||||
logSize int64
|
||||
buf []byte
|
||||
// Opened file counter; if open < 0 means closed.
|
||||
open int
|
||||
day int
|
||||
}
|
||||
|
||||
// OpenFile returns a new filesytem-backed storage implementation with the given
|
||||
// path. This also acquire a file lock, so any subsequent attempt to open the
|
||||
// same path will fail.
|
||||
//
|
||||
// The storage must be closed after use, by calling Close method.
|
||||
func OpenFile(path string, readOnly bool) (Storage, error) {
|
||||
if fi, err := os.Stat(path); err == nil {
|
||||
if !fi.IsDir() {
|
||||
return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path)
|
||||
}
|
||||
} else if os.IsNotExist(err) && !readOnly {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
flock.release()
|
||||
}
|
||||
}()
|
||||
|
||||
var (
|
||||
logw *os.File
|
||||
logSize int64
|
||||
)
|
||||
if !readOnly {
|
||||
logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logSize, err = logw.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
logw.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
fs := &fileStorage{
|
||||
path: path,
|
||||
readOnly: readOnly,
|
||||
flock: flock,
|
||||
logw: logw,
|
||||
logSize: logSize,
|
||||
}
|
||||
runtime.SetFinalizer(fs, (*fileStorage).Close)
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Lock() (Lock, error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
if fs.readOnly {
|
||||
return &fileStorageLock{}, nil
|
||||
}
|
||||
if fs.slock != nil {
|
||||
return nil, ErrLocked
|
||||
}
|
||||
fs.slock = &fileStorageLock{fs: fs}
|
||||
return fs.slock, nil
|
||||
}
|
||||
|
||||
func itoa(buf []byte, i int, wid int) []byte {
|
||||
u := uint(i)
|
||||
if u == 0 && wid <= 1 {
|
||||
return append(buf, '0')
|
||||
}
|
||||
|
||||
// Assemble decimal in reverse order.
|
||||
var b [32]byte
|
||||
bp := len(b)
|
||||
for ; u > 0 || wid > 0; u /= 10 {
|
||||
bp--
|
||||
wid--
|
||||
b[bp] = byte(u%10) + '0'
|
||||
}
|
||||
return append(buf, b[bp:]...)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) printDay(t time.Time) {
|
||||
if fs.day == t.Day() {
|
||||
return
|
||||
}
|
||||
fs.day = t.Day()
|
||||
fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
|
||||
}
|
||||
|
||||
func (fs *fileStorage) doLog(t time.Time, str string) {
|
||||
if fs.logSize > logSizeThreshold {
|
||||
// Rotate log file.
|
||||
fs.logw.Close()
|
||||
fs.logw = nil
|
||||
fs.logSize = 0
|
||||
rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old"))
|
||||
}
|
||||
if fs.logw == nil {
|
||||
var err error
|
||||
fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Force printDay on new log file.
|
||||
fs.day = 0
|
||||
}
|
||||
fs.printDay(t)
|
||||
hour, min, sec := t.Clock()
|
||||
msec := t.Nanosecond() / 1e3
|
||||
// time
|
||||
fs.buf = itoa(fs.buf[:0], hour, 2)
|
||||
fs.buf = append(fs.buf, ':')
|
||||
fs.buf = itoa(fs.buf, min, 2)
|
||||
fs.buf = append(fs.buf, ':')
|
||||
fs.buf = itoa(fs.buf, sec, 2)
|
||||
fs.buf = append(fs.buf, '.')
|
||||
fs.buf = itoa(fs.buf, msec, 6)
|
||||
fs.buf = append(fs.buf, ' ')
|
||||
// write
|
||||
fs.buf = append(fs.buf, []byte(str)...)
|
||||
fs.buf = append(fs.buf, '\n')
|
||||
fs.logw.Write(fs.buf)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Log(str string) {
|
||||
if !fs.readOnly {
|
||||
t := time.Now()
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return
|
||||
}
|
||||
fs.doLog(t, str)
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *fileStorage) log(str string) {
|
||||
if !fs.readOnly {
|
||||
fs.doLog(time.Now(), str)
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *fileStorage) SetMeta(fd FileDesc) (err error) {
|
||||
if !FileDescOk(fd) {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
if fs.readOnly {
|
||||
return errReadOnly
|
||||
}
|
||||
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
fs.log(fmt.Sprintf("CURRENT: %v", err))
|
||||
}
|
||||
}()
|
||||
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
|
||||
w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = fmt.Fprintln(w, fsGenName(fd))
|
||||
// Close the file first.
|
||||
if cerr := w.Close(); cerr != nil {
|
||||
fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, cerr))
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return rename(path, filepath.Join(fs.path, "CURRENT"))
|
||||
}
|
||||
|
||||
func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return FileDesc{}, ErrClosed
|
||||
}
|
||||
dir, err := os.Open(fs.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
names, err := dir.Readdirnames(0)
|
||||
// Close the dir first before checking for Readdirnames error.
|
||||
if ce := dir.Close(); ce != nil {
|
||||
fs.log(fmt.Sprintf("close dir: %v", ce))
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Find latest CURRENT file.
|
||||
var rem []string
|
||||
var pend bool
|
||||
var cerr error
|
||||
for _, name := range names {
|
||||
if strings.HasPrefix(name, "CURRENT") {
|
||||
pend1 := len(name) > 7
|
||||
var pendNum int64
|
||||
// Make sure it is valid name for a CURRENT file, otherwise skip it.
|
||||
if pend1 {
|
||||
if name[7] != '.' || len(name) < 9 {
|
||||
fs.log(fmt.Sprintf("skipping %s: invalid file name", name))
|
||||
continue
|
||||
}
|
||||
var e1 error
|
||||
if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil {
|
||||
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1))
|
||||
continue
|
||||
}
|
||||
}
|
||||
path := filepath.Join(fs.path, name)
|
||||
r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
|
||||
if e1 != nil {
|
||||
return FileDesc{}, e1
|
||||
}
|
||||
b, e1 := ioutil.ReadAll(r)
|
||||
if e1 != nil {
|
||||
r.Close()
|
||||
return FileDesc{}, e1
|
||||
}
|
||||
var fd1 FileDesc
|
||||
if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) {
|
||||
fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name))
|
||||
if pend1 {
|
||||
rem = append(rem, name)
|
||||
}
|
||||
if !pend1 || cerr == nil {
|
||||
metaFd, _ := fsParseName(name)
|
||||
cerr = &ErrCorrupted{
|
||||
Fd: metaFd,
|
||||
Err: errors.New("leveldb/storage: corrupted or incomplete meta file"),
|
||||
}
|
||||
}
|
||||
} else if pend1 && pendNum != fd1.Num {
|
||||
fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num))
|
||||
rem = append(rem, name)
|
||||
} else if fd1.Num < fd.Num {
|
||||
fs.log(fmt.Sprintf("skipping %s: obsolete", name))
|
||||
if pend1 {
|
||||
rem = append(rem, name)
|
||||
}
|
||||
} else {
|
||||
fd = fd1
|
||||
pend = pend1
|
||||
}
|
||||
if err := r.Close(); err != nil {
|
||||
fs.log(fmt.Sprintf("close %s: %v", name, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
// Don't remove any files if there is no valid CURRENT file.
|
||||
if fd.Nil() {
|
||||
if cerr != nil {
|
||||
err = cerr
|
||||
} else {
|
||||
err = os.ErrNotExist
|
||||
}
|
||||
return
|
||||
}
|
||||
if !fs.readOnly {
|
||||
// Rename pending CURRENT file to an effective CURRENT.
|
||||
if pend {
|
||||
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
|
||||
if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
|
||||
fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err))
|
||||
}
|
||||
}
|
||||
// Remove obsolete or incomplete pending CURRENT files.
|
||||
for _, name := range rem {
|
||||
path := filepath.Join(fs.path, name)
|
||||
if err := os.Remove(path); err != nil {
|
||||
fs.log(fmt.Sprintf("remove %s: %v", name, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
dir, err := os.Open(fs.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
names, err := dir.Readdirnames(0)
|
||||
// Close the dir first before checking for Readdirnames error.
|
||||
if cerr := dir.Close(); cerr != nil {
|
||||
fs.log(fmt.Sprintf("close dir: %v", cerr))
|
||||
}
|
||||
if err == nil {
|
||||
for _, name := range names {
|
||||
if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 {
|
||||
fds = append(fds, fd)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Open(fd FileDesc) (Reader, error) {
|
||||
if !FileDescOk(fd) {
|
||||
return nil, ErrInvalidFile
|
||||
}
|
||||
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
if fsHasOldName(fd) && os.IsNotExist(err) {
|
||||
of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0)
|
||||
if err == nil {
|
||||
goto ok
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
ok:
|
||||
fs.open++
|
||||
return &fileWrap{File: of, fs: fs, fd: fd}, nil
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Create(fd FileDesc) (Writer, error) {
|
||||
if !FileDescOk(fd) {
|
||||
return nil, ErrInvalidFile
|
||||
}
|
||||
if fs.readOnly {
|
||||
return nil, errReadOnly
|
||||
}
|
||||
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.open++
|
||||
return &fileWrap{File: of, fs: fs, fd: fd}, nil
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Remove(fd FileDesc) error {
|
||||
if !FileDescOk(fd) {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
if fs.readOnly {
|
||||
return errReadOnly
|
||||
}
|
||||
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
err := os.Remove(filepath.Join(fs.path, fsGenName(fd)))
|
||||
if err != nil {
|
||||
if fsHasOldName(fd) && os.IsNotExist(err) {
|
||||
if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) {
|
||||
fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err))
|
||||
err = e1
|
||||
}
|
||||
} else {
|
||||
fs.log(fmt.Sprintf("remove %s: %v", fd, err))
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error {
|
||||
if !FileDescOk(oldfd) || !FileDescOk(newfd) {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
if oldfd == newfd {
|
||||
return nil
|
||||
}
|
||||
if fs.readOnly {
|
||||
return errReadOnly
|
||||
}
|
||||
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd)))
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Close() error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
// Clear the finalizer.
|
||||
runtime.SetFinalizer(fs, nil)
|
||||
|
||||
if fs.open > 0 {
|
||||
fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
|
||||
}
|
||||
fs.open = -1
|
||||
if fs.logw != nil {
|
||||
fs.logw.Close()
|
||||
}
|
||||
return fs.flock.release()
|
||||
}
|
||||
|
||||
type fileWrap struct {
|
||||
*os.File
|
||||
fs *fileStorage
|
||||
fd FileDesc
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (fw *fileWrap) Sync() error {
|
||||
if err := fw.File.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if fw.fd.Type == TypeManifest {
|
||||
// Also sync parent directory if file type is manifest.
|
||||
// See: https://code.google.com/p/leveldb/issues/detail?id=190.
|
||||
if err := syncDir(fw.fs.path); err != nil {
|
||||
fw.fs.log(fmt.Sprintf("syncDir: %v", err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fw *fileWrap) Close() error {
|
||||
fw.fs.mu.Lock()
|
||||
defer fw.fs.mu.Unlock()
|
||||
if fw.closed {
|
||||
return ErrClosed
|
||||
}
|
||||
fw.closed = true
|
||||
fw.fs.open--
|
||||
err := fw.File.Close()
|
||||
if err != nil {
|
||||
fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func fsGenName(fd FileDesc) string {
|
||||
switch fd.Type {
|
||||
case TypeManifest:
|
||||
return fmt.Sprintf("MANIFEST-%06d", fd.Num)
|
||||
case TypeJournal:
|
||||
return fmt.Sprintf("%06d.log", fd.Num)
|
||||
case TypeTable:
|
||||
return fmt.Sprintf("%06d.ldb", fd.Num)
|
||||
case TypeTemp:
|
||||
return fmt.Sprintf("%06d.tmp", fd.Num)
|
||||
default:
|
||||
panic("invalid file type")
|
||||
}
|
||||
}
|
||||
|
||||
func fsHasOldName(fd FileDesc) bool {
|
||||
return fd.Type == TypeTable
|
||||
}
|
||||
|
||||
func fsGenOldName(fd FileDesc) string {
|
||||
switch fd.Type {
|
||||
case TypeTable:
|
||||
return fmt.Sprintf("%06d.sst", fd.Num)
|
||||
}
|
||||
return fsGenName(fd)
|
||||
}
|
||||
|
||||
func fsParseName(name string) (fd FileDesc, ok bool) {
|
||||
var tail string
|
||||
_, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail)
|
||||
if err == nil {
|
||||
switch tail {
|
||||
case "log":
|
||||
fd.Type = TypeJournal
|
||||
case "ldb", "sst":
|
||||
fd.Type = TypeTable
|
||||
case "tmp":
|
||||
fd.Type = TypeTemp
|
||||
default:
|
||||
return
|
||||
}
|
||||
return fd, true
|
||||
}
|
||||
n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail)
|
||||
if n == 1 {
|
||||
fd.Type = TypeManifest
|
||||
return fd, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fsParseNamePtr(name string, fd *FileDesc) bool {
|
||||
_fd, ok := fsParseName(name)
|
||||
if fd != nil {
|
||||
*fd = _fd
|
||||
}
|
||||
return ok
|
||||
}
|
||||
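file_storage.go above derives on-disk names from a FileDesc via fsGenName/fsParseName: MANIFEST-%06d, %06d.log, %06d.ldb and %06d.tmp, with the legacy %06d.sst suffix still parsed as a table. A small standalone illustration of the same naming scheme, using local helpers rather than the unexported ones in the vendored file:

```go
package main

import "fmt"

type fileType int

const (
	typeManifest fileType = iota
	typeJournal
	typeTable
	typeTemp
)

// genName mirrors the scheme used by fsGenName above.
func genName(t fileType, num int64) string {
	switch t {
	case typeManifest:
		return fmt.Sprintf("MANIFEST-%06d", num)
	case typeJournal:
		return fmt.Sprintf("%06d.log", num)
	case typeTable:
		return fmt.Sprintf("%06d.ldb", num)
	default:
		return fmt.Sprintf("%06d.tmp", num)
	}
}

func main() {
	fmt.Println(genName(typeManifest, 2)) // MANIFEST-000002
	fmt.Println(genName(typeJournal, 100)) // 000100.log
	fmt.Println(genName(typeTable, 5))     // 000005.ldb
}
```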
34
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/storage/file_storage_nacl.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build nacl
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
|
||||
return nil, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func setFileLock(f *os.File, readOnly, lock bool) error {
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func rename(oldpath, newpath string) error {
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func isErrInvalid(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func syncDir(name string) error {
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
@ -19,8 +19,21 @@ func (fl *plan9FileLock) release() error {
|
||||
return fl.f.Close()
|
||||
}
|
||||
|
||||
func newFileLock(path string) (fl fileLock, err error) {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
|
||||
func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
|
||||
var (
|
||||
flag int
|
||||
perm os.FileMode
|
||||
)
|
||||
if readOnly {
|
||||
flag = os.O_RDONLY
|
||||
} else {
|
||||
flag = os.O_RDWR
|
||||
perm = os.ModeExclusive
|
||||
}
|
||||
f, err := os.OpenFile(path, flag, perm)
|
||||
if os.IsNotExist(err) {
|
||||
f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -18,18 +18,27 @@ type unixFileLock struct {
|
||||
}
|
||||
|
||||
func (fl *unixFileLock) release() error {
|
||||
if err := setFileLock(fl.f, false); err != nil {
|
||||
if err := setFileLock(fl.f, false, false); err != nil {
|
||||
return err
|
||||
}
|
||||
return fl.f.Close()
|
||||
}
|
||||
|
||||
func newFileLock(path string) (fl fileLock, err error) {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
|
||||
func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
|
||||
var flag int
|
||||
if readOnly {
|
||||
flag = os.O_RDONLY
|
||||
} else {
|
||||
flag = os.O_RDWR
|
||||
}
|
||||
f, err := os.OpenFile(path, flag, 0)
|
||||
if os.IsNotExist(err) {
|
||||
f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = setFileLock(f, true)
|
||||
err = setFileLock(f, readOnly, true)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return
|
||||
@ -38,7 +47,7 @@ func newFileLock(path string) (fl fileLock, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func setFileLock(f *os.File, lock bool) error {
|
||||
func setFileLock(f *os.File, readOnly, lock bool) error {
|
||||
flock := syscall.Flock_t{
|
||||
Type: syscall.F_UNLCK,
|
||||
Start: 0,
|
||||
@ -46,7 +55,11 @@ func setFileLock(f *os.File, lock bool) error {
|
||||
Whence: 1,
|
||||
}
|
||||
if lock {
|
||||
flock.Type = syscall.F_WRLCK
|
||||
if readOnly {
|
||||
flock.Type = syscall.F_RDLCK
|
||||
} else {
|
||||
flock.Type = syscall.F_WRLCK
|
||||
}
|
||||
}
|
||||
return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
|
||||
}
|
||||
176
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/storage/file_storage_test.go
generated
vendored
Normal file
@ -0,0 +1,176 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var cases = []struct {
|
||||
oldName []string
|
||||
name string
|
||||
ftype FileType
|
||||
num int64
|
||||
}{
|
||||
{nil, "000100.log", TypeJournal, 100},
|
||||
{nil, "000000.log", TypeJournal, 0},
|
||||
{[]string{"000000.sst"}, "000000.ldb", TypeTable, 0},
|
||||
{nil, "MANIFEST-000002", TypeManifest, 2},
|
||||
{nil, "MANIFEST-000007", TypeManifest, 7},
|
||||
{nil, "9223372036854775807.log", TypeJournal, 9223372036854775807},
|
||||
{nil, "000100.tmp", TypeTemp, 100},
|
||||
}
|
||||
|
||||
var invalidCases = []string{
|
||||
"",
|
||||
"foo",
|
||||
"foo-dx-100.log",
|
||||
".log",
|
||||
"",
|
||||
"manifest",
|
||||
"CURREN",
|
||||
"CURRENTX",
|
||||
"MANIFES",
|
||||
"MANIFEST",
|
||||
"MANIFEST-",
|
||||
"XMANIFEST-3",
|
||||
"MANIFEST-3x",
|
||||
"LOC",
|
||||
"LOCKx",
|
||||
"LO",
|
||||
"LOGx",
|
||||
"18446744073709551616.log",
|
||||
"184467440737095516150.log",
|
||||
"100",
|
||||
"100.",
|
||||
"100.lop",
|
||||
}
|
||||
|
||||
func TestFileStorage_CreateFileName(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
if name := fsGenName(FileDesc{c.ftype, c.num}); name != c.name {
|
||||
t.Errorf("invalid filename got '%s', want '%s'", name, c.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileStorage_ParseFileName(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
for _, name := range append([]string{c.name}, c.oldName...) {
|
||||
fd, ok := fsParseName(name)
|
||||
if !ok {
|
||||
t.Errorf("cannot parse filename '%s'", name)
|
||||
continue
|
||||
}
|
||||
if fd.Type != c.ftype {
|
||||
t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, fd.Type, c.ftype)
|
||||
}
|
||||
if fd.Num != c.num {
|
||||
t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, fd.Num, c.num)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileStorage_InvalidFileName(t *testing.T) {
|
||||
for _, name := range invalidCases {
|
||||
if fsParseNamePtr(name, nil) {
|
||||
t.Errorf("filename '%s' should be invalid", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileStorage_Locking(t *testing.T) {
|
||||
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrwlock-%d", os.Getuid()))
|
||||
if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) {
|
||||
t.Fatal("RemoveAll: got error: ", err)
|
||||
}
|
||||
defer os.RemoveAll(path)
|
||||
|
||||
p1, err := OpenFile(path, false)
|
||||
if err != nil {
|
||||
t.Fatal("OpenFile(1): got error: ", err)
|
||||
}
|
||||
|
||||
p2, err := OpenFile(path, false)
|
||||
if err != nil {
|
||||
t.Logf("OpenFile(2): got error: %s (expected)", err)
|
||||
} else {
|
||||
p2.Close()
|
||||
p1.Close()
|
||||
t.Fatal("OpenFile(2): expect error")
|
||||
}
|
||||
|
||||
p1.Close()
|
||||
|
||||
p3, err := OpenFile(path, false)
|
||||
if err != nil {
|
||||
t.Fatal("OpenFile(3): got error: ", err)
|
||||
}
|
||||
defer p3.Close()
|
||||
|
||||
l, err := p3.Lock()
|
||||
if err != nil {
|
||||
t.Fatal("storage lock failed(1): ", err)
|
||||
}
|
||||
_, err = p3.Lock()
|
||||
if err == nil {
|
||||
t.Fatal("expect error for second storage lock attempt")
|
||||
} else {
|
||||
t.Logf("storage lock got error: %s (expected)", err)
|
||||
}
|
||||
l.Release()
|
||||
_, err = p3.Lock()
|
||||
if err != nil {
|
||||
t.Fatal("storage lock failed(2): ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileStorage_ReadOnlyLocking(t *testing.T) {
|
||||
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrolock-%d", os.Getuid()))
|
||||
if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) {
|
||||
t.Fatal("RemoveAll: got error: ", err)
|
||||
}
|
||||
defer os.RemoveAll(path)
|
||||
|
||||
p1, err := OpenFile(path, false)
|
||||
if err != nil {
|
||||
t.Fatal("OpenFile(1): got error: ", err)
|
||||
}
|
||||
|
||||
_, err = OpenFile(path, true)
|
||||
if err != nil {
|
||||
t.Logf("OpenFile(2): got error: %s (expected)", err)
|
||||
} else {
|
||||
t.Fatal("OpenFile(2): expect error")
|
||||
}
|
||||
|
||||
p1.Close()
|
||||
|
||||
p3, err := OpenFile(path, true)
|
||||
if err != nil {
|
||||
t.Fatal("OpenFile(3): got error: ", err)
|
||||
}
|
||||
|
||||
p4, err := OpenFile(path, true)
|
||||
if err != nil {
|
||||
t.Fatal("OpenFile(4): got error: ", err)
|
||||
}
|
||||
|
||||
_, err = OpenFile(path, false)
|
||||
if err != nil {
|
||||
t.Logf("OpenFile(5): got error: %s (expected)", err)
|
||||
} else {
|
||||
t.Fatal("OpenFile(2): expect error")
|
||||
}
|
||||
|
||||
p3.Close()
|
||||
p4.Close()
|
||||
}
|
||||
@ -18,18 +18,27 @@ type unixFileLock struct {
|
||||
}
|
||||
|
||||
func (fl *unixFileLock) release() error {
|
||||
if err := setFileLock(fl.f, false); err != nil {
|
||||
if err := setFileLock(fl.f, false, false); err != nil {
|
||||
return err
|
||||
}
|
||||
return fl.f.Close()
|
||||
}
|
||||
|
||||
func newFileLock(path string) (fl fileLock, err error) {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
|
||||
func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
|
||||
var flag int
|
||||
if readOnly {
|
||||
flag = os.O_RDONLY
|
||||
} else {
|
||||
flag = os.O_RDWR
|
||||
}
|
||||
f, err := os.OpenFile(path, flag, 0)
|
||||
if os.IsNotExist(err) {
|
||||
f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = setFileLock(f, true)
|
||||
err = setFileLock(f, readOnly, true)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return
|
||||
@ -38,10 +47,14 @@ func newFileLock(path string) (fl fileLock, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func setFileLock(f *os.File, lock bool) error {
|
||||
func setFileLock(f *os.File, readOnly, lock bool) error {
|
||||
how := syscall.LOCK_UN
|
||||
if lock {
|
||||
how = syscall.LOCK_EX
|
||||
if readOnly {
|
||||
how = syscall.LOCK_SH
|
||||
} else {
|
||||
how = syscall.LOCK_EX
|
||||
}
|
||||
}
|
||||
return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
|
||||
}
|
||||
@ -29,12 +29,22 @@ func (fl *windowsFileLock) release() error {
|
||||
return syscall.Close(fl.fd)
|
||||
}
|
||||
|
||||
func newFileLock(path string) (fl fileLock, err error) {
|
||||
func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
|
||||
pathp, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||
var access, shareMode uint32
|
||||
if readOnly {
|
||||
access = syscall.GENERIC_READ
|
||||
shareMode = syscall.FILE_SHARE_READ
|
||||
} else {
|
||||
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
|
||||
}
|
||||
fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||
if err == syscall.ERROR_FILE_NOT_FOUND {
|
||||
fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -47,9 +57,8 @@ func moveFileEx(from *uint16, to *uint16, flags uint32) error {
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
return error(e1)
|
||||
} else {
|
||||
return syscall.EINVAL
|
||||
}
|
||||
return syscall.EINVAL
|
||||
}
|
||||
return nil
|
||||
}
|
||||
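The flock/fcntl changes above take a shared lock (LOCK_SH / F_RDLCK) when the storage is opened read-only, so multiple read-only opens of the same directory can coexist while a writer still needs the exclusive lock. A quick sketch of that behaviour through the public OpenFile API, mirroring TestFileStorage_ReadOnlyLocking; the vendored import path and modern os.MkdirTemp are assumptions of this sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pingcap/goleveldb/leveldb/storage"
)

func main() {
	dir, _ := os.MkdirTemp("", "goleveldb-rolock")
	defer os.RemoveAll(dir)

	// Create the storage once in read-write mode so the directory and
	// LOCK file exist, then release it.
	rw, err := storage.OpenFile(dir, false)
	if err != nil {
		panic(err)
	}
	rw.Close()

	// Two read-only opens share the lock and both succeed.
	ro1, err := storage.OpenFile(dir, true)
	fmt.Println("first read-only open:", err)
	ro2, err := storage.OpenFile(dir, true)
	fmt.Println("second read-only open:", err)

	// A read-write open should now fail while the shared locks are held.
	_, err = storage.OpenFile(dir, false)
	fmt.Println("read-write open while shared:", err)

	ro1.Close()
	ro2.Close()
}
```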
218
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/storage/mem_storage.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const typeShift = 3
|
||||
|
||||
type memStorageLock struct {
|
||||
ms *memStorage
|
||||
}
|
||||
|
||||
func (lock *memStorageLock) Release() {
|
||||
ms := lock.ms
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.slock == lock {
|
||||
ms.slock = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// memStorage is a memory-backed storage.
|
||||
type memStorage struct {
|
||||
mu sync.Mutex
|
||||
slock *memStorageLock
|
||||
files map[uint64]*memFile
|
||||
meta FileDesc
|
||||
}
|
||||
|
||||
// NewMemStorage returns a new memory-backed storage implementation.
|
||||
func NewMemStorage() Storage {
|
||||
return &memStorage{
|
||||
files: make(map[uint64]*memFile),
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *memStorage) Lock() (Lock, error) {
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.slock != nil {
|
||||
return nil, ErrLocked
|
||||
}
|
||||
ms.slock = &memStorageLock{ms: ms}
|
||||
return ms.slock, nil
|
||||
}
|
||||
|
||||
func (*memStorage) Log(str string) {}
|
||||
|
||||
func (ms *memStorage) SetMeta(fd FileDesc) error {
|
||||
if !FileDescOk(fd) {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
|
||||
ms.mu.Lock()
|
||||
ms.meta = fd
|
||||
ms.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *memStorage) GetMeta() (FileDesc, error) {
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.meta.Nil() {
|
||||
return FileDesc{}, os.ErrNotExist
|
||||
}
|
||||
return ms.meta, nil
|
||||
}
|
||||
|
||||
func (ms *memStorage) List(ft FileType) ([]FileDesc, error) {
|
||||
ms.mu.Lock()
|
||||
var fds []FileDesc
|
||||
for x, _ := range ms.files {
|
||||
fd := unpackFile(x)
|
||||
if fd.Type&ft != 0 {
|
||||
fds = append(fds, fd)
|
||||
}
|
||||
}
|
||||
ms.mu.Unlock()
|
||||
return fds, nil
|
||||
}
|
||||
|
||||
func (ms *memStorage) Open(fd FileDesc) (Reader, error) {
|
||||
if !FileDescOk(fd) {
|
||||
return nil, ErrInvalidFile
|
||||
}
|
||||
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if m, exist := ms.files[packFile(fd)]; exist {
|
||||
if m.open {
|
||||
return nil, errFileOpen
|
||||
}
|
||||
m.open = true
|
||||
return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
func (ms *memStorage) Create(fd FileDesc) (Writer, error) {
|
||||
if !FileDescOk(fd) {
|
||||
return nil, ErrInvalidFile
|
||||
}
|
||||
|
||||
x := packFile(fd)
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
m, exist := ms.files[x]
|
||||
if exist {
|
||||
if m.open {
|
||||
return nil, errFileOpen
|
||||
}
|
||||
m.Reset()
|
||||
} else {
|
||||
m = &memFile{}
|
||||
ms.files[x] = m
|
||||
}
|
||||
m.open = true
|
||||
return &memWriter{memFile: m, ms: ms}, nil
|
||||
}
|
||||
|
||||
func (ms *memStorage) Remove(fd FileDesc) error {
|
||||
if !FileDescOk(fd) {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
|
||||
x := packFile(fd)
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if _, exist := ms.files[x]; exist {
|
||||
delete(ms.files, x)
|
||||
return nil
|
||||
}
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
|
||||
if FileDescOk(oldfd) || FileDescOk(newfd) {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
if oldfd == newfd {
|
||||
return nil
|
||||
}
|
||||
|
||||
oldx := packFile(oldfd)
|
||||
newx := packFile(newfd)
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
oldm, exist := ms.files[oldx]
|
||||
if !exist {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
newm, exist := ms.files[newx]
|
||||
if (exist && newm.open) || oldm.open {
|
||||
return errFileOpen
|
||||
}
|
||||
delete(ms.files, oldx)
|
||||
ms.files[newx] = oldm
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*memStorage) Close() error { return nil }
|
||||
|
||||
type memFile struct {
|
||||
bytes.Buffer
|
||||
open bool
|
||||
}
|
||||
|
||||
type memReader struct {
|
||||
*bytes.Reader
|
||||
ms *memStorage
|
||||
m *memFile
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (mr *memReader) Close() error {
|
||||
mr.ms.mu.Lock()
|
||||
defer mr.ms.mu.Unlock()
|
||||
if mr.closed {
|
||||
return ErrClosed
|
||||
}
|
||||
mr.m.open = false
|
||||
return nil
|
||||
}
|
||||
|
||||
type memWriter struct {
|
||||
*memFile
|
||||
ms *memStorage
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (*memWriter) Sync() error { return nil }
|
||||
|
||||
func (mw *memWriter) Close() error {
|
||||
mw.ms.mu.Lock()
|
||||
defer mw.ms.mu.Unlock()
|
||||
if mw.closed {
|
||||
return ErrClosed
|
||||
}
|
||||
mw.memFile.open = false
|
||||
return nil
|
||||
}
|
||||
|
||||
func packFile(fd FileDesc) uint64 {
|
||||
return uint64(fd.Num)<<typeShift | uint64(fd.Type)
|
||||
}
|
||||
|
||||
func unpackFile(x uint64) FileDesc {
|
||||
return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)}
|
||||
}
|
||||
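mem_storage.go above keys its file map by packing a FileDesc into a single uint64: the file number is shifted left by typeShift (3) and the type occupies the low bits. A standalone round-trip of that packing, using local copies of the unexported helpers for illustration only:

```go
package main

import "fmt"

type fileType int

const (
	typeManifest fileType = 1 << iota
	typeJournal
	typeTable
	typeTemp
	typeAll = typeManifest | typeJournal | typeTable | typeTemp
)

const typeShift = 3 // low bits hold the type, the rest hold the number

// pack mirrors packFile above.
func pack(t fileType, num int64) uint64 {
	return uint64(num)<<typeShift | uint64(t)
}

// unpack mirrors unpackFile above.
func unpack(x uint64) (fileType, int64) {
	return fileType(x) & typeAll, int64(x >> typeShift)
}

func main() {
	x := pack(typeTable, 42)
	t, num := unpack(x)
	fmt.Printf("packed=%#x type=%d num=%d\n", x, t, num) // packed=0x154 type=4 num=42
}
```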
65
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/storage/mem_storage_test.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMemStorage(t *testing.T) {
|
||||
m := NewMemStorage()
|
||||
|
||||
l, err := m.Lock()
|
||||
if err != nil {
|
||||
t.Fatal("storage lock failed(1): ", err)
|
||||
}
|
||||
_, err = m.Lock()
|
||||
if err == nil {
|
||||
t.Fatal("expect error for second storage lock attempt")
|
||||
} else {
|
||||
t.Logf("storage lock got error: %s (expected)", err)
|
||||
}
|
||||
l.Release()
|
||||
_, err = m.Lock()
|
||||
if err != nil {
|
||||
t.Fatal("storage lock failed(2): ", err)
|
||||
}
|
||||
|
||||
w, err := m.Create(FileDesc{TypeTable, 1})
|
||||
if err != nil {
|
||||
t.Fatal("Storage.Create: ", err)
|
||||
}
|
||||
w.Write([]byte("abc"))
|
||||
w.Close()
|
||||
if fds, _ := m.List(TypeAll); len(fds) != 1 {
|
||||
t.Fatal("invalid GetFiles len")
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
r, err := m.Open(FileDesc{TypeTable, 1})
|
||||
if err != nil {
|
||||
t.Fatal("Open: got error: ", err)
|
||||
}
|
||||
buf.ReadFrom(r)
|
||||
r.Close()
|
||||
if got := buf.String(); got != "abc" {
|
||||
t.Fatalf("Read: invalid value, want=abc got=%s", got)
|
||||
}
|
||||
if _, err := m.Open(FileDesc{TypeTable, 1}); err != nil {
|
||||
t.Fatal("Open: got error: ", err)
|
||||
}
|
||||
if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil {
|
||||
t.Fatal("expecting error")
|
||||
}
|
||||
m.Remove(FileDesc{TypeTable, 1})
|
||||
if fds, _ := m.List(TypeAll); len(fds) != 0 {
|
||||
t.Fatal("invalid GetFiles len", len(fds))
|
||||
}
|
||||
if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil {
|
||||
t.Fatal("expecting error")
|
||||
}
|
||||
}
|
||||
@ -12,10 +12,10 @@ import (
"fmt"
"io"

"github.com/syndtr/goleveldb/leveldb/util"
"github.com/pingcap/goleveldb/leveldb/util"
)

type FileType uint32
type FileType int

const (
TypeManifest FileType = 1 << iota
@ -50,13 +50,13 @@ var (
// a file. Package storage has its own type instead of using
// errors.ErrCorrupted to prevent circular import.
type ErrCorrupted struct {
File *FileInfo
Err error
Fd FileDesc
Err error
}

func (e *ErrCorrupted) Error() string {
if e.File != nil {
return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
if !e.Fd.Nil() {
return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
} else {
return e.Err.Error()
}
@ -83,31 +83,47 @@ type Writer interface {
Syncer
}

// File is the file. A file instance must be goroutine-safe.
type File interface {
// Open opens the file for read. Returns os.ErrNotExist error
// if the file does not exist.
// Returns ErrClosed if the underlying storage is closed.
Open() (r Reader, err error)
type Lock interface {
util.Releaser
}

// Create creates the file for writting. Truncate the file if
// already exist.
// Returns ErrClosed if the underlying storage is closed.
Create() (w Writer, err error)
// FileDesc is a file descriptor.
type FileDesc struct {
Type FileType
Num int64
}

// Replace replaces file with newfile.
// Returns ErrClosed if the underlying storage is closed.
Replace(newfile File) error
func (fd FileDesc) String() string {
switch fd.Type {
case TypeManifest:
return fmt.Sprintf("MANIFEST-%06d", fd.Num)
case TypeJournal:
return fmt.Sprintf("%06d.log", fd.Num)
case TypeTable:
return fmt.Sprintf("%06d.ldb", fd.Num)
case TypeTemp:
return fmt.Sprintf("%06d.tmp", fd.Num)
default:
return fmt.Sprintf("%#x-%d", fd.Type, fd.Num)
}
}

// Type returns the file type
Type() FileType
// Nil returns true if fd == (FileDesc{}).
func (fd FileDesc) Nil() bool {
return fd == (FileDesc{})
}

// Num returns the file number.
Num() uint64

// Remove removes the file.
// Returns ErrClosed if the underlying storage is closed.
Remove() error
// FileDescOk returns true if fd is a valid file descriptor.
func FileDescOk(fd FileDesc) bool {
switch fd.Type {
case TypeManifest:
case TypeJournal:
case TypeTable:
case TypeTemp:
default:
return false
}
return fd.Num >= 0
}

// Storage is the storage. A storage instance must be goroutine-safe.
@ -115,59 +131,47 @@ type Storage interface {
// Lock locks the storage. Any subsequent attempt to call Lock will fail
// until the last lock released.
// After use the caller should call the Release method.
Lock() (l util.Releaser, err error)
Lock() (Lock, error)

// Log logs a string. This is used for logging. An implementation
// may write to a file, stdout or simply do nothing.
// Log logs a string. This is used for logging.
// An implementation may write to a file, stdout or simply do nothing.
Log(str string)

// GetFile returns a file for the given number and type. GetFile will never
// returns nil, even if the underlying storage is closed.
GetFile(num uint64, t FileType) File
// SetMeta sets to point to the given fd, which then can be acquired using
// GetMeta method.
// SetMeta should be implemented in such way that changes should happened
// atomically.
SetMeta(fd FileDesc) error

// GetFiles returns a slice of files that match the given file types.
// GetManifest returns a manifest file.
// Returns os.ErrNotExist if meta doesn't point to any fd, or point to fd
// that doesn't exist.
GetMeta() (FileDesc, error)

// List returns fds that match the given file types.
// The file types may be OR'ed together.
GetFiles(t FileType) ([]File, error)
List(ft FileType) ([]FileDesc, error)

// GetManifest returns a manifest file. Returns os.ErrNotExist if manifest
// file does not exist.
GetManifest() (File, error)
// Open opens file with the given fd read-only.
// Returns os.ErrNotExist error if the file does not exist.
// Returns ErrClosed if the underlying storage is closed.
Open(fd FileDesc) (Reader, error)

// SetManifest sets the given file as manifest file. The given file should
// be a manifest file type or error will be returned.
SetManifest(f File) error
// Create creates file with the given fd, truncate if already exist and
// opens write-only.
// Returns ErrClosed if the underlying storage is closed.
Create(fd FileDesc) (Writer, error)

// Close closes the storage. It is valid to call Close multiple times.
// Other methods should not be called after the storage has been closed.
// Remove removes file with the given fd.
// Returns ErrClosed if the underlying storage is closed.
Remove(fd FileDesc) error

// Rename renames file from oldfd to newfd.
// Returns ErrClosed if the underlying storage is closed.
Rename(oldfd, newfd FileDesc) error

// Close closes the storage.
// It is valid to call Close multiple times. Other methods should not be
// called after the storage has been closed.
Close() error
}

// FileInfo wraps basic file info.
type FileInfo struct {
Type FileType
Num uint64
}

func (fi FileInfo) String() string {
switch fi.Type {
case TypeManifest:
return fmt.Sprintf("MANIFEST-%06d", fi.Num)
case TypeJournal:
return fmt.Sprintf("%06d.log", fi.Num)
case TypeTable:
return fmt.Sprintf("%06d.ldb", fi.Num)
case TypeTemp:
return fmt.Sprintf("%06d.tmp", fi.Num)
default:
return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
}
}

// NewFileInfo creates new FileInfo from the given File. It will returns nil
// if File is nil.
func NewFileInfo(f File) *FileInfo {
if f == nil {
return nil
}
return &FileInfo{f.Type(), f.Num()}
}
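The storage.go hunk above replaces the old File interface with the plain FileDesc value type: FileDescOk validates the type and number, String renders the canonical on-disk name, and Nil reports the zero value. A short usage sketch against the vendored package (import path as vendored above):

```go
package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/storage"
)

func main() {
	fds := []storage.FileDesc{
		{Type: storage.TypeManifest, Num: 2},
		{Type: storage.TypeJournal, Num: 100},
		{Type: storage.TypeTable, Num: 5},
	}
	for _, fd := range fds {
		// String() yields the on-disk style name, FileDescOk validates it.
		fmt.Printf("%-16s ok=%v nil=%v\n", fd.String(), storage.FileDescOk(fd), fd.Nil())
	}
	var zero storage.FileDesc
	fmt.Println("zero value is nil:", zero.Nil(), "ok:", storage.FileDescOk(zero))
}
```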
@ -11,20 +11,20 @@ import (
"sort"
"sync/atomic"

"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/table"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/pingcap/goleveldb/leveldb/cache"
"github.com/pingcap/goleveldb/leveldb/iterator"
"github.com/pingcap/goleveldb/leveldb/opt"
"github.com/pingcap/goleveldb/leveldb/storage"
"github.com/pingcap/goleveldb/leveldb/table"
"github.com/pingcap/goleveldb/leveldb/util"
)

// tFile holds basic information about a table.
type tFile struct {
file storage.File
fd storage.FileDesc
seekLeft int32
size uint64
imin, imax iKey
size int64
imin, imax internalKey
}

// Returns true if given key is after largest key of this table.
@ -48,9 +48,9 @@ func (t *tFile) consumeSeek() int32 {
}

// Creates new tFile.
func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile {
func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
f := &tFile{
file: file,
fd: fd,
size: size,
imin: imin,
imax: imax,
@ -77,6 +77,10 @@ func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile {
return f
}

func tableFileFromRecord(r atRecord) *tFile {
return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax)
}

// tFiles hold multiple tFile.
type tFiles []*tFile

@ -89,7 +93,7 @@ func (tf tFiles) nums() string {
if i != 0 {
x += ", "
}
x += fmt.Sprint(f.file.Num())
x += fmt.Sprint(f.fd.Num)
}
x += " ]"
return x
@ -101,7 +105,7 @@ func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
a, b := tf[i], tf[j]
n := icmp.Compare(a.imin, b.imin)
if n == 0 {
return a.file.Num() < b.file.Num()
return a.fd.Num < b.fd.Num
}
return n < 0
}
@ -109,7 +113,7 @@ func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
// Returns true if i file number is greater than j.
// This used for sort by file number in descending order.
func (tf tFiles) lessByNum(i, j int) bool {
return tf[i].file.Num() > tf[j].file.Num()
return tf[i].fd.Num > tf[j].fd.Num
}

// Sorts tables by key in ascending order.
@ -123,7 +127,7 @@ func (tf tFiles) sortByNum() {
}

// Returns sum of all tables size.
func (tf tFiles) size() (sum uint64) {
func (tf tFiles) size() (sum int64) {
for _, t := range tf {
sum += t.size
}
@ -132,7 +136,7 @@ func (tf tFiles) size() (sum uint64) {

// Searches smallest index of tables whose its smallest
// key is after or equal with given key.
func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imin, ikey) >= 0
})
@ -140,7 +144,7 @@ func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {

// Searches smallest index of tables whose its largest
// key is after or equal with given key.
func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imax, ikey) >= 0
})
@ -162,7 +166,7 @@ func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) boo
i := 0
if len(umin) > 0 {
// Find the earliest possible internal key for min.
i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek))
i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
}
if i >= len(tf) {
// Beginning of range is after all files, so no overlap.
@ -205,7 +209,7 @@ func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, ove
}

// Returns tables key range.
func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
for i, t := range tf {
if i == 0 {
imin, imax = t.imin, t.imax
@ -227,10 +231,10 @@ func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range
if slice != nil {
var start, limit int
if slice.Start != nil {
start = tf.searchMax(icmp, iKey(slice.Start))
start = tf.searchMax(icmp, internalKey(slice.Start))
}
if slice.Limit != nil {
limit = tf.searchMin(icmp, iKey(slice.Limit))
limit = tf.searchMin(icmp, internalKey(slice.Limit))
} else {
limit = tf.Len()
}
@ -255,7 +259,7 @@ type tFilesArrayIndexer struct {
}

func (a *tFilesArrayIndexer) Search(key []byte) int {
return a.searchMax(a.icmp, iKey(key))
return a.searchMax(a.icmp, internalKey(key))
}

func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@ -295,16 +299,16 @@ type tOps struct {

// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
file := t.s.getTableFile(t.s.allocFileNum())
fw, err := file.Create()
fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
}
return &tWriter{
t: t,
file: file,
w: fw,
tw: table.NewWriter(fw, t.s.o.Options),
t: t,
fd: fd,
w: fw,
tw: table.NewWriter(fw, t.s.o.Options),
}, nil
}

@ -340,21 +344,20 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
// Opens table. It returns a cache handle, which should
// be released after use.
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
num := f.file.Num()
ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
var r storage.Reader
r, err = f.file.Open()
r, err = t.s.stor.Open(f.fd)
if err != nil {
return 0, nil
}

var bcache *cache.CacheGetter
var bcache *cache.NamespaceGetter
if t.bcache != nil {
bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
}

var tr *table.Reader
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
if err != nil {
r.Close()
return 0, nil
@ -390,14 +393,13 @@ func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte,
}

// Returns approximate offset of the given key.
func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
ch, err := t.open(f)
if err != nil {
return
}
defer ch.Release()
offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
return uint64(offset_), err
return ch.Value().(*table.Reader).OffsetOf(key)
}

// Creates an iterator from the given table.
@ -414,15 +416,14 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
// Removes table from persistent storage. It waits until
// no one use the the table.
func (t *tOps) remove(f *tFile) {
num := f.file.Num()
t.cache.Delete(0, num, func() {
if err := f.file.Remove(); err != nil {
t.s.logf("table@remove removing @%d %q", num, err)
t.cache.Delete(0, uint64(f.fd.Num), func() {
if err := t.s.stor.Remove(f.fd); err != nil {
t.s.logf("table@remove removing @%d %q", f.fd.Num, err)
} else {
t.s.logf("table@remove removed @%d", num)
t.s.logf("table@remove removed @%d", f.fd.Num)
}
if t.bcache != nil {
t.bcache.EvictNS(num)
t.bcache.EvictNS(uint64(f.fd.Num))
}
})
}
@ -471,9 +472,9 @@ func newTableOps(s *session) *tOps {
type tWriter struct {
t *tOps

file storage.File
w storage.Writer
tw *table.Writer
|
||||
fd storage.FileDesc
|
||||
w storage.Writer
|
||||
tw *table.Writer
|
||||
|
||||
first, last []byte
|
||||
}
|
||||
@ -513,16 +514,15 @@ func (w *tWriter) finish() (f *tFile, err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
|
||||
f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
|
||||
return
|
||||
}
|
||||
|
||||
// Drops the table.
|
||||
func (w *tWriter) drop() {
|
||||
w.close()
|
||||
w.file.Remove()
|
||||
w.t.s.reuseFileNum(w.file.Num())
|
||||
w.file = nil
|
||||
w.t.s.stor.Remove(w.fd)
|
||||
w.t.s.reuseFileNum(w.fd.Num)
|
||||
w.tw = nil
|
||||
w.first = nil
|
||||
w.last = nil
|
||||
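The table.go hunks above replace the old storage.File handle with a plain storage.FileDesc value (type plus number) and derive the block-cache namespace from uint64(fd.Num). Below is a minimal standalone sketch of that identifier pattern, for illustration only: it assumes the vendored github.com/pingcap/goleveldb/leveldb/storage package is importable, and the file number 42 is made up.

package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/storage"
)

func main() {
	// A table file is now identified by a FileDesc value rather than a
	// storage.File handle; the fields mirror the composite literal used
	// in tOps.create above.
	fd := storage.FileDesc{Type: storage.TypeTable, Num: 42}

	// The block cache keys table blocks by file number, hence the
	// uint64(fd.Num) conversions in tOps.open and tOps.remove.
	ns := uint64(fd.Num)

	fmt.Printf("fd=%+v cache namespace=%d\n", fd, ns)
}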
139
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/table/block_test.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type blockTesting struct {
|
||||
tr *Reader
|
||||
b *block
|
||||
}
|
||||
|
||||
func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return t.tr.newBlockIter(t.b, nil, slice, false)
|
||||
}
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Block", func() {
|
||||
Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
|
||||
// Building the block.
|
||||
bw := &blockWriter{
|
||||
restartInterval: restartInterval,
|
||||
scratch: make([]byte, 30),
|
||||
}
|
||||
kv.Iterate(func(i int, key, value []byte) {
|
||||
bw.append(key, value)
|
||||
})
|
||||
bw.finish()
|
||||
|
||||
// Opening the block.
|
||||
data := bw.buf.Bytes()
|
||||
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
|
||||
return &blockTesting{
|
||||
tr: &Reader{cmp: comparer.DefaultComparer},
|
||||
b: &block{
|
||||
data: data,
|
||||
restartsLen: restartsLen,
|
||||
restartsOffset: len(data) - (restartsLen+1)*4,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Describe("read test", func() {
|
||||
for restartInterval := 1; restartInterval <= 5; restartInterval++ {
|
||||
Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
|
||||
kv := &testutil.KeyValue{}
|
||||
Text := func() string {
|
||||
return fmt.Sprintf("and %d keys", kv.Len())
|
||||
}
|
||||
|
||||
Test := func() {
|
||||
// Make block.
|
||||
br := Build(kv, restartInterval)
|
||||
// Do testing.
|
||||
testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
|
||||
}
|
||||
|
||||
Describe(Text(), Test)
|
||||
|
||||
kv.PutString("", "empty")
|
||||
Describe(Text(), Test)
|
||||
|
||||
kv.PutString("a1", "foo")
|
||||
Describe(Text(), Test)
|
||||
|
||||
kv.PutString("a2", "v")
|
||||
Describe(Text(), Test)
|
||||
|
||||
kv.PutString("a3qqwrkks", "hello")
|
||||
Describe(Text(), Test)
|
||||
|
||||
kv.PutString("a4", "bar")
|
||||
Describe(Text(), Test)
|
||||
|
||||
kv.PutString("a5111111", "v5")
|
||||
kv.PutString("a6", "")
|
||||
kv.PutString("a7", "v7")
|
||||
kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8")
|
||||
kv.PutString("b", "v9")
|
||||
kv.PutString("c9", "v9")
|
||||
kv.PutString("c91", "v9")
|
||||
kv.PutString("d0", "v9")
|
||||
Describe(Text(), Test)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
Describe("out-of-bound slice test", func() {
|
||||
kv := &testutil.KeyValue{}
|
||||
kv.PutString("k1", "v1")
|
||||
kv.PutString("k2", "v2")
|
||||
kv.PutString("k3abcdefgg", "v3")
|
||||
kv.PutString("k4", "v4")
|
||||
kv.PutString("k5", "v5")
|
||||
for restartInterval := 1; restartInterval <= 5; restartInterval++ {
|
||||
Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
|
||||
// Make block.
|
||||
bt := Build(kv, restartInterval)
|
||||
|
||||
Test := func(r *util.Range) func(done Done) {
|
||||
return func(done Done) {
|
||||
iter := bt.TestNewIterator(r)
|
||||
Expect(iter.Error()).ShouldNot(HaveOccurred())
|
||||
|
||||
t := testutil.IteratorTesting{
|
||||
KeyValue: kv.Clone(),
|
||||
Iter: iter,
|
||||
}
|
||||
|
||||
testutil.DoIteratorTesting(&t)
|
||||
iter.Release()
|
||||
done <- true
|
||||
}
|
||||
}
|
||||
|
||||
It("Should do iterations and seeks correctly #0",
|
||||
Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0)
|
||||
|
||||
It("Should do iterations and seeks correctly #1",
|
||||
Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0)
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -16,14 +16,14 @@ import (

	"github.com/golang/snappy"

	"github.com/syndtr/goleveldb/leveldb/cache"
	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/pingcap/goleveldb/leveldb/cache"
	"github.com/pingcap/goleveldb/leveldb/comparer"
	"github.com/pingcap/goleveldb/leveldb/errors"
	"github.com/pingcap/goleveldb/leveldb/filter"
	"github.com/pingcap/goleveldb/leveldb/iterator"
	"github.com/pingcap/goleveldb/leveldb/opt"
	"github.com/pingcap/goleveldb/leveldb/storage"
	"github.com/pingcap/goleveldb/leveldb/util"
)

var (
@ -507,9 +507,9 @@ func (i *indexIter) Get() iterator.Iterator {
|
||||
// Reader is a table reader.
|
||||
type Reader struct {
|
||||
mu sync.RWMutex
|
||||
fi *storage.FileInfo
|
||||
fd storage.FileDesc
|
||||
reader io.ReaderAt
|
||||
cache *cache.CacheGetter
|
||||
cache *cache.NamespaceGetter
|
||||
err error
|
||||
bpool *util.BufferPool
|
||||
// Options
|
||||
@ -539,7 +539,7 @@ func (r *Reader) blockKind(bh blockHandle) string {
|
||||
}
|
||||
|
||||
func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
|
||||
return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
|
||||
return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
|
||||
}
|
||||
|
||||
func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
|
||||
@ -551,7 +551,7 @@ func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
|
||||
cerr.Pos = int64(bh.offset)
|
||||
cerr.Size = int64(bh.length)
|
||||
cerr.Kind = r.blockKind(bh)
|
||||
return &errors.ErrCorrupted{File: r.fi, Err: cerr}
|
||||
return &errors.ErrCorrupted{Fd: r.fd, Err: cerr}
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -988,13 +988,13 @@ func (r *Reader) Release() {
|
||||
// The fi, cache and bpool is optional and can be nil.
|
||||
//
|
||||
// The returned table reader instance is goroutine-safe.
|
||||
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
|
||||
func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
|
||||
if f == nil {
|
||||
return nil, errors.New("leveldb/table: nil file")
|
||||
}
|
||||
|
||||
r := &Reader{
|
||||
fi: fi,
|
||||
fd: fd,
|
||||
reader: f,
|
||||
cache: cache,
|
||||
bpool: bpool,
|
||||
11
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/table/table_suite_test.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
package table

import (
	"testing"

	"github.com/pingcap/goleveldb/leveldb/testutil"
)

func TestTable(t *testing.T) {
	testutil.RunSuite(t, "Table Suite")
}
123
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/table/table_test.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type tableWrapper struct {
|
||||
*Reader
|
||||
}
|
||||
|
||||
func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
|
||||
return t.Reader.Find(key, false, nil)
|
||||
}
|
||||
|
||||
func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
|
||||
return t.Reader.Get(key, nil)
|
||||
}
|
||||
|
||||
func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return t.Reader.NewIterator(slice, nil)
|
||||
}
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Table", func() {
|
||||
Describe("approximate offset test", func() {
|
||||
var (
|
||||
buf = &bytes.Buffer{}
|
||||
o = &opt.Options{
|
||||
BlockSize: 1024,
|
||||
Compression: opt.NoCompression,
|
||||
}
|
||||
)
|
||||
|
||||
// Building the table.
|
||||
tw := NewWriter(buf, o)
|
||||
tw.Append([]byte("k01"), []byte("hello"))
|
||||
tw.Append([]byte("k02"), []byte("hello2"))
|
||||
tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000))
|
||||
tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000))
|
||||
tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000))
|
||||
tw.Append([]byte("k06"), []byte("hello3"))
|
||||
tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000))
|
||||
err := tw.Close()
|
||||
|
||||
It("Should be able to approximate offset of a key correctly", func() {
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
|
||||
tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
CheckOffset := func(key string, expect, threshold int) {
|
||||
offset, err := tr.OffsetOf([]byte(key))
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
|
||||
}
|
||||
|
||||
CheckOffset("k0", 0, 0)
|
||||
CheckOffset("k01a", 0, 0)
|
||||
CheckOffset("k02", 0, 0)
|
||||
CheckOffset("k03", 0, 0)
|
||||
CheckOffset("k04", 10000, 1000)
|
||||
CheckOffset("k04a", 210000, 1000)
|
||||
CheckOffset("k05", 210000, 1000)
|
||||
CheckOffset("k06", 510000, 1000)
|
||||
CheckOffset("k07", 510000, 1000)
|
||||
CheckOffset("xyz", 610000, 2000)
|
||||
})
|
||||
})
|
||||
|
||||
Describe("read test", func() {
|
||||
Build := func(kv testutil.KeyValue) testutil.DB {
|
||||
o := &opt.Options{
|
||||
BlockSize: 512,
|
||||
BlockRestartInterval: 3,
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
// Building the table.
|
||||
tw := NewWriter(buf, o)
|
||||
kv.Iterate(func(i int, key, value []byte) {
|
||||
tw.Append(key, value)
|
||||
})
|
||||
tw.Close()
|
||||
|
||||
// Opening the table.
|
||||
tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o)
|
||||
return tableWrapper{tr}
|
||||
}
|
||||
Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
|
||||
return func() {
|
||||
db := Build(*kv)
|
||||
if body != nil {
|
||||
body(db.(tableWrapper).Reader)
|
||||
}
|
||||
testutil.KeyValueTesting(nil, *kv, db, nil, nil)
|
||||
}
|
||||
}
|
||||
|
||||
testutil.AllKeyValueTesting(nil, Build, nil, nil)
|
||||
Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
|
||||
It("should have correct blocks number", func() {
|
||||
indexBlock, err := r.readBlock(r.indexBH, true)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(indexBlock.restartsLen).Should(Equal(9))
|
||||
})
|
||||
}))
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -14,10 +14,10 @@ import (

	"github.com/golang/snappy"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/pingcap/goleveldb/leveldb/comparer"
	"github.com/pingcap/goleveldb/leveldb/filter"
	"github.com/pingcap/goleveldb/leveldb/opt"
	"github.com/pingcap/goleveldb/leveldb/util"
)

func sharedPrefixLen(a, b []byte) int {
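The writer.go hunk above only touches the import block; sharedPrefixLen appears as unchanged context. For reference, a typical implementation of such a helper is sketched below; this is an illustration written for this note, not code taken from the commit.

package main

import "fmt"

// sharedPrefixLen returns the length of the longest common prefix of a and b,
// mirroring the kind of helper whose signature appears as context above.
func sharedPrefixLen(a, b []byte) int {
	i, n := 0, len(a)
	if n > len(b) {
		n = len(b)
	}
	for i < n && a[i] == b[i] {
		i++
	}
	return i
}

func main() {
	fmt.Println(sharedPrefixLen([]byte("abcdef"), []byte("abczzz"))) // prints 3
}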
222
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/db.go
generated
vendored
Normal file
@ -0,0 +1,222 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type DB interface{}
|
||||
|
||||
type Put interface {
|
||||
TestPut(key []byte, value []byte) error
|
||||
}
|
||||
|
||||
type Delete interface {
|
||||
TestDelete(key []byte) error
|
||||
}
|
||||
|
||||
type Find interface {
|
||||
TestFind(key []byte) (rkey, rvalue []byte, err error)
|
||||
}
|
||||
|
||||
type Get interface {
|
||||
TestGet(key []byte) (value []byte, err error)
|
||||
}
|
||||
|
||||
type Has interface {
|
||||
TestHas(key []byte) (ret bool, err error)
|
||||
}
|
||||
|
||||
type NewIterator interface {
|
||||
TestNewIterator(slice *util.Range) iterator.Iterator
|
||||
}
|
||||
|
||||
type DBAct int
|
||||
|
||||
func (a DBAct) String() string {
|
||||
switch a {
|
||||
case DBNone:
|
||||
return "none"
|
||||
case DBPut:
|
||||
return "put"
|
||||
case DBOverwrite:
|
||||
return "overwrite"
|
||||
case DBDelete:
|
||||
return "delete"
|
||||
case DBDeleteNA:
|
||||
return "delete_na"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
const (
|
||||
DBNone DBAct = iota
|
||||
DBPut
|
||||
DBOverwrite
|
||||
DBDelete
|
||||
DBDeleteNA
|
||||
)
|
||||
|
||||
type DBTesting struct {
|
||||
Rand *rand.Rand
|
||||
DB interface {
|
||||
Get
|
||||
Put
|
||||
Delete
|
||||
}
|
||||
PostFn func(t *DBTesting)
|
||||
Deleted, Present KeyValue
|
||||
Act, LastAct DBAct
|
||||
ActKey, LastActKey []byte
|
||||
}
|
||||
|
||||
func (t *DBTesting) post() {
|
||||
if t.PostFn != nil {
|
||||
t.PostFn(t)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *DBTesting) setAct(act DBAct, key []byte) {
|
||||
t.LastAct, t.Act = t.Act, act
|
||||
t.LastActKey, t.ActKey = t.ActKey, key
|
||||
}
|
||||
|
||||
func (t *DBTesting) text() string {
|
||||
return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey)
|
||||
}
|
||||
|
||||
func (t *DBTesting) Text() string {
|
||||
return "DBTesting " + t.text()
|
||||
}
|
||||
|
||||
func (t *DBTesting) TestPresentKV(key, value []byte) {
|
||||
rvalue, err := t.DB.TestGet(key)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text())
|
||||
Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text())
|
||||
}
|
||||
|
||||
func (t *DBTesting) TestAllPresent() {
|
||||
t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) {
|
||||
t.TestPresentKV(key, value)
|
||||
})
|
||||
}
|
||||
|
||||
func (t *DBTesting) TestDeletedKey(key []byte) {
|
||||
_, err := t.DB.TestGet(key)
|
||||
Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
|
||||
}
|
||||
|
||||
func (t *DBTesting) TestAllDeleted() {
|
||||
t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) {
|
||||
t.TestDeletedKey(key)
|
||||
})
|
||||
}
|
||||
|
||||
func (t *DBTesting) TestAll() {
|
||||
dn := t.Deleted.Len()
|
||||
pn := t.Present.Len()
|
||||
ShuffledIndex(t.Rand, dn+pn, 1, func(i int) {
|
||||
if i >= dn {
|
||||
key, value := t.Present.Index(i - dn)
|
||||
t.TestPresentKV(key, value)
|
||||
} else {
|
||||
t.TestDeletedKey(t.Deleted.KeyAt(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (t *DBTesting) Put(key, value []byte) {
|
||||
if new := t.Present.PutU(key, value); new {
|
||||
t.setAct(DBPut, key)
|
||||
} else {
|
||||
t.setAct(DBOverwrite, key)
|
||||
}
|
||||
t.Deleted.Delete(key)
|
||||
err := t.DB.TestPut(key, value)
|
||||
Expect(err).ShouldNot(HaveOccurred(), t.Text())
|
||||
t.TestPresentKV(key, value)
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *DBTesting) PutRandom() bool {
|
||||
if t.Deleted.Len() > 0 {
|
||||
i := t.Rand.Intn(t.Deleted.Len())
|
||||
key, value := t.Deleted.Index(i)
|
||||
t.Put(key, value)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *DBTesting) Delete(key []byte) {
|
||||
if exist, value := t.Present.Delete(key); exist {
|
||||
t.setAct(DBDelete, key)
|
||||
t.Deleted.PutU(key, value)
|
||||
} else {
|
||||
t.setAct(DBDeleteNA, key)
|
||||
}
|
||||
err := t.DB.TestDelete(key)
|
||||
Expect(err).ShouldNot(HaveOccurred(), t.Text())
|
||||
t.TestDeletedKey(key)
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *DBTesting) DeleteRandom() bool {
|
||||
if t.Present.Len() > 0 {
|
||||
i := t.Rand.Intn(t.Present.Len())
|
||||
t.Delete(t.Present.KeyAt(i))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *DBTesting) RandomAct(round int) {
|
||||
for i := 0; i < round; i++ {
|
||||
if t.Rand.Int()%2 == 0 {
|
||||
t.PutRandom()
|
||||
} else {
|
||||
t.DeleteRandom()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func DoDBTesting(t *DBTesting) {
|
||||
if t.Rand == nil {
|
||||
t.Rand = NewRand()
|
||||
}
|
||||
|
||||
t.DeleteRandom()
|
||||
t.PutRandom()
|
||||
t.DeleteRandom()
|
||||
t.DeleteRandom()
|
||||
for i := t.Deleted.Len() / 2; i >= 0; i-- {
|
||||
t.PutRandom()
|
||||
}
|
||||
t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10)
|
||||
|
||||
// Additional iterator testing
|
||||
if db, ok := t.DB.(NewIterator); ok {
|
||||
iter := db.TestNewIterator(nil)
|
||||
Expect(iter.Error()).NotTo(HaveOccurred())
|
||||
|
||||
it := IteratorTesting{
|
||||
KeyValue: t.Present,
|
||||
Iter: iter,
|
||||
}
|
||||
|
||||
DoIteratorTesting(&it)
|
||||
iter.Release()
|
||||
}
|
||||
}
|
||||
21
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/ginkgo.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
package testutil

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func RunSuite(t GinkgoTestingT, name string) {
	RunDefer()

	SynchronizedBeforeSuite(func() []byte {
		RunDefer("setup")
		return nil
	}, func(data []byte) {})
	SynchronizedAfterSuite(func() {
		RunDefer("teardown")
	}, func() {})

	RegisterFailHandler(Fail)
	RunSpecs(t, name)
}
327
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/iter.go
generated
vendored
Normal file
@ -0,0 +1,327 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
)
|
||||
|
||||
type IterAct int
|
||||
|
||||
func (a IterAct) String() string {
|
||||
switch a {
|
||||
case IterNone:
|
||||
return "none"
|
||||
case IterFirst:
|
||||
return "first"
|
||||
case IterLast:
|
||||
return "last"
|
||||
case IterPrev:
|
||||
return "prev"
|
||||
case IterNext:
|
||||
return "next"
|
||||
case IterSeek:
|
||||
return "seek"
|
||||
case IterSOI:
|
||||
return "soi"
|
||||
case IterEOI:
|
||||
return "eoi"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
const (
|
||||
IterNone IterAct = iota
|
||||
IterFirst
|
||||
IterLast
|
||||
IterPrev
|
||||
IterNext
|
||||
IterSeek
|
||||
IterSOI
|
||||
IterEOI
|
||||
)
|
||||
|
||||
type IteratorTesting struct {
|
||||
KeyValue
|
||||
Iter iterator.Iterator
|
||||
Rand *rand.Rand
|
||||
PostFn func(t *IteratorTesting)
|
||||
Pos int
|
||||
Act, LastAct IterAct
|
||||
|
||||
once bool
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) init() {
|
||||
if !t.once {
|
||||
t.Pos = -1
|
||||
t.once = true
|
||||
}
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) post() {
|
||||
if t.PostFn != nil {
|
||||
t.PostFn(t)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) setAct(act IterAct) {
|
||||
t.LastAct, t.Act = t.Act, act
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) text() string {
|
||||
return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act)
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) Text() string {
|
||||
return "IteratorTesting is " + t.text()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) IsFirst() bool {
|
||||
t.init()
|
||||
return t.Len() > 0 && t.Pos == 0
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) IsLast() bool {
|
||||
t.init()
|
||||
return t.Len() > 0 && t.Pos == t.Len()-1
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) TestKV() {
|
||||
t.init()
|
||||
key, value := t.Index(t.Pos)
|
||||
Expect(t.Iter.Key()).NotTo(BeNil())
|
||||
Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text())
|
||||
Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text())
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) First() {
|
||||
t.init()
|
||||
t.setAct(IterFirst)
|
||||
|
||||
ok := t.Iter.First()
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
if t.Len() > 0 {
|
||||
t.Pos = 0
|
||||
Expect(ok).Should(BeTrue(), t.Text())
|
||||
t.TestKV()
|
||||
} else {
|
||||
t.Pos = -1
|
||||
Expect(ok).ShouldNot(BeTrue(), t.Text())
|
||||
}
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) Last() {
|
||||
t.init()
|
||||
t.setAct(IterLast)
|
||||
|
||||
ok := t.Iter.Last()
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
if t.Len() > 0 {
|
||||
t.Pos = t.Len() - 1
|
||||
Expect(ok).Should(BeTrue(), t.Text())
|
||||
t.TestKV()
|
||||
} else {
|
||||
t.Pos = 0
|
||||
Expect(ok).ShouldNot(BeTrue(), t.Text())
|
||||
}
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) Next() {
|
||||
t.init()
|
||||
t.setAct(IterNext)
|
||||
|
||||
ok := t.Iter.Next()
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
if t.Pos < t.Len()-1 {
|
||||
t.Pos++
|
||||
Expect(ok).Should(BeTrue(), t.Text())
|
||||
t.TestKV()
|
||||
} else {
|
||||
t.Pos = t.Len()
|
||||
Expect(ok).ShouldNot(BeTrue(), t.Text())
|
||||
}
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) Prev() {
|
||||
t.init()
|
||||
t.setAct(IterPrev)
|
||||
|
||||
ok := t.Iter.Prev()
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
if t.Pos > 0 {
|
||||
t.Pos--
|
||||
Expect(ok).Should(BeTrue(), t.Text())
|
||||
t.TestKV()
|
||||
} else {
|
||||
t.Pos = -1
|
||||
Expect(ok).ShouldNot(BeTrue(), t.Text())
|
||||
}
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) Seek(i int) {
|
||||
t.init()
|
||||
t.setAct(IterSeek)
|
||||
|
||||
key, _ := t.Index(i)
|
||||
oldKey, _ := t.IndexOrNil(t.Pos)
|
||||
|
||||
ok := t.Iter.Seek(key)
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text()))
|
||||
|
||||
t.Pos = i
|
||||
t.TestKV()
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) SeekInexact(i int) {
|
||||
t.init()
|
||||
t.setAct(IterSeek)
|
||||
var key0 []byte
|
||||
key1, _ := t.Index(i)
|
||||
if i > 0 {
|
||||
key0, _ = t.Index(i - 1)
|
||||
}
|
||||
key := BytesSeparator(key0, key1)
|
||||
oldKey, _ := t.IndexOrNil(t.Pos)
|
||||
|
||||
ok := t.Iter.Seek(key)
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text()))
|
||||
|
||||
t.Pos = i
|
||||
t.TestKV()
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) SeekKey(key []byte) {
|
||||
t.init()
|
||||
t.setAct(IterSeek)
|
||||
oldKey, _ := t.IndexOrNil(t.Pos)
|
||||
i := t.Search(key)
|
||||
|
||||
ok := t.Iter.Seek(key)
|
||||
Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
|
||||
if i < t.Len() {
|
||||
key_, _ := t.Index(i)
|
||||
Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text()))
|
||||
t.Pos = i
|
||||
t.TestKV()
|
||||
} else {
|
||||
Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text()))
|
||||
}
|
||||
|
||||
t.Pos = i
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) SOI() {
|
||||
t.init()
|
||||
t.setAct(IterSOI)
|
||||
Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text())
|
||||
for i := 0; i < 3; i++ {
|
||||
t.Prev()
|
||||
}
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) EOI() {
|
||||
t.init()
|
||||
t.setAct(IterEOI)
|
||||
Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text())
|
||||
for i := 0; i < 3; i++ {
|
||||
t.Next()
|
||||
}
|
||||
t.post()
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) {
|
||||
t.init()
|
||||
for old := t.Pos; t.Pos > 0; old = t.Pos {
|
||||
fn(t)
|
||||
Expect(t.Pos).Should(BeNumerically("<", old), t.Text())
|
||||
}
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) {
|
||||
t.init()
|
||||
for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos {
|
||||
fn(t)
|
||||
Expect(t.Pos).Should(BeNumerically(">", old), t.Text())
|
||||
}
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) PrevAll() {
|
||||
t.WalkPrev(func(t *IteratorTesting) {
|
||||
t.Prev()
|
||||
})
|
||||
}
|
||||
|
||||
func (t *IteratorTesting) NextAll() {
|
||||
t.WalkNext(func(t *IteratorTesting) {
|
||||
t.Next()
|
||||
})
|
||||
}
|
||||
|
||||
func DoIteratorTesting(t *IteratorTesting) {
|
||||
if t.Rand == nil {
|
||||
t.Rand = NewRand()
|
||||
}
|
||||
t.SOI()
|
||||
t.NextAll()
|
||||
t.First()
|
||||
t.SOI()
|
||||
t.NextAll()
|
||||
t.EOI()
|
||||
t.PrevAll()
|
||||
t.Last()
|
||||
t.EOI()
|
||||
t.PrevAll()
|
||||
t.SOI()
|
||||
|
||||
t.NextAll()
|
||||
t.PrevAll()
|
||||
t.NextAll()
|
||||
t.Last()
|
||||
t.PrevAll()
|
||||
t.First()
|
||||
t.NextAll()
|
||||
t.EOI()
|
||||
|
||||
ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
|
||||
t.Seek(i)
|
||||
})
|
||||
|
||||
ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
|
||||
t.SeekInexact(i)
|
||||
})
|
||||
|
||||
ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
|
||||
t.Seek(i)
|
||||
if i%2 != 0 {
|
||||
t.PrevAll()
|
||||
t.SOI()
|
||||
} else {
|
||||
t.NextAll()
|
||||
t.EOI()
|
||||
}
|
||||
})
|
||||
|
||||
for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} {
|
||||
t.SeekKey([]byte(key))
|
||||
}
|
||||
}
|
||||
352
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/kv.go
generated
vendored
Normal file
@ -0,0 +1,352 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type KeyValueEntry struct {
|
||||
key, value []byte
|
||||
}
|
||||
|
||||
type KeyValue struct {
|
||||
entries []KeyValueEntry
|
||||
nbytes int
|
||||
}
|
||||
|
||||
func (kv *KeyValue) Put(key, value []byte) {
|
||||
if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 {
|
||||
panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key))
|
||||
}
|
||||
kv.entries = append(kv.entries, KeyValueEntry{key, value})
|
||||
kv.nbytes += len(key) + len(value)
|
||||
}
|
||||
|
||||
func (kv *KeyValue) PutString(key, value string) {
|
||||
kv.Put([]byte(key), []byte(value))
|
||||
}
|
||||
|
||||
func (kv *KeyValue) PutU(key, value []byte) bool {
|
||||
if i, exist := kv.Get(key); !exist {
|
||||
if i < kv.Len() {
|
||||
kv.entries = append(kv.entries[:i+1], kv.entries[i:]...)
|
||||
kv.entries[i] = KeyValueEntry{key, value}
|
||||
} else {
|
||||
kv.entries = append(kv.entries, KeyValueEntry{key, value})
|
||||
}
|
||||
kv.nbytes += len(key) + len(value)
|
||||
return true
|
||||
} else {
|
||||
kv.nbytes += len(value) - len(kv.ValueAt(i))
|
||||
kv.entries[i].value = value
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (kv *KeyValue) PutUString(key, value string) bool {
|
||||
return kv.PutU([]byte(key), []byte(value))
|
||||
}
|
||||
|
||||
func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) {
|
||||
i, exist := kv.Get(key)
|
||||
if exist {
|
||||
value = kv.entries[i].value
|
||||
kv.DeleteIndex(i)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (kv *KeyValue) DeleteIndex(i int) bool {
|
||||
if i < kv.Len() {
|
||||
kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i))
|
||||
kv.entries = append(kv.entries[:i], kv.entries[i+1:]...)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (kv KeyValue) Len() int {
|
||||
return len(kv.entries)
|
||||
}
|
||||
|
||||
func (kv *KeyValue) Size() int {
|
||||
return kv.nbytes
|
||||
}
|
||||
|
||||
func (kv KeyValue) KeyAt(i int) []byte {
|
||||
return kv.entries[i].key
|
||||
}
|
||||
|
||||
func (kv KeyValue) ValueAt(i int) []byte {
|
||||
return kv.entries[i].value
|
||||
}
|
||||
|
||||
func (kv KeyValue) Index(i int) (key, value []byte) {
|
||||
if i < 0 || i >= len(kv.entries) {
|
||||
panic(fmt.Sprintf("Index #%d: out of range", i))
|
||||
}
|
||||
return kv.entries[i].key, kv.entries[i].value
|
||||
}
|
||||
|
||||
func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) {
|
||||
key, value = kv.Index(i)
|
||||
var key0 []byte
|
||||
var key1 = kv.KeyAt(i)
|
||||
if i > 0 {
|
||||
key0 = kv.KeyAt(i - 1)
|
||||
}
|
||||
key_ = BytesSeparator(key0, key1)
|
||||
return
|
||||
}
|
||||
|
||||
func (kv KeyValue) IndexOrNil(i int) (key, value []byte) {
|
||||
if i >= 0 && i < len(kv.entries) {
|
||||
return kv.entries[i].key, kv.entries[i].value
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (kv KeyValue) IndexString(i int) (key, value string) {
|
||||
key_, _value := kv.Index(i)
|
||||
return string(key_), string(_value)
|
||||
}
|
||||
|
||||
func (kv KeyValue) Search(key []byte) int {
|
||||
return sort.Search(kv.Len(), func(i int) bool {
|
||||
return cmp.Compare(kv.KeyAt(i), key) >= 0
|
||||
})
|
||||
}
|
||||
|
||||
func (kv KeyValue) SearchString(key string) int {
|
||||
return kv.Search([]byte(key))
|
||||
}
|
||||
|
||||
func (kv KeyValue) Get(key []byte) (i int, exist bool) {
|
||||
i = kv.Search(key)
|
||||
if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 {
|
||||
exist = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (kv KeyValue) GetString(key string) (i int, exist bool) {
|
||||
return kv.Get([]byte(key))
|
||||
}
|
||||
|
||||
func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) {
|
||||
for i, x := range kv.entries {
|
||||
fn(i, x.key, x.value)
|
||||
}
|
||||
}
|
||||
|
||||
func (kv KeyValue) IterateString(fn func(i int, key, value string)) {
|
||||
kv.Iterate(func(i int, key, value []byte) {
|
||||
fn(i, string(key), string(value))
|
||||
})
|
||||
}
|
||||
|
||||
func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) {
|
||||
ShuffledIndex(rnd, kv.Len(), 1, func(i int) {
|
||||
fn(i, kv.entries[i].key, kv.entries[i].value)
|
||||
})
|
||||
}
|
||||
|
||||
func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) {
|
||||
kv.IterateShuffled(rnd, func(i int, key, value []byte) {
|
||||
fn(i, string(key), string(value))
|
||||
})
|
||||
}
|
||||
|
||||
func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) {
|
||||
for i := range kv.entries {
|
||||
key_, key, value := kv.IndexInexact(i)
|
||||
fn(i, key_, key, value)
|
||||
}
|
||||
}
|
||||
|
||||
func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) {
|
||||
kv.IterateInexact(func(i int, key_, key, value []byte) {
|
||||
fn(i, string(key_), string(key), string(value))
|
||||
})
|
||||
}
|
||||
|
||||
func (kv KeyValue) Clone() KeyValue {
|
||||
return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes}
|
||||
}
|
||||
|
||||
func (kv KeyValue) Slice(start, limit int) KeyValue {
|
||||
if start < 0 || limit > kv.Len() {
|
||||
panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit))
|
||||
} else if limit < start {
|
||||
panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit))
|
||||
}
|
||||
return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes}
|
||||
}
|
||||
|
||||
func (kv KeyValue) SliceKey(start, limit []byte) KeyValue {
|
||||
start_ := 0
|
||||
limit_ := kv.Len()
|
||||
if start != nil {
|
||||
start_ = kv.Search(start)
|
||||
}
|
||||
if limit != nil {
|
||||
limit_ = kv.Search(limit)
|
||||
}
|
||||
return kv.Slice(start_, limit_)
|
||||
}
|
||||
|
||||
func (kv KeyValue) SliceKeyString(start, limit string) KeyValue {
|
||||
return kv.SliceKey([]byte(start), []byte(limit))
|
||||
}
|
||||
|
||||
func (kv KeyValue) SliceRange(r *util.Range) KeyValue {
|
||||
if r != nil {
|
||||
return kv.SliceKey(r.Start, r.Limit)
|
||||
}
|
||||
return kv.Clone()
|
||||
}
|
||||
|
||||
func (kv KeyValue) Range(start, limit int) (r util.Range) {
|
||||
if kv.Len() > 0 {
|
||||
if start == kv.Len() {
|
||||
r.Start = BytesAfter(kv.KeyAt(start - 1))
|
||||
} else {
|
||||
r.Start = kv.KeyAt(start)
|
||||
}
|
||||
}
|
||||
if limit < kv.Len() {
|
||||
r.Limit = kv.KeyAt(limit)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func KeyValue_EmptyKey() *KeyValue {
|
||||
kv := &KeyValue{}
|
||||
kv.PutString("", "v")
|
||||
return kv
|
||||
}
|
||||
|
||||
func KeyValue_EmptyValue() *KeyValue {
|
||||
kv := &KeyValue{}
|
||||
kv.PutString("abc", "")
|
||||
kv.PutString("abcd", "")
|
||||
return kv
|
||||
}
|
||||
|
||||
func KeyValue_OneKeyValue() *KeyValue {
|
||||
kv := &KeyValue{}
|
||||
kv.PutString("abc", "v")
|
||||
return kv
|
||||
}
|
||||
|
||||
func KeyValue_BigValue() *KeyValue {
|
||||
kv := &KeyValue{}
|
||||
kv.PutString("big1", strings.Repeat("1", 200000))
|
||||
return kv
|
||||
}
|
||||
|
||||
func KeyValue_SpecialKey() *KeyValue {
|
||||
kv := &KeyValue{}
|
||||
kv.PutString("\xff\xff", "v3")
|
||||
return kv
|
||||
}
|
||||
|
||||
func KeyValue_MultipleKeyValue() *KeyValue {
|
||||
kv := &KeyValue{}
|
||||
kv.PutString("a", "v")
|
||||
kv.PutString("aa", "v1")
|
||||
kv.PutString("aaa", "v2")
|
||||
kv.PutString("aaacccccccccc", "v2")
|
||||
kv.PutString("aaaccccccccccd", "v3")
|
||||
kv.PutString("aaaccccccccccf", "v4")
|
||||
kv.PutString("aaaccccccccccfg", "v5")
|
||||
kv.PutString("ab", "v6")
|
||||
kv.PutString("abc", "v7")
|
||||
kv.PutString("abcd", "v8")
|
||||
kv.PutString("accccccccccccccc", "v9")
|
||||
kv.PutString("b", "v10")
|
||||
kv.PutString("bb", "v11")
|
||||
kv.PutString("bc", "v12")
|
||||
kv.PutString("c", "v13")
|
||||
kv.PutString("c1", "v13")
|
||||
kv.PutString("czzzzzzzzzzzzzz", "v14")
|
||||
kv.PutString("fffffffffffffff", "v15")
|
||||
kv.PutString("g11", "v15")
|
||||
kv.PutString("g111", "v15")
|
||||
kv.PutString("g111\xff", "v15")
|
||||
kv.PutString("zz", "v16")
|
||||
kv.PutString("zzzzzzz", "v16")
|
||||
kv.PutString("zzzzzzzzzzzzzzzz", "v16")
|
||||
return kv
|
||||
}
|
||||
|
||||
var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy")
|
||||
|
||||
func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue {
|
||||
if rnd == nil {
|
||||
rnd = NewRand()
|
||||
}
|
||||
if maxlen < minlen {
|
||||
panic("max len should >= min len")
|
||||
}
|
||||
|
||||
rrand := func(min, max int) int {
|
||||
if min == max {
|
||||
return max
|
||||
}
|
||||
return rnd.Intn(max-min) + min
|
||||
}
|
||||
|
||||
kv := &KeyValue{}
|
||||
endC := byte(len(keymap) - 1)
|
||||
gen := make([]byte, 0, maxlen)
|
||||
for i := 0; i < n; i++ {
|
||||
m := rrand(minlen, maxlen)
|
||||
last := gen
|
||||
retry:
|
||||
gen = last[:m]
|
||||
if k := len(last); m > k {
|
||||
for j := k; j < m; j++ {
|
||||
gen[j] = 0
|
||||
}
|
||||
} else {
|
||||
for j := m - 1; j >= 0; j-- {
|
||||
c := last[j]
|
||||
if c == endC {
|
||||
continue
|
||||
}
|
||||
gen[j] = c + 1
|
||||
for j += 1; j < m; j++ {
|
||||
gen[j] = 0
|
||||
}
|
||||
goto ok
|
||||
}
|
||||
if m < maxlen {
|
||||
m++
|
||||
goto retry
|
||||
}
|
||||
panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n))
|
||||
ok:
|
||||
}
|
||||
key := make([]byte, m)
|
||||
for j := 0; j < m; j++ {
|
||||
key[j] = keymap[gen[j]]
|
||||
}
|
||||
value := make([]byte, rrand(vminlen, vmaxlen))
|
||||
for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ {
|
||||
value[n] = 'x'
|
||||
}
|
||||
kv.Put(key, value)
|
||||
}
|
||||
return kv
|
||||
}
|
||||
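The KeyValue type above is the ordered fixture the new testutil-based tests build on. Below is a small usage sketch, for illustration only: it assumes the vendored github.com/pingcap/goleveldb/leveldb/testutil package is importable, and it appends keys in strictly increasing order, as KeyValue.Put requires.

package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/testutil"
)

func main() {
	kv := &testutil.KeyValue{}
	// KeyValue.Put panics if keys are not strictly increasing, so append in order.
	kv.PutString("a", "1")
	kv.PutString("ab", "2")
	kv.PutString("b", "3")

	fmt.Println("entries:", kv.Len(), "bytes:", kv.Size())
	kv.IterateString(func(i int, key, value string) {
		fmt.Printf("#%d %s=%s\n", i, key, value)
	})
}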
211
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/kvtest.go
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func TestFind(db Find, kv KeyValue) {
|
||||
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
|
||||
key_, key, value := kv.IndexInexact(i)
|
||||
|
||||
// Using exact key.
|
||||
rkey, rvalue, err := db.TestFind(key)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
|
||||
Expect(rkey).Should(Equal(key), "Key")
|
||||
Expect(rvalue).Should(Equal(value), "Value for key %q", key)
|
||||
|
||||
// Using inexact key.
|
||||
rkey, rvalue, err = db.TestFind(key_)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
|
||||
Expect(rkey).Should(Equal(key))
|
||||
Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFindAfterLast(db Find, kv KeyValue) {
|
||||
var key []byte
|
||||
if kv.Len() > 0 {
|
||||
key_, _ := kv.Index(kv.Len() - 1)
|
||||
key = BytesAfter(key_)
|
||||
}
|
||||
rkey, _, err := db.TestFind(key)
|
||||
Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
|
||||
Expect(err).Should(Equal(errors.ErrNotFound))
|
||||
}
|
||||
|
||||
func TestGet(db Get, kv KeyValue) {
|
||||
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
|
||||
key_, key, value := kv.IndexInexact(i)
|
||||
|
||||
// Using exact key.
|
||||
rvalue, err := db.TestGet(key)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
|
||||
Expect(rvalue).Should(Equal(value), "Value for key %q", key)
|
||||
|
||||
// Using inexact key.
|
||||
if len(key_) > 0 {
|
||||
_, err = db.TestGet(key_)
|
||||
Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
|
||||
Expect(err).Should(Equal(errors.ErrNotFound))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHas(db Has, kv KeyValue) {
|
||||
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
|
||||
key_, key, _ := kv.IndexInexact(i)
|
||||
|
||||
// Using exact key.
|
||||
ret, err := db.TestHas(key)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
|
||||
Expect(ret).Should(BeTrue(), "False for key %q", key)
|
||||
|
||||
// Using inexact key.
|
||||
if len(key_) > 0 {
|
||||
ret, err = db.TestHas(key_)
|
||||
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
|
||||
Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIter(db NewIterator, r *util.Range, kv KeyValue) {
|
||||
iter := db.TestNewIterator(r)
|
||||
Expect(iter.Error()).ShouldNot(HaveOccurred())
|
||||
|
||||
t := IteratorTesting{
|
||||
KeyValue: kv,
|
||||
Iter: iter,
|
||||
}
|
||||
|
||||
DoIteratorTesting(&t)
|
||||
iter.Release()
|
||||
}
|
||||
|
||||
func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
|
||||
if rnd == nil {
|
||||
rnd = NewRand()
|
||||
}
|
||||
|
||||
if p == nil {
|
||||
BeforeEach(func() {
|
||||
p = setup(kv)
|
||||
})
|
||||
if teardown != nil {
|
||||
AfterEach(func() {
|
||||
teardown(p)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
It("Should find all keys with Find", func() {
|
||||
if db, ok := p.(Find); ok {
|
||||
TestFind(db, kv)
|
||||
}
|
||||
})
|
||||
|
||||
It("Should return error if Find on key after the last", func() {
|
||||
if db, ok := p.(Find); ok {
|
||||
TestFindAfterLast(db, kv)
|
||||
}
|
||||
})
|
||||
|
||||
It("Should only find exact key with Get", func() {
|
||||
if db, ok := p.(Get); ok {
|
||||
TestGet(db, kv)
|
||||
}
|
||||
})
|
||||
|
||||
It("Should only find present key with Has", func() {
|
||||
if db, ok := p.(Has); ok {
|
||||
TestHas(db, kv)
|
||||
}
|
||||
})
|
||||
|
||||
It("Should iterates and seeks correctly", func(done Done) {
|
||||
if db, ok := p.(NewIterator); ok {
|
||||
TestIter(db, nil, kv.Clone())
|
||||
}
|
||||
done <- true
|
||||
}, 3.0)
|
||||
|
||||
It("Should iterates and seeks slice correctly", func(done Done) {
|
||||
if db, ok := p.(NewIterator); ok {
|
||||
RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
|
||||
type slice struct {
|
||||
r *util.Range
|
||||
start, limit int
|
||||
}
|
||||
|
||||
key_, _, _ := kv.IndexInexact(i)
|
||||
for _, x := range []slice{
|
||||
{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
|
||||
{&util.Range{Start: nil, Limit: key_}, 0, i},
|
||||
} {
|
||||
By(fmt.Sprintf("Random index of %d .. %d", x.start, x.limit), func() {
|
||||
TestIter(db, x.r, kv.Slice(x.start, x.limit))
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
done <- true
|
||||
}, 50.0)
|
||||
|
||||
It("Should iterates and seeks slice correctly", func(done Done) {
|
||||
if db, ok := p.(NewIterator); ok {
|
||||
RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
|
||||
By(fmt.Sprintf("Random range of %d .. %d", start, limit), func() {
|
||||
r := kv.Range(start, limit)
|
||||
TestIter(db, &r, kv.Slice(start, limit))
|
||||
})
|
||||
})
|
||||
}
|
||||
done <- true
|
||||
}, 50.0)
|
||||
}
|
||||
|
||||
func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
|
||||
Test := func(kv *KeyValue) func() {
|
||||
return func() {
|
||||
var p DB
|
||||
if setup != nil {
|
||||
Defer("setup", func() {
|
||||
p = setup(*kv)
|
||||
})
|
||||
}
|
||||
if teardown != nil {
|
||||
Defer("teardown", func() {
|
||||
teardown(p)
|
||||
})
|
||||
}
|
||||
if body != nil {
|
||||
p = body(*kv)
|
||||
}
|
||||
KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
|
||||
return p
|
||||
}, nil)
|
||||
}
|
||||
}
|
||||
|
||||
Describe("with no key/value (empty)", Test(&KeyValue{}))
|
||||
Describe("with empty key", Test(KeyValue_EmptyKey()))
|
||||
Describe("with empty value", Test(KeyValue_EmptyValue()))
|
||||
Describe("with one key/value", Test(KeyValue_OneKeyValue()))
|
||||
Describe("with big value", Test(KeyValue_BigValue()))
|
||||
Describe("with special key", Test(KeyValue_SpecialKey()))
|
||||
Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
|
||||
Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
|
||||
}
|
||||
694
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/storage.go
generated
vendored
Normal file
@ -0,0 +1,694 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
storageMu sync.Mutex
|
||||
storageUseFS = true
|
||||
storageKeepFS = false
|
||||
storageNum int
|
||||
)
|
||||
|
||||
type StorageMode int
|
||||
|
||||
const (
|
||||
ModeOpen StorageMode = 1 << iota
|
||||
ModeCreate
|
||||
ModeRemove
|
||||
ModeRename
|
||||
ModeRead
|
||||
ModeWrite
|
||||
ModeSync
|
||||
ModeClose
|
||||
)
|
||||
|
||||
const (
|
||||
modeOpen = iota
|
||||
modeCreate
|
||||
modeRemove
|
||||
modeRename
|
||||
modeRead
|
||||
modeWrite
|
||||
modeSync
|
||||
modeClose
|
||||
|
||||
modeCount
|
||||
)
|
||||
|
||||
const (
|
||||
typeManifest = iota
|
||||
typeJournal
|
||||
typeTable
|
||||
typeTemp
|
||||
|
||||
typeCount
|
||||
)
|
||||
|
||||
const flattenCount = modeCount * typeCount
|
||||
|
||||
func flattenType(m StorageMode, t storage.FileType) int {
|
||||
var x int
|
||||
switch m {
|
||||
case ModeOpen:
|
||||
x = modeOpen
|
||||
case ModeCreate:
|
||||
x = modeCreate
|
||||
case ModeRemove:
|
||||
x = modeRemove
|
||||
case ModeRename:
|
||||
x = modeRename
|
||||
case ModeRead:
|
||||
x = modeRead
|
||||
case ModeWrite:
|
||||
x = modeWrite
|
||||
case ModeSync:
|
||||
x = modeSync
|
||||
case ModeClose:
|
||||
x = modeClose
|
||||
default:
|
||||
panic("invalid storage mode")
|
||||
}
|
||||
x *= typeCount
|
||||
switch t {
|
||||
case storage.TypeManifest:
|
||||
return x + typeManifest
|
||||
case storage.TypeJournal:
|
||||
return x + typeJournal
|
||||
case storage.TypeTable:
|
||||
return x + typeTable
|
||||
case storage.TypeTemp:
|
||||
return x + typeTemp
|
||||
default:
|
||||
panic("invalid file type")
|
||||
}
|
||||
}
|
||||
|
||||
func listFlattenType(m StorageMode, t storage.FileType) []int {
|
||||
ret := make([]int, 0, flattenCount)
|
||||
add := func(x int) {
|
||||
x *= typeCount
|
||||
switch {
|
||||
case t&storage.TypeManifest != 0:
|
||||
ret = append(ret, x+typeManifest)
|
||||
case t&storage.TypeJournal != 0:
|
||||
ret = append(ret, x+typeJournal)
|
||||
case t&storage.TypeTable != 0:
|
||||
ret = append(ret, x+typeTable)
|
||||
case t&storage.TypeTemp != 0:
|
||||
ret = append(ret, x+typeTemp)
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case m&ModeOpen != 0:
|
||||
add(modeOpen)
|
||||
case m&ModeCreate != 0:
|
||||
add(modeCreate)
|
||||
case m&ModeRemove != 0:
|
||||
add(modeRemove)
|
||||
case m&ModeRename != 0:
|
||||
add(modeRename)
|
||||
case m&ModeRead != 0:
|
||||
add(modeRead)
|
||||
case m&ModeWrite != 0:
|
||||
add(modeWrite)
|
||||
case m&ModeSync != 0:
|
||||
add(modeSync)
|
||||
case m&ModeClose != 0:
|
||||
add(modeClose)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func packFile(fd storage.FileDesc) uint64 {
|
||||
if fd.Num>>(63-typeCount) != 0 {
|
||||
panic("overflow")
|
||||
}
|
||||
return uint64(fd.Num<<typeCount) | uint64(fd.Type)
|
||||
}
|
||||
|
||||
func unpackFile(x uint64) storage.FileDesc {
|
||||
return storage.FileDesc{storage.FileType(x) & storage.TypeAll, int64(x >> typeCount)}
|
||||
}
|
||||
|
||||
type emulatedError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (err emulatedError) Error() string {
|
||||
return fmt.Sprintf("emulated storage error: %v", err.err)
|
||||
}
|
||||
|
||||
type storageLock struct {
|
||||
s *Storage
|
||||
r util.Releaser
|
||||
}
|
||||
|
||||
func (l storageLock) Release() {
|
||||
l.r.Release()
|
||||
l.s.logI("storage lock released")
|
||||
}
|
||||
|
||||
type reader struct {
|
||||
s *Storage
|
||||
fd storage.FileDesc
|
||||
storage.Reader
|
||||
}
|
||||
|
||||
func (r *reader) Read(p []byte) (n int, err error) {
|
||||
err = r.s.emulateError(ModeRead, r.fd.Type)
|
||||
if err == nil {
|
||||
r.s.stall(ModeRead, r.fd.Type)
|
||||
n, err = r.Reader.Read(p)
|
||||
}
|
||||
r.s.count(ModeRead, r.fd.Type, n)
|
||||
if err != nil && err != io.EOF {
|
||||
r.s.logI("read error, fd=%s n=%d err=%v", r.fd, n, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *reader) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
err = r.s.emulateError(ModeRead, r.fd.Type)
|
||||
if err == nil {
|
||||
r.s.stall(ModeRead, r.fd.Type)
|
||||
n, err = r.Reader.ReadAt(p, off)
|
||||
}
|
||||
r.s.count(ModeRead, r.fd.Type, n)
|
||||
if err != nil && err != io.EOF {
|
||||
r.s.logI("readAt error, fd=%s offset=%d n=%d err=%v", r.fd, off, n, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *reader) Close() (err error) {
|
||||
return r.s.fileClose(r.fd, r.Reader)
|
||||
}
|
||||
|
||||
type writer struct {
|
||||
s *Storage
|
||||
fd storage.FileDesc
|
||||
storage.Writer
|
||||
}
|
||||
|
||||
func (w *writer) Write(p []byte) (n int, err error) {
|
||||
err = w.s.emulateError(ModeWrite, w.fd.Type)
|
||||
if err == nil {
|
||||
w.s.stall(ModeWrite, w.fd.Type)
|
||||
n, err = w.Writer.Write(p)
|
||||
}
|
||||
w.s.count(ModeWrite, w.fd.Type, n)
|
||||
if err != nil && err != io.EOF {
|
||||
w.s.logI("write error, fd=%s n=%d err=%v", w.fd, n, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *writer) Sync() (err error) {
|
||||
err = w.s.emulateError(ModeSync, w.fd.Type)
|
||||
if err == nil {
|
||||
w.s.stall(ModeSync, w.fd.Type)
|
||||
err = w.Writer.Sync()
|
||||
}
|
||||
w.s.count(ModeSync, w.fd.Type, 0)
|
||||
if err != nil {
|
||||
w.s.logI("sync error, fd=%s err=%v", w.fd, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *writer) Close() (err error) {
|
||||
return w.s.fileClose(w.fd, w.Writer)
|
||||
}
|
||||
|
||||
type Storage struct {
|
||||
storage.Storage
|
||||
path string
|
||||
onClose func() (preserve bool, err error)
|
||||
onLog func(str string)
|
||||
|
||||
lmu sync.Mutex
|
||||
lb bytes.Buffer
|
||||
|
||||
mu sync.Mutex
|
||||
rand *rand.Rand
|
||||
// Open files, true=writer, false=reader
|
||||
opens map[uint64]bool
|
||||
counters [flattenCount]int
|
||||
bytesCounter [flattenCount]int64
|
||||
emulatedError [flattenCount]error
|
||||
emulatedErrorOnce [flattenCount]bool
|
||||
emulatedRandomError [flattenCount]error
|
||||
emulatedRandomErrorProb [flattenCount]float64
|
||||
stallCond sync.Cond
|
||||
stalled [flattenCount]bool
|
||||
}
|
||||
|
||||
func (s *Storage) log(skip int, str string) {
|
||||
s.lmu.Lock()
|
||||
defer s.lmu.Unlock()
|
||||
_, file, line, ok := runtime.Caller(skip + 2)
|
||||
if ok {
|
||||
// Truncate file name at last file name separator.
|
||||
if index := strings.LastIndex(file, "/"); index >= 0 {
|
||||
file = file[index+1:]
|
||||
} else if index = strings.LastIndex(file, "\\"); index >= 0 {
|
||||
file = file[index+1:]
|
||||
}
|
||||
} else {
|
||||
file = "???"
|
||||
line = 1
|
||||
}
|
||||
fmt.Fprintf(&s.lb, "%s:%d: ", file, line)
|
||||
lines := strings.Split(str, "\n")
|
||||
if l := len(lines); l > 1 && lines[l-1] == "" {
|
||||
lines = lines[:l-1]
|
||||
}
|
||||
for i, line := range lines {
|
||||
if i > 0 {
|
||||
s.lb.WriteString("\n\t")
|
||||
}
|
||||
s.lb.WriteString(line)
|
||||
}
|
||||
if s.onLog != nil {
|
||||
s.onLog(s.lb.String())
|
||||
s.lb.Reset()
|
||||
} else {
|
||||
s.lb.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) logISkip(skip int, format string, args ...interface{}) {
|
||||
pc, _, _, ok := runtime.Caller(skip + 1)
|
||||
if ok {
|
||||
if f := runtime.FuncForPC(pc); f != nil {
|
||||
fname := f.Name()
|
||||
if index := strings.LastIndex(fname, "."); index >= 0 {
|
||||
fname = fname[index+1:]
|
||||
}
|
||||
format = fname + ": " + format
|
||||
}
|
||||
}
|
||||
s.log(skip+1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (s *Storage) logI(format string, args ...interface{}) {
|
||||
s.logISkip(1, format, args...)
|
||||
}
|
||||
|
||||
func (s *Storage) OnLog(onLog func(log string)) {
|
||||
s.lmu.Lock()
|
||||
s.onLog = onLog
|
||||
if s.lb.Len() != 0 {
|
||||
log := s.lb.String()
|
||||
s.onLog(log[:len(log)-1])
|
||||
s.lb.Reset()
|
||||
}
|
||||
s.lmu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Storage) Log(str string) {
|
||||
s.log(1, "Log: "+str)
|
||||
s.Storage.Log(str)
|
||||
}
|
||||
|
||||
func (s *Storage) Lock() (l storage.Lock, err error) {
|
||||
l, err = s.Storage.Lock()
|
||||
if err != nil {
|
||||
s.logI("storage locking failed, err=%v", err)
|
||||
} else {
|
||||
s.logI("storage locked")
|
||||
l = storageLock{s, l}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) List(t storage.FileType) (fds []storage.FileDesc, err error) {
|
||||
fds, err = s.Storage.List(t)
|
||||
if err != nil {
|
||||
s.logI("list failed, err=%v", err)
|
||||
return
|
||||
}
|
||||
s.logI("list, type=0x%x count=%d", int(t), len(fds))
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) GetMeta() (fd storage.FileDesc, err error) {
|
||||
fd, err = s.Storage.GetMeta()
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
s.logI("get meta failed, err=%v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
s.logI("get meta, fd=%s", fd)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) SetMeta(fd storage.FileDesc) error {
|
||||
ExpectWithOffset(1, fd.Type).To(Equal(storage.TypeManifest))
|
||||
err := s.Storage.SetMeta(fd)
|
||||
if err != nil {
|
||||
s.logI("set meta failed, fd=%s err=%v", fd, err)
|
||||
} else {
|
||||
s.logI("set meta, fd=%s", fd)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Storage) fileClose(fd storage.FileDesc, closer io.Closer) (err error) {
|
||||
err = s.emulateError(ModeClose, fd.Type)
|
||||
if err == nil {
|
||||
s.stall(ModeClose, fd.Type)
|
||||
}
|
||||
x := packFile(fd)
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err == nil {
|
||||
ExpectWithOffset(2, s.opens).To(HaveKey(x), "File closed, fd=%s", fd)
|
||||
err = closer.Close()
|
||||
}
|
||||
s.countNB(ModeClose, fd.Type, 0)
|
||||
writer := s.opens[x]
|
||||
if err != nil {
|
||||
s.logISkip(1, "file close failed, fd=%s writer=%v err=%v", fd, writer, err)
|
||||
} else {
|
||||
s.logISkip(1, "file closed, fd=%s writer=%v", fd, writer)
|
||||
delete(s.opens, x)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) assertOpen(fd storage.FileDesc) {
|
||||
x := packFile(fd)
|
||||
ExpectWithOffset(2, s.opens).NotTo(HaveKey(x), "File open, fd=%s writer=%v", fd, s.opens[x])
|
||||
}
|
||||
|
||||
func (s *Storage) Open(fd storage.FileDesc) (r storage.Reader, err error) {
|
||||
err = s.emulateError(ModeOpen, fd.Type)
|
||||
if err == nil {
|
||||
s.stall(ModeOpen, fd.Type)
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err == nil {
|
||||
s.assertOpen(fd)
|
||||
s.countNB(ModeOpen, fd.Type, 0)
|
||||
r, err = s.Storage.Open(fd)
|
||||
}
|
||||
if err != nil {
|
||||
s.logI("file open failed, fd=%s err=%v", fd, err)
|
||||
} else {
|
||||
s.logI("file opened, fd=%s", fd)
|
||||
s.opens[packFile(fd)] = false
|
||||
r = &reader{s, fd, r}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) Create(fd storage.FileDesc) (w storage.Writer, err error) {
|
||||
err = s.emulateError(ModeCreate, fd.Type)
|
||||
if err == nil {
|
||||
s.stall(ModeCreate, fd.Type)
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err == nil {
|
||||
s.assertOpen(fd)
|
||||
s.countNB(ModeCreate, fd.Type, 0)
|
||||
w, err = s.Storage.Create(fd)
|
||||
}
|
||||
if err != nil {
|
||||
s.logI("file create failed, fd=%s err=%v", fd, err)
|
||||
} else {
|
||||
s.logI("file created, fd=%s", fd)
|
||||
s.opens[packFile(fd)] = true
|
||||
w = &writer{s, fd, w}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) Remove(fd storage.FileDesc) (err error) {
|
||||
err = s.emulateError(ModeRemove, fd.Type)
|
||||
if err == nil {
|
||||
s.stall(ModeRemove, fd.Type)
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err == nil {
|
||||
s.assertOpen(fd)
|
||||
s.countNB(ModeRemove, fd.Type, 0)
|
||||
err = s.Storage.Remove(fd)
|
||||
}
|
||||
if err != nil {
|
||||
s.logI("file remove failed, fd=%s err=%v", fd, err)
|
||||
} else {
|
||||
s.logI("file removed, fd=%s", fd)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) ForceRemove(fd storage.FileDesc) (err error) {
|
||||
s.countNB(ModeRemove, fd.Type, 0)
|
||||
if err = s.Storage.Remove(fd); err != nil {
|
||||
s.logI("file remove failed (forced), fd=%s err=%v", fd, err)
|
||||
} else {
|
||||
s.logI("file removed (forced), fd=%s", fd)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) Rename(oldfd, newfd storage.FileDesc) (err error) {
|
||||
err = s.emulateError(ModeRename, oldfd.Type)
|
||||
if err == nil {
|
||||
s.stall(ModeRename, oldfd.Type)
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if err == nil {
|
||||
s.assertOpen(oldfd)
|
||||
s.assertOpen(newfd)
|
||||
s.countNB(ModeRename, oldfd.Type, 0)
|
||||
err = s.Storage.Rename(oldfd, newfd)
|
||||
}
|
||||
if err != nil {
|
||||
s.logI("file rename failed, oldfd=%s newfd=%s err=%v", oldfd, newfd, err)
|
||||
} else {
|
||||
s.logI("file renamed, oldfd=%s newfd=%s", oldfd, newfd)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) ForceRename(oldfd, newfd storage.FileDesc) (err error) {
|
||||
s.countNB(ModeRename, oldfd.Type, 0)
|
||||
if err = s.Storage.Rename(oldfd, newfd); err != nil {
|
||||
s.logI("file rename failed (forced), oldfd=%s newfd=%s err=%v", oldfd, newfd, err)
|
||||
} else {
|
||||
s.logI("file renamed (forced), oldfd=%s newfd=%s", oldfd, newfd)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) openFiles() string {
|
||||
out := "Open files:"
|
||||
for x, writer := range s.opens {
|
||||
fd := unpackFile(x)
|
||||
out += fmt.Sprintf("\n · fd=%s writer=%v", fd, writer)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (s *Storage) CloseCheck() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
|
||||
}
|
||||
|
||||
func (s *Storage) OnClose(onClose func() (preserve bool, err error)) {
|
||||
s.mu.Lock()
|
||||
s.onClose = onClose
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Storage) Close() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
|
||||
err := s.Storage.Close()
|
||||
if err != nil {
|
||||
s.logI("storage closing failed, err=%v", err)
|
||||
} else {
|
||||
s.logI("storage closed")
|
||||
}
|
||||
var preserve bool
|
||||
if s.onClose != nil {
|
||||
var err0 error
|
||||
if preserve, err0 = s.onClose(); err0 != nil {
|
||||
s.logI("onClose error, err=%v", err0)
|
||||
}
|
||||
}
|
||||
if s.path != "" {
|
||||
if storageKeepFS || preserve {
|
||||
s.logI("storage is preserved, path=%v", s.path)
|
||||
} else {
|
||||
if err1 := os.RemoveAll(s.path); err1 != nil {
|
||||
s.logI("cannot remove storage, err=%v", err1)
|
||||
} else {
|
||||
s.logI("storage has been removed")
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) {
|
||||
s.counters[flattenType(m, t)]++
|
||||
s.bytesCounter[flattenType(m, t)] += int64(n)
|
||||
}
|
||||
|
||||
func (s *Storage) count(m StorageMode, t storage.FileType, n int) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.countNB(m, t, n)
|
||||
}
|
||||
|
||||
func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) {
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
s.counters[x] = 0
|
||||
s.bytesCounter[x] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) {
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
count += s.counters[x]
|
||||
bytes += s.bytesCounter[x]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Storage) emulateError(m StorageMode, t storage.FileType) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
x := flattenType(m, t)
|
||||
if err := s.emulatedError[x]; err != nil {
|
||||
if s.emulatedErrorOnce[x] {
|
||||
s.emulatedError[x] = nil
|
||||
}
|
||||
return emulatedError{err}
|
||||
}
|
||||
if err := s.emulatedRandomError[x]; err != nil && s.rand.Float64() < s.emulatedRandomErrorProb[x] {
|
||||
return emulatedError{err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
s.emulatedError[x] = err
|
||||
s.emulatedErrorOnce[x] = false
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) EmulateErrorOnce(m StorageMode, t storage.FileType, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
s.emulatedError[x] = err
|
||||
s.emulatedErrorOnce[x] = true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) EmulateRandomError(m StorageMode, t storage.FileType, prob float64, err error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
s.emulatedRandomError[x] = err
|
||||
s.emulatedRandomErrorProb[x] = prob
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) stall(m StorageMode, t storage.FileType) {
|
||||
x := flattenType(m, t)
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for s.stalled[x] {
|
||||
s.stallCond.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) Stall(m StorageMode, t storage.FileType) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
s.stalled[x] = true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) Release(m StorageMode, t storage.FileType) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for _, x := range listFlattenType(m, t) {
|
||||
s.stalled[x] = false
|
||||
}
|
||||
s.stallCond.Broadcast()
|
||||
}
|
||||
|
||||
func NewStorage() *Storage {
|
||||
var (
|
||||
stor storage.Storage
|
||||
path string
|
||||
)
|
||||
if storageUseFS {
|
||||
for {
|
||||
storageMu.Lock()
|
||||
num := storageNum
|
||||
storageNum++
|
||||
storageMu.Unlock()
|
||||
path = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
stor, err = storage.OpenFile(path, false)
|
||||
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
stor = storage.NewMemStorage()
|
||||
}
|
||||
s := &Storage{
|
||||
Storage: stor,
|
||||
path: path,
|
||||
rand: NewRand(),
|
||||
opens: make(map[uint64]bool),
|
||||
}
|
||||
s.stallCond.L = &s.mu
|
||||
if s.path != "" {
|
||||
s.logI("using FS storage")
|
||||
s.logI("storage path: %s", s.path)
|
||||
} else {
|
||||
s.logI("using MEM storage")
|
||||
}
|
||||
return s
|
||||
}
|
||||
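
For context, a minimal sketch of how this test Storage wrapper is typically driven (not part of this commit; the injected error value and the ModeWrite/TypeTable choice are illustrative):

package leveldb_test

import (
	"errors"
	"testing"

	"github.com/onsi/gomega"

	"github.com/pingcap/goleveldb/leveldb"
	"github.com/pingcap/goleveldb/leveldb/storage"
	"github.com/pingcap/goleveldb/leveldb/testutil"
)

func TestInjectedTableWriteError(t *testing.T) {
	gomega.RegisterTestingT(t) // the wrapper's assertions use gomega internally
	stor := testutil.NewStorage()

	db, err := leveldb.Open(stor, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Fail the next table-file write once; later writes succeed again.
	stor.EmulateErrorOnce(testutil.ModeWrite, storage.TypeTable, errors.New("injected write error"))

	// The put itself lands in the memdb; the injected error surfaces later
	// (e.g. during a memdb flush) and shows up in the storage log and counters.
	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		t.Fatal(err)
	}

	if n, nbytes := stor.Counter(testutil.ModeWrite, storage.TypeTable); n > 0 {
		t.Logf("table writes: count=%d bytes=%d", n, nbytes)
	}

	if err := db.Close(); err != nil {
		t.Logf("close: %v", err)
	}
	stor.Close()
}
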
171
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil/util.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/comparer"
|
||||
)
|
||||
|
||||
var (
|
||||
runfn = make(map[string][]func())
|
||||
runmu sync.Mutex
|
||||
)
|
||||
|
||||
func Defer(args ...interface{}) bool {
|
||||
var (
|
||||
group string
|
||||
fn func()
|
||||
)
|
||||
for _, arg := range args {
|
||||
v := reflect.ValueOf(arg)
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
group = v.String()
|
||||
case reflect.Func:
|
||||
r := reflect.ValueOf(&fn).Elem()
|
||||
r.Set(v)
|
||||
}
|
||||
}
|
||||
if fn != nil {
|
||||
runmu.Lock()
|
||||
runfn[group] = append(runfn[group], fn)
|
||||
runmu.Unlock()
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func RunDefer(groups ...string) bool {
|
||||
if len(groups) == 0 {
|
||||
groups = append(groups, "")
|
||||
}
|
||||
runmu.Lock()
|
||||
var runfn_ []func()
|
||||
for _, group := range groups {
|
||||
runfn_ = append(runfn_, runfn[group]...)
|
||||
delete(runfn, group)
|
||||
}
|
||||
runmu.Unlock()
|
||||
for _, fn := range runfn_ {
|
||||
fn()
|
||||
}
|
||||
return runfn_ != nil
|
||||
}
|
||||
|
||||
func RandomSeed() int64 {
|
||||
if !flag.Parsed() {
|
||||
panic("random seed not initialized")
|
||||
}
|
||||
return config.GinkgoConfig.RandomSeed
|
||||
}
|
||||
|
||||
func NewRand() *rand.Rand {
|
||||
return rand.New(rand.NewSource(RandomSeed()))
|
||||
}
|
||||
|
||||
var cmp = comparer.DefaultComparer
|
||||
|
||||
func BytesSeparator(a, b []byte) []byte {
|
||||
if bytes.Equal(a, b) {
|
||||
return b
|
||||
}
|
||||
i, n := 0, len(a)
|
||||
if n > len(b) {
|
||||
n = len(b)
|
||||
}
|
||||
for ; i < n && (a[i] == b[i]); i++ {
|
||||
}
|
||||
x := append([]byte{}, a[:i]...)
|
||||
if i < n {
|
||||
if c := a[i] + 1; c < b[i] {
|
||||
return append(x, c)
|
||||
}
|
||||
x = append(x, a[i])
|
||||
i++
|
||||
}
|
||||
for ; i < len(a); i++ {
|
||||
if c := a[i]; c < 0xff {
|
||||
return append(x, c+1)
|
||||
} else {
|
||||
x = append(x, c)
|
||||
}
|
||||
}
|
||||
if len(b) > i && b[i] > 0 {
|
||||
return append(x, b[i]-1)
|
||||
}
|
||||
return append(x, 'x')
|
||||
}
|
||||
|
||||
func BytesAfter(b []byte) []byte {
|
||||
var x []byte
|
||||
for _, c := range b {
|
||||
if c < 0xff {
|
||||
return append(x, c+1)
|
||||
} else {
|
||||
x = append(x, c)
|
||||
}
|
||||
}
|
||||
return append(x, 'x')
|
||||
}
|
||||
|
||||
func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
|
||||
if rnd == nil {
|
||||
rnd = NewRand()
|
||||
}
|
||||
for x := 0; x < round; x++ {
|
||||
fn(rnd.Intn(n))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
|
||||
if rnd == nil {
|
||||
rnd = NewRand()
|
||||
}
|
||||
for x := 0; x < round; x++ {
|
||||
for _, i := range rnd.Perm(n) {
|
||||
fn(i)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
|
||||
if rnd == nil {
|
||||
rnd = NewRand()
|
||||
}
|
||||
for x := 0; x < round; x++ {
|
||||
start := rnd.Intn(n)
|
||||
length := 0
|
||||
if j := n - start; j > 0 {
|
||||
length = rnd.Intn(j)
|
||||
}
|
||||
fn(start, start+length)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func Max(x, y int) int {
|
||||
if x > y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func Min(x, y int) int {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
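
A small, self-contained sketch of the iteration and key helpers defined above (not from this commit; the seed and the sample keys are arbitrary):

package main

import (
	"fmt"
	"math/rand"

	"github.com/pingcap/goleveldb/leveldb/testutil"
)

func main() {
	// Pass an explicit source so testutil.NewRand (which needs the ginkgo
	// seed flag) is not required.
	rnd := rand.New(rand.NewSource(42))

	// Visit indexes 0..4 twice, each pass in a random permutation.
	testutil.ShuffledIndex(rnd, 5, 2, func(i int) {
		fmt.Print(i, " ")
	})
	fmt.Println()

	// Generate 3 random [start, limit) ranges within [0, 10).
	testutil.RandomRange(rnd, 10, 3, func(start, limit int) {
		fmt.Printf("[%d,%d) ", start, limit)
	})
	fmt.Println()

	// A short key that sorts at or after "abc" and strictly before "abz".
	fmt.Printf("%q\n", testutil.BytesSeparator([]byte("abc"), []byte("abz")))
}
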
91
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/testutil_test.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type testingDB struct {
|
||||
*DB
|
||||
ro *opt.ReadOptions
|
||||
wo *opt.WriteOptions
|
||||
stor *testutil.Storage
|
||||
}
|
||||
|
||||
func (t *testingDB) TestPut(key []byte, value []byte) error {
|
||||
return t.Put(key, value, t.wo)
|
||||
}
|
||||
|
||||
func (t *testingDB) TestDelete(key []byte) error {
|
||||
return t.Delete(key, t.wo)
|
||||
}
|
||||
|
||||
func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
|
||||
return t.Get(key, t.ro)
|
||||
}
|
||||
|
||||
func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
|
||||
return t.Has(key, t.ro)
|
||||
}
|
||||
|
||||
func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return t.NewIterator(slice, t.ro)
|
||||
}
|
||||
|
||||
func (t *testingDB) TestClose() {
|
||||
err := t.Close()
|
||||
ExpectWithOffset(1, err).NotTo(HaveOccurred())
|
||||
err = t.stor.Close()
|
||||
ExpectWithOffset(1, err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB {
|
||||
stor := testutil.NewStorage()
|
||||
db, err := Open(stor, o)
|
||||
// FIXME: This may be called from outside It, which may cause panic.
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return &testingDB{
|
||||
DB: db,
|
||||
ro: ro,
|
||||
wo: wo,
|
||||
stor: stor,
|
||||
}
|
||||
}
|
||||
|
||||
type testingTransaction struct {
|
||||
*Transaction
|
||||
ro *opt.ReadOptions
|
||||
wo *opt.WriteOptions
|
||||
}
|
||||
|
||||
func (t *testingTransaction) TestPut(key []byte, value []byte) error {
|
||||
return t.Put(key, value, t.wo)
|
||||
}
|
||||
|
||||
func (t *testingTransaction) TestDelete(key []byte) error {
|
||||
return t.Delete(key, t.wo)
|
||||
}
|
||||
|
||||
func (t *testingTransaction) TestGet(key []byte) (value []byte, err error) {
|
||||
return t.Get(key, t.ro)
|
||||
}
|
||||
|
||||
func (t *testingTransaction) TestHas(key []byte) (ret bool, err error) {
|
||||
return t.Has(key, t.ro)
|
||||
}
|
||||
|
||||
func (t *testingTransaction) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return t.NewIterator(slice, t.ro)
|
||||
}
|
||||
|
||||
func (t *testingTransaction) TestClose() {}
|
||||
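
For reference, a brief sketch of the Transaction API that the testingTransaction wrapper above exercises (not part of this commit; OpenTransaction, Commit and Discard are the standard goleveldb entry points assumed to be present in this vendored copy):

package main

import (
	"log"

	"github.com/pingcap/goleveldb/leveldb"
	"github.com/pingcap/goleveldb/leveldb/storage"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tr, err := db.OpenTransaction()
	if err != nil {
		log.Fatal(err)
	}
	if err := tr.Put([]byte("k"), []byte("v"), nil); err != nil {
		tr.Discard()
		log.Fatal(err)
	}
	// Writes become visible atomically on commit.
	if err := tr.Commit(); err != nil {
		log.Fatal(err)
	}

	v, err := db.Get([]byte("k"), nil)
	log.Printf("k=%s err=%v", v, err)
}
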
@ -10,7 +10,7 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
func shorten(str string) string {
|
||||
@ -72,20 +72,20 @@ func maxInt(a, b int) int {
|
||||
return b
|
||||
}
|
||||
|
||||
type files []storage.File
|
||||
type fdSorter []storage.FileDesc
|
||||
|
||||
func (p files) Len() int {
|
||||
func (p fdSorter) Len() int {
|
||||
return len(p)
|
||||
}
|
||||
|
||||
func (p files) Less(i, j int) bool {
|
||||
return p[i].Num() < p[j].Num()
|
||||
func (p fdSorter) Less(i, j int) bool {
|
||||
return p[i].Num < p[j].Num
|
||||
}
|
||||
|
||||
func (p files) Swap(i, j int) {
|
||||
func (p fdSorter) Swap(i, j int) {
|
||||
p[i], p[j] = p[j], p[i]
|
||||
}
|
||||
|
||||
func (p files) sort() {
|
||||
sort.Sort(p)
|
||||
func sortFds(fds []storage.FileDesc) {
|
||||
sort.Sort(fdSorter(fds))
|
||||
}
|
||||
369
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/util/buffer_test.go
generated
vendored
Normal file
@ -0,0 +1,369 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const N = 10000 // make this bigger for a larger (and slower) test
|
||||
var data string // test data for write tests
|
||||
var testBytes []byte // test data; same as data but as a slice.
|
||||
|
||||
func init() {
|
||||
testBytes = make([]byte, N)
|
||||
for i := 0; i < N; i++ {
|
||||
testBytes[i] = 'a' + byte(i%26)
|
||||
}
|
||||
data = string(testBytes)
|
||||
}
|
||||
|
||||
// Verify that contents of buf match the string s.
|
||||
func check(t *testing.T, testname string, buf *Buffer, s string) {
|
||||
bytes := buf.Bytes()
|
||||
str := buf.String()
|
||||
if buf.Len() != len(bytes) {
|
||||
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
|
||||
}
|
||||
|
||||
if buf.Len() != len(str) {
|
||||
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
|
||||
}
|
||||
|
||||
if buf.Len() != len(s) {
|
||||
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
|
||||
}
|
||||
|
||||
if string(bytes) != s {
|
||||
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
|
||||
}
|
||||
}
|
||||
|
||||
// Fill buf through n writes of byte slice fub.
|
||||
// The initial contents of buf corresponds to the string s;
|
||||
// the result is the final contents of buf returned as a string.
|
||||
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
|
||||
check(t, testname+" (fill 1)", buf, s)
|
||||
for ; n > 0; n-- {
|
||||
m, err := buf.Write(fub)
|
||||
if m != len(fub) {
|
||||
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
|
||||
}
|
||||
s += string(fub)
|
||||
check(t, testname+" (fill 4)", buf, s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestNewBuffer(t *testing.T) {
|
||||
buf := NewBuffer(testBytes)
|
||||
check(t, "NewBuffer", buf, data)
|
||||
}
|
||||
|
||||
// Empty buf through repeated reads into fub.
|
||||
// The initial contents of buf corresponds to the string s.
|
||||
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
|
||||
check(t, testname+" (empty 1)", buf, s)
|
||||
|
||||
for {
|
||||
n, err := buf.Read(fub)
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
|
||||
}
|
||||
s = s[n:]
|
||||
check(t, testname+" (empty 3)", buf, s)
|
||||
}
|
||||
|
||||
check(t, testname+" (empty 4)", buf, "")
|
||||
}
|
||||
|
||||
func TestBasicOperations(t *testing.T) {
|
||||
var buf Buffer
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
check(t, "TestBasicOperations (1)", &buf, "")
|
||||
|
||||
buf.Reset()
|
||||
check(t, "TestBasicOperations (2)", &buf, "")
|
||||
|
||||
buf.Truncate(0)
|
||||
check(t, "TestBasicOperations (3)", &buf, "")
|
||||
|
||||
n, err := buf.Write([]byte(data[0:1]))
|
||||
if n != 1 {
|
||||
t.Errorf("wrote 1 byte, but n == %d", n)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("err should always be nil, but err == %s", err)
|
||||
}
|
||||
check(t, "TestBasicOperations (4)", &buf, "a")
|
||||
|
||||
buf.WriteByte(data[1])
|
||||
check(t, "TestBasicOperations (5)", &buf, "ab")
|
||||
|
||||
n, err = buf.Write([]byte(data[2:26]))
|
||||
if n != 24 {
|
||||
t.Errorf("wrote 25 bytes, but n == %d", n)
|
||||
}
|
||||
check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
|
||||
|
||||
buf.Truncate(26)
|
||||
check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
|
||||
|
||||
buf.Truncate(20)
|
||||
check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
|
||||
|
||||
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
|
||||
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
|
||||
|
||||
buf.WriteByte(data[1])
|
||||
c, err := buf.ReadByte()
|
||||
if err != nil {
|
||||
t.Error("ReadByte unexpected eof")
|
||||
}
|
||||
if c != data[1] {
|
||||
t.Errorf("ReadByte wrong value c=%v", c)
|
||||
}
|
||||
c, err = buf.ReadByte()
|
||||
if err == nil {
|
||||
t.Error("ReadByte unexpected not eof")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLargeByteWrites(t *testing.T) {
|
||||
var buf Buffer
|
||||
limit := 30
|
||||
if testing.Short() {
|
||||
limit = 9
|
||||
}
|
||||
for i := 3; i < limit; i += 3 {
|
||||
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
|
||||
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
|
||||
}
|
||||
check(t, "TestLargeByteWrites (3)", &buf, "")
|
||||
}
|
||||
|
||||
func TestLargeByteReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
|
||||
}
|
||||
check(t, "TestLargeByteReads (3)", &buf, "")
|
||||
}
|
||||
|
||||
func TestMixedReadsAndWrites(t *testing.T) {
|
||||
var buf Buffer
|
||||
s := ""
|
||||
for i := 0; i < 50; i++ {
|
||||
wlen := rand.Intn(len(data))
|
||||
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
|
||||
rlen := rand.Intn(len(data))
|
||||
fub := make([]byte, rlen)
|
||||
n, _ := buf.Read(fub)
|
||||
s = s[n:]
|
||||
}
|
||||
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
|
||||
}
|
||||
|
||||
func TestNil(t *testing.T) {
|
||||
var b *Buffer
|
||||
if b.String() != "<nil>" {
|
||||
t.Errorf("expected <nil>; got %q", b.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFrom(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
b.ReadFrom(&buf)
|
||||
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteTo(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
buf.WriteTo(&b)
|
||||
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNext(t *testing.T) {
|
||||
b := []byte{0, 1, 2, 3, 4}
|
||||
tmp := make([]byte, 5)
|
||||
for i := 0; i <= 5; i++ {
|
||||
for j := i; j <= 5; j++ {
|
||||
for k := 0; k <= 6; k++ {
|
||||
// 0 <= i <= j <= 5; 0 <= k <= 6
|
||||
// Check that if we start with a buffer
|
||||
// of length j at offset i and ask for
|
||||
// Next(k), we get the right bytes.
|
||||
buf := NewBuffer(b[0:j])
|
||||
n, _ := buf.Read(tmp[0:i])
|
||||
if n != i {
|
||||
t.Fatalf("Read %d returned %d", i, n)
|
||||
}
|
||||
bb := buf.Next(k)
|
||||
want := k
|
||||
if want > j-i {
|
||||
want = j - i
|
||||
}
|
||||
if len(bb) != want {
|
||||
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
|
||||
}
|
||||
for l, v := range bb {
|
||||
if v != byte(l+i) {
|
||||
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var readBytesTests = []struct {
|
||||
buffer string
|
||||
delim byte
|
||||
expected []string
|
||||
err error
|
||||
}{
|
||||
{"", 0, []string{""}, io.EOF},
|
||||
{"a\x00", 0, []string{"a\x00"}, nil},
|
||||
{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
|
||||
{"hello\x01world", 1, []string{"hello\x01"}, nil},
|
||||
{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
|
||||
{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
|
||||
{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
|
||||
}
|
||||
|
||||
func TestReadBytes(t *testing.T) {
|
||||
for _, test := range readBytesTests {
|
||||
buf := NewBuffer([]byte(test.buffer))
|
||||
var err error
|
||||
for _, expected := range test.expected {
|
||||
var bytes []byte
|
||||
bytes, err = buf.ReadBytes(test.delim)
|
||||
if string(bytes) != expected {
|
||||
t.Errorf("expected %q, got %q", expected, bytes)
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != test.err {
|
||||
t.Errorf("expected error %v, got %v", test.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGrow(t *testing.T) {
|
||||
x := []byte{'x'}
|
||||
y := []byte{'y'}
|
||||
tmp := make([]byte, 72)
|
||||
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
|
||||
xBytes := bytes.Repeat(x, startLen)
|
||||
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
|
||||
buf := NewBuffer(xBytes)
|
||||
// If we read, this affects buf.off, which is good to test.
|
||||
readBytes, _ := buf.Read(tmp)
|
||||
buf.Grow(growLen)
|
||||
yBytes := bytes.Repeat(y, growLen)
|
||||
// Check no allocation occurs in write, as long as we're single-threaded.
|
||||
var m1, m2 runtime.MemStats
|
||||
runtime.ReadMemStats(&m1)
|
||||
buf.Write(yBytes)
|
||||
runtime.ReadMemStats(&m2)
|
||||
if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
|
||||
t.Errorf("allocation occurred during write")
|
||||
}
|
||||
// Check that buffer has correct data.
|
||||
if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
|
||||
t.Errorf("bad initial data at %d %d", startLen, growLen)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
|
||||
t.Errorf("bad written data at %d %d", startLen, growLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Was a bug: used to give EOF reading empty slice at EOF.
|
||||
func TestReadEmptyAtEOF(t *testing.T) {
|
||||
b := new(Buffer)
|
||||
slice := make([]byte, 0)
|
||||
n, err := b.Read(slice)
|
||||
if err != nil {
|
||||
t.Errorf("read error: %v", err)
|
||||
}
|
||||
if n != 0 {
|
||||
t.Errorf("wrong count; got %d want 0", n)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that we occasionally compact. Issue 5154.
|
||||
func TestBufferGrowth(t *testing.T) {
|
||||
var b Buffer
|
||||
buf := make([]byte, 1024)
|
||||
b.Write(buf[0:1])
|
||||
var cap0 int
|
||||
for i := 0; i < 5<<10; i++ {
|
||||
b.Write(buf)
|
||||
b.Read(buf)
|
||||
if i == 0 {
|
||||
cap0 = cap(b.buf)
|
||||
}
|
||||
}
|
||||
cap1 := cap(b.buf)
|
||||
// (*Buffer).grow allows for 2x capacity slop before sliding,
|
||||
// so set our error threshold at 3x.
|
||||
if cap1 > cap0*3 {
|
||||
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
|
||||
}
|
||||
}
|
||||
|
||||
// From Issue 5154.
|
||||
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
|
||||
buf := make([]byte, 1024)
|
||||
for i := 0; i < b.N; i++ {
|
||||
var b Buffer
|
||||
b.Write(buf[0:1])
|
||||
for i := 0; i < 5<<10; i++ {
|
||||
b.Write(buf)
|
||||
b.Read(buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check that we don't compact too often. From Issue 5154.
|
||||
func BenchmarkBufferFullSmallReads(b *testing.B) {
|
||||
buf := make([]byte, 1024)
|
||||
for i := 0; i < b.N; i++ {
|
||||
var b Buffer
|
||||
b.Write(buf)
|
||||
for b.Len()+20 < cap(b.buf) {
|
||||
b.Write(buf[:10])
|
||||
}
|
||||
for i := 0; i < 5<<10; i++ {
|
||||
b.Read(buf[:1])
|
||||
b.Write(buf[:1])
|
||||
}
|
||||
}
|
||||
}
|
||||
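
A minimal usage sketch of util.Buffer, which mirrors bytes.Buffer and is what the tests above exercise (not part of this commit):

package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/util"
)

func main() {
	var buf util.Buffer

	buf.Write([]byte("alpha\nbeta\n"))
	fmt.Println(buf.Len()) // 11

	line, err := buf.ReadBytes('\n')
	fmt.Printf("%q %v\n", line, err) // "alpha\n" <nil>

	// Next returns the following n bytes without copying them out.
	fmt.Printf("%q\n", buf.Next(4)) // "beta"

	buf.Reset()
	fmt.Println(buf.Len()) // 0
}
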
@ -7,38 +7,38 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// Hash return hash of the given data.
|
||||
func Hash(data []byte, seed uint32) uint32 {
|
||||
// Similar to murmur hash
|
||||
var m uint32 = 0xc6a4a793
|
||||
var r uint32 = 24
|
||||
h := seed ^ (uint32(len(data)) * m)
|
||||
const (
|
||||
m = uint32(0xc6a4a793)
|
||||
r = uint32(24)
|
||||
)
|
||||
var (
|
||||
h = seed ^ (uint32(len(data)) * m)
|
||||
i int
|
||||
)
|
||||
|
||||
buf := bytes.NewBuffer(data)
|
||||
for buf.Len() >= 4 {
|
||||
var w uint32
|
||||
binary.Read(buf, binary.LittleEndian, &w)
|
||||
h += w
|
||||
for n := len(data) - len(data)%4; i < n; i += 4 {
|
||||
h += binary.LittleEndian.Uint32(data[i:])
|
||||
h *= m
|
||||
h ^= (h >> 16)
|
||||
}
|
||||
|
||||
rest := buf.Bytes()
|
||||
switch len(rest) {
|
||||
switch len(data) - i {
|
||||
default:
|
||||
panic("not reached")
|
||||
case 3:
|
||||
h += uint32(rest[2]) << 16
|
||||
h += uint32(data[i+2]) << 16
|
||||
fallthrough
|
||||
case 2:
|
||||
h += uint32(rest[1]) << 8
|
||||
h += uint32(data[i+1]) << 8
|
||||
fallthrough
|
||||
case 1:
|
||||
h += uint32(rest[0])
|
||||
h += uint32(data[i])
|
||||
h *= m
|
||||
h ^= (h >> r)
|
||||
case 0:
|
||||
46
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/util/hash_test.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var hashTests = []struct {
|
||||
data []byte
|
||||
seed uint32
|
||||
hash uint32
|
||||
}{
|
||||
{nil, 0xbc9f1d34, 0xbc9f1d34},
|
||||
{[]byte{0x62}, 0xbc9f1d34, 0xef1345c4},
|
||||
{[]byte{0xc3, 0x97}, 0xbc9f1d34, 0x5b663814},
|
||||
{[]byte{0xe2, 0x99, 0xa5}, 0xbc9f1d34, 0x323c078f},
|
||||
{[]byte{0xe1, 0x80, 0xb9, 0x32}, 0xbc9f1d34, 0xed21633a},
|
||||
{[]byte{
|
||||
0x01, 0xc0, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x14, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x04, 0x00,
|
||||
0x00, 0x00, 0x00, 0x14,
|
||||
0x00, 0x00, 0x00, 0x18,
|
||||
0x28, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x02, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
}, 0x12345678, 0xf333dabb},
|
||||
}
|
||||
|
||||
func TestHash(t *testing.T) {
|
||||
for i, x := range hashTests {
|
||||
h := Hash(x.data, x.seed)
|
||||
if h != x.hash {
|
||||
t.Fatalf("test-%d: invalid hash, %#x vs %#x", i, h, x.hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
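
A quick sketch checking util.Hash against one of the vectors in the table above (not part of this commit):

package main

import (
	"fmt"

	"github.com/pingcap/goleveldb/leveldb/util"
)

func main() {
	h := util.Hash([]byte{0x62}, 0xbc9f1d34)
	fmt.Printf("%#x\n", h) // expected 0xef1345c4 per hash_test.go
}
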
@ -7,12 +7,13 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/pingcap/goleveldb/leveldb/iterator"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type tSet struct {
|
||||
@ -23,7 +24,7 @@ type tSet struct {
|
||||
type version struct {
|
||||
s *session
|
||||
|
||||
tables []tFiles
|
||||
levels []tFiles
|
||||
|
||||
// Level that should be compacted next and its compaction score.
|
||||
// Score < 1 means compaction is not strictly needed. These fields
|
||||
@ -39,7 +40,7 @@ type version struct {
|
||||
}
|
||||
|
||||
func newVersion(s *session) *version {
|
||||
return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())}
|
||||
return &version{s: s}
|
||||
}
|
||||
|
||||
func (v *version) releaseNB() {
|
||||
@ -51,18 +52,18 @@ func (v *version) releaseNB() {
|
||||
panic("negative version ref")
|
||||
}
|
||||
|
||||
tables := make(map[uint64]bool)
|
||||
for _, tt := range v.next.tables {
|
||||
nextTables := make(map[int64]bool)
|
||||
for _, tt := range v.next.levels {
|
||||
for _, t := range tt {
|
||||
num := t.file.Num()
|
||||
tables[num] = true
|
||||
num := t.fd.Num
|
||||
nextTables[num] = true
|
||||
}
|
||||
}
|
||||
|
||||
for _, tt := range v.tables {
|
||||
for _, tt := range v.levels {
|
||||
for _, t := range tt {
|
||||
num := t.file.Num()
|
||||
if _, ok := tables[num]; !ok {
|
||||
num := t.fd.Num
|
||||
if _, ok := nextTables[num]; !ok {
|
||||
v.s.tops.remove(t)
|
||||
}
|
||||
}
|
||||
@ -78,11 +79,26 @@ func (v *version) release() {
|
||||
v.s.vmu.Unlock()
|
||||
}
|
||||
|
||||
func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
|
||||
func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
|
||||
ukey := ikey.ukey()
|
||||
|
||||
// Aux level.
|
||||
if aux != nil {
|
||||
for _, t := range aux {
|
||||
if t.overlaps(v.s.icmp, ukey, ukey) {
|
||||
if !f(-1, t) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if lf != nil && !lf(-1) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Walk tables level-by-level.
|
||||
for level, tables := range v.tables {
|
||||
for level, tables := range v.levels {
|
||||
if len(tables) == 0 {
|
||||
continue
|
||||
}
|
||||
@ -114,7 +130,7 @@ func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, l
|
||||
}
|
||||
}
|
||||
|
||||
func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
|
||||
func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
|
||||
ukey := ikey.ukey()
|
||||
|
||||
var (
|
||||
@ -124,16 +140,16 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||
// Level-0.
|
||||
zfound bool
|
||||
zseq uint64
|
||||
zkt kType
|
||||
zkt keyType
|
||||
zval []byte
|
||||
)
|
||||
|
||||
err = ErrNotFound
|
||||
|
||||
// Since entries never hope across level, finding key/value
|
||||
// Since entries never hop across level, finding key/value
|
||||
// in smaller level make later levels irrelevant.
|
||||
v.walkOverlapping(ikey, func(level int, t *tFile) bool {
|
||||
if !tseek {
|
||||
v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool {
|
||||
if level >= 0 && !tseek {
|
||||
if tset == nil {
|
||||
tset = &tSet{level, t}
|
||||
} else {
|
||||
@ -150,6 +166,7 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||
} else {
|
||||
fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
|
||||
}
|
||||
|
||||
switch ferr {
|
||||
case nil:
|
||||
case ErrNotFound:
|
||||
@ -159,9 +176,10 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||
return false
|
||||
}
|
||||
|
||||
if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
|
||||
if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
|
||||
if v.s.icmp.uCompare(ukey, fukey) == 0 {
|
||||
if level == 0 {
|
||||
// Level <= 0 may overlap each other.
|
||||
if level <= 0 {
|
||||
if fseq >= zseq {
|
||||
zfound = true
|
||||
zseq = fseq
|
||||
@ -170,12 +188,12 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||
}
|
||||
} else {
|
||||
switch fkt {
|
||||
case ktVal:
|
||||
case keyTypeVal:
|
||||
value = fval
|
||||
err = nil
|
||||
case ktDel:
|
||||
case keyTypeDel:
|
||||
default:
|
||||
panic("leveldb: invalid iKey type")
|
||||
panic("leveldb: invalid internalKey type")
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -189,12 +207,12 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||
}, func(level int) bool {
|
||||
if zfound {
|
||||
switch zkt {
|
||||
case ktVal:
|
||||
case keyTypeVal:
|
||||
value = zval
|
||||
err = nil
|
||||
case ktDel:
|
||||
case keyTypeDel:
|
||||
default:
|
||||
panic("leveldb: invalid iKey type")
|
||||
panic("leveldb: invalid internalKey type")
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -209,46 +227,40 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
|
||||
return
|
||||
}
|
||||
|
||||
func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
|
||||
func (v *version) sampleSeek(ikey internalKey) (tcomp bool) {
|
||||
var tset *tSet
|
||||
|
||||
v.walkOverlapping(ikey, func(level int, t *tFile) bool {
|
||||
v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool {
|
||||
if tset == nil {
|
||||
tset = &tSet{level, t}
|
||||
return true
|
||||
} else {
|
||||
if tset.table.consumeSeek() <= 0 {
|
||||
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
|
||||
}
|
||||
return false
|
||||
}
|
||||
if tset.table.consumeSeek() <= 0 {
|
||||
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
|
||||
}
|
||||
return false
|
||||
}, nil)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
|
||||
// Merge all level zero files together since they may overlap
|
||||
for _, t := range v.tables[0] {
|
||||
it := v.s.tops.newIterator(t, slice, ro)
|
||||
its = append(its, it)
|
||||
}
|
||||
|
||||
strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
|
||||
for _, tables := range v.tables[1:] {
|
||||
if len(tables) == 0 {
|
||||
continue
|
||||
for level, tables := range v.levels {
|
||||
if level == 0 {
|
||||
// Merge all level zero files together since they may overlap.
|
||||
for _, t := range tables {
|
||||
its = append(its, v.s.tops.newIterator(t, slice, ro))
|
||||
}
|
||||
} else if len(tables) != 0 {
|
||||
its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict))
|
||||
}
|
||||
|
||||
it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)
|
||||
its = append(its, it)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (v *version) newStaging() *versionStaging {
|
||||
return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())}
|
||||
return &versionStaging{base: v}
|
||||
}
|
||||
|
||||
// Spawn a new version based on this version.
|
||||
@ -259,19 +271,22 @@ func (v *version) spawn(r *sessionRecord) *version {
|
||||
}
|
||||
|
||||
func (v *version) fillRecord(r *sessionRecord) {
|
||||
for level, ts := range v.tables {
|
||||
for _, t := range ts {
|
||||
for level, tables := range v.levels {
|
||||
for _, t := range tables {
|
||||
r.addTableFile(level, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v *version) tLen(level int) int {
|
||||
return len(v.tables[level])
|
||||
if level < len(v.levels) {
|
||||
return len(v.levels[level])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
|
||||
for level, tables := range v.tables {
|
||||
func (v *version) offsetOf(ikey internalKey) (n int64, err error) {
|
||||
for level, tables := range v.levels {
|
||||
for _, t := range tables {
|
||||
if v.s.icmp.Compare(t.imax, ikey) <= 0 {
|
||||
// Entire file is before "ikey", so just add the file size
|
||||
@ -287,12 +302,11 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
|
||||
} else {
|
||||
// "ikey" falls in the range for this table. Add the
|
||||
// approximate offset of "ikey" within the table.
|
||||
var nn uint64
|
||||
nn, err = v.s.tops.offsetOf(t, ikey)
|
||||
if err != nil {
|
||||
if m, err := v.s.tops.offsetOf(t, ikey); err == nil {
|
||||
n += m
|
||||
} else {
|
||||
return 0, err
|
||||
}
|
||||
n += nn
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -300,37 +314,50 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
|
||||
if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
|
||||
var overlaps tFiles
|
||||
maxLevel := v.s.o.GetMaxMemCompationLevel()
|
||||
for ; level < maxLevel; level++ {
|
||||
if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) {
|
||||
break
|
||||
}
|
||||
overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
|
||||
if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) {
|
||||
break
|
||||
func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) {
|
||||
if maxLevel > 0 {
|
||||
if len(v.levels) == 0 {
|
||||
return maxLevel
|
||||
}
|
||||
if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) {
|
||||
var overlaps tFiles
|
||||
for ; level < maxLevel; level++ {
|
||||
if pLevel := level + 1; pLevel >= len(v.levels) {
|
||||
return maxLevel
|
||||
} else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) {
|
||||
break
|
||||
}
|
||||
if gpLevel := level + 2; gpLevel < len(v.levels) {
|
||||
overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
|
||||
if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (v *version) computeCompaction() {
|
||||
// Precomputed best level for next compaction
|
||||
var bestLevel int = -1
|
||||
var bestScore float64 = -1
|
||||
bestLevel := int(-1)
|
||||
bestScore := float64(-1)
|
||||
|
||||
for level, tables := range v.tables {
|
||||
statFiles := make([]int, len(v.levels))
|
||||
statSizes := make([]string, len(v.levels))
|
||||
statScore := make([]string, len(v.levels))
|
||||
statTotSize := int64(0)
|
||||
|
||||
for level, tables := range v.levels {
|
||||
var score float64
|
||||
size := tables.size()
|
||||
if level == 0 {
|
||||
// We treat level-0 specially by bounding the number of files
|
||||
// instead of number of bytes for two reasons:
|
||||
//
|
||||
// (1) With larger write-buffer sizes, it is nice not to do too
|
||||
// many level-0 compactions.
|
||||
// many level-0 compaction.
|
||||
//
|
||||
// (2) The files in level-0 are merged on every read and
|
||||
// therefore we wish to avoid too many files when the individual
|
||||
@ -339,17 +366,24 @@ func (v *version) computeCompaction() {
|
||||
// overwrites/deletions).
|
||||
score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
|
||||
} else {
|
||||
score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level))
|
||||
score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level))
|
||||
}
|
||||
|
||||
if score > bestScore {
|
||||
bestLevel = level
|
||||
bestScore = score
|
||||
}
|
||||
|
||||
statFiles[level] = len(tables)
|
||||
statSizes[level] = shortenb(int(size))
|
||||
statScore[level] = fmt.Sprintf("%.2f", score)
|
||||
statTotSize += size
|
||||
}
|
||||
|
||||
v.cLevel = bestLevel
|
||||
v.cScore = bestScore
|
||||
|
||||
v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore)
|
||||
}
|
||||
|
||||
func (v *version) needCompaction() bool {
|
||||
@ -357,43 +391,48 @@ func (v *version) needCompaction() bool {
|
||||
}
|
||||
|
||||
type tablesScratch struct {
|
||||
added map[uint64]atRecord
|
||||
deleted map[uint64]struct{}
|
||||
added map[int64]atRecord
|
||||
deleted map[int64]struct{}
|
||||
}
|
||||
|
||||
type versionStaging struct {
|
||||
base *version
|
||||
tables []tablesScratch
|
||||
levels []tablesScratch
|
||||
}
|
||||
|
||||
func (p *versionStaging) getScratch(level int) *tablesScratch {
|
||||
if level >= len(p.levels) {
|
||||
newLevels := make([]tablesScratch, level+1)
|
||||
copy(newLevels, p.levels)
|
||||
p.levels = newLevels
|
||||
}
|
||||
return &(p.levels[level])
|
||||
}
|
||||
|
||||
func (p *versionStaging) commit(r *sessionRecord) {
|
||||
// Deleted tables.
|
||||
for _, r := range r.deletedTables {
|
||||
tm := &(p.tables[r.level])
|
||||
|
||||
if len(p.base.tables[r.level]) > 0 {
|
||||
if tm.deleted == nil {
|
||||
tm.deleted = make(map[uint64]struct{})
|
||||
scratch := p.getScratch(r.level)
|
||||
if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 {
|
||||
if scratch.deleted == nil {
|
||||
scratch.deleted = make(map[int64]struct{})
|
||||
}
|
||||
tm.deleted[r.num] = struct{}{}
|
||||
scratch.deleted[r.num] = struct{}{}
|
||||
}
|
||||
|
||||
if tm.added != nil {
|
||||
delete(tm.added, r.num)
|
||||
if scratch.added != nil {
|
||||
delete(scratch.added, r.num)
|
||||
}
|
||||
}
|
||||
|
||||
// New tables.
|
||||
for _, r := range r.addedTables {
|
||||
tm := &(p.tables[r.level])
|
||||
|
||||
if tm.added == nil {
|
||||
tm.added = make(map[uint64]atRecord)
|
||||
scratch := p.getScratch(r.level)
|
||||
if scratch.added == nil {
|
||||
scratch.added = make(map[int64]atRecord)
|
||||
}
|
||||
tm.added[r.num] = r
|
||||
|
||||
if tm.deleted != nil {
|
||||
delete(tm.deleted, r.num)
|
||||
scratch.added[r.num] = r
|
||||
if scratch.deleted != nil {
|
||||
delete(scratch.deleted, r.num)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -401,39 +440,62 @@ func (p *versionStaging) commit(r *sessionRecord) {
|
||||
func (p *versionStaging) finish() *version {
|
||||
// Build new version.
|
||||
nv := newVersion(p.base.s)
|
||||
for level, tm := range p.tables {
|
||||
btables := p.base.tables[level]
|
||||
|
||||
n := len(btables) + len(tm.added) - len(tm.deleted)
|
||||
if n < 0 {
|
||||
n = 0
|
||||
}
|
||||
nt := make(tFiles, 0, n)
|
||||
|
||||
// Base tables.
|
||||
for _, t := range btables {
|
||||
if _, ok := tm.deleted[t.file.Num()]; ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := tm.added[t.file.Num()]; ok {
|
||||
continue
|
||||
}
|
||||
nt = append(nt, t)
|
||||
}
|
||||
|
||||
// New tables.
|
||||
for _, r := range tm.added {
|
||||
nt = append(nt, p.base.s.tableFileFromRecord(r))
|
||||
}
|
||||
|
||||
// Sort tables.
|
||||
if level == 0 {
|
||||
nt.sortByNum()
|
||||
} else {
|
||||
nt.sortByKey(p.base.s.icmp)
|
||||
}
|
||||
nv.tables[level] = nt
|
||||
numLevel := len(p.levels)
|
||||
if len(p.base.levels) > numLevel {
|
||||
numLevel = len(p.base.levels)
|
||||
}
|
||||
nv.levels = make([]tFiles, numLevel)
|
||||
for level := 0; level < numLevel; level++ {
|
||||
var baseTabels tFiles
|
||||
if level < len(p.base.levels) {
|
||||
baseTabels = p.base.levels[level]
|
||||
}
|
||||
|
||||
if level < len(p.levels) {
|
||||
scratch := p.levels[level]
|
||||
|
||||
var nt tFiles
|
||||
// Prealloc list if possible.
|
||||
if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 {
|
||||
nt = make(tFiles, 0, n)
|
||||
}
|
||||
|
||||
// Base tables.
|
||||
for _, t := range baseTabels {
|
||||
if _, ok := scratch.deleted[t.fd.Num]; ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := scratch.added[t.fd.Num]; ok {
|
||||
continue
|
||||
}
|
||||
nt = append(nt, t)
|
||||
}
|
||||
|
||||
// New tables.
|
||||
for _, r := range scratch.added {
|
||||
nt = append(nt, tableFileFromRecord(r))
|
||||
}
|
||||
|
||||
if len(nt) != 0 {
|
||||
// Sort tables.
|
||||
if level == 0 {
|
||||
nt.sortByNum()
|
||||
} else {
|
||||
nt.sortByKey(p.base.s.icmp)
|
||||
}
|
||||
|
||||
nv.levels[level] = nt
|
||||
}
|
||||
} else {
|
||||
nv.levels[level] = baseTabels
|
||||
}
|
||||
}
|
||||
|
||||
// Trim levels.
|
||||
n := len(nv.levels)
|
||||
for ; n > 0 && nv.levels[n-1] == nil; n-- {
|
||||
}
|
||||
nv.levels = nv.levels[:n]
|
||||
|
||||
// Compute compaction score for new version.
|
||||
nv.computeCompaction()
|
||||
181
_vendor/vendor/github.com/pingcap/goleveldb/leveldb/version_test.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/testutil"
|
||||
)
|
||||
|
||||
type testFileRec struct {
|
||||
level int
|
||||
num int64
|
||||
}
|
||||
|
||||
func TestVersionStaging(t *testing.T) {
|
||||
gomega.RegisterTestingT(t)
|
||||
stor := testutil.NewStorage()
|
||||
defer stor.Close()
|
||||
s, err := newSession(stor, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
v := newVersion(s)
|
||||
v.newStaging()
|
||||
|
||||
tmp := make([]byte, 4)
|
||||
mik := func(i uint64) []byte {
|
||||
binary.BigEndian.PutUint32(tmp, uint32(i))
|
||||
return []byte(makeInternalKey(nil, tmp, 0, keyTypeVal))
|
||||
}
|
||||
|
||||
for i, x := range []struct {
|
||||
add, del []testFileRec
|
||||
levels [][]int64
|
||||
}{
|
||||
{
|
||||
add: []testFileRec{
|
||||
{1, 1},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{},
|
||||
{1},
|
||||
},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{1, 1},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{},
|
||||
{1},
|
||||
},
|
||||
},
|
||||
{
|
||||
del: []testFileRec{
|
||||
{1, 1},
|
||||
},
|
||||
levels: [][]int64{},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{0, 1},
|
||||
{0, 3},
|
||||
{0, 2},
|
||||
{2, 5},
|
||||
{1, 4},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{3, 2, 1},
|
||||
{4},
|
||||
{5},
|
||||
},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{1, 6},
|
||||
{2, 5},
|
||||
},
|
||||
del: []testFileRec{
|
||||
{0, 1},
|
||||
{0, 4},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{3, 2},
|
||||
{4, 6},
|
||||
{5},
|
||||
},
|
||||
},
|
||||
{
|
||||
del: []testFileRec{
|
||||
{0, 3},
|
||||
{0, 2},
|
||||
{1, 4},
|
||||
{1, 6},
|
||||
{2, 5},
|
||||
},
|
||||
levels: [][]int64{},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{0, 1},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{1},
|
||||
},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{1, 2},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{1},
|
||||
{2},
|
||||
},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{0, 3},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{3, 1},
|
||||
{2},
|
||||
},
|
||||
},
|
||||
{
|
||||
add: []testFileRec{
|
||||
{6, 9},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{3, 1},
|
||||
{2},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{9},
|
||||
},
|
||||
},
|
||||
{
|
||||
del: []testFileRec{
|
||||
{6, 9},
|
||||
},
|
||||
levels: [][]int64{
|
||||
{3, 1},
|
||||
{2},
|
||||
},
|
||||
},
|
||||
} {
|
||||
rec := &sessionRecord{}
|
||||
for _, f := range x.add {
|
||||
ik := mik(uint64(f.num))
|
||||
rec.addTable(f.level, f.num, 1, ik, ik)
|
||||
}
|
||||
for _, f := range x.del {
|
||||
rec.delTable(f.level, f.num)
|
||||
}
|
||||
vs := v.newStaging()
|
||||
vs.commit(rec)
|
||||
v = vs.finish()
|
||||
if len(v.levels) != len(x.levels) {
|
||||
t.Fatalf("#%d: invalid level count: want=%d got=%d", i, len(x.levels), len(v.levels))
|
||||
}
|
||||
for j, want := range x.levels {
|
||||
tables := v.levels[j]
|
||||
if len(want) != len(tables) {
|
||||
t.Fatalf("#%d.%d: invalid tables count: want=%d got=%d", i, j, len(want), len(tables))
|
||||
}
|
||||
got := make([]int64, len(tables))
|
||||
for k, t := range tables {
|
||||
got[k] = t.fd.Num
|
||||
}
|
||||
if !reflect.DeepEqual(want, got) {
|
||||
t.Fatalf("#%d.%d: invalid tables: want=%v got=%v", i, j, want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,16 +1,11 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
type ErrIkeyCorrupted struct {
|
||||
@ -23,7 +18,7 @@ func (e *ErrIkeyCorrupted) Error() string {
|
||||
}
|
||||
|
||||
func newErrIkeyCorrupted(ikey []byte, reason string) error {
|
||||
return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
|
||||
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
|
||||
}
|
||||
|
||||
type kType int
|
||||
@ -106,7 +101,7 @@ func (ik iKey) assert() {
|
||||
panic("leveldb: nil iKey")
|
||||
}
|
||||
if len(ik) < 8 {
|
||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
|
||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik)))
|
||||
}
|
||||
}
|
||||
|
||||
@ -124,7 +119,7 @@ func (ik iKey) parseNum() (seq uint64, kt kType) {
|
||||
num := ik.num()
|
||||
seq, kt = uint64(num>>8), kType(num&0xff)
|
||||
if kt > ktVal {
|
||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
|
||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt))
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -135,7 +130,7 @@ func (ik iKey) String() string {
|
||||
}
|
||||
|
||||
if ukey, seq, kt, err := parseIkey(ik); err == nil {
|
||||
return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
|
||||
return fmt.Sprintf("%x,%s%d", ukey, kt, seq)
|
||||
} else {
|
||||
return "<invalid>"
|
||||
}
|
||||
628
_vendor/vendor/github.com/pingcap/goleveldb/manualtest/dbstress/main.go
generated
vendored
Normal file
@ -0,0 +1,628 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
mrand "math/rand"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb"
|
||||
"github.com/pingcap/goleveldb/leveldb/errors"
|
||||
"github.com/pingcap/goleveldb/leveldb/opt"
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
"github.com/pingcap/goleveldb/leveldb/table"
|
||||
"github.com/pingcap/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
dbPath = path.Join(os.TempDir(), "goleveldb-testdb")
|
||||
openFilesCacheCapacity = 500
|
||||
keyLen = 63
|
||||
valueLen = 256
|
||||
numKeys = arrayInt{100000, 1332, 531, 1234, 9553, 1024, 35743}
|
||||
httpProf = "127.0.0.1:5454"
|
||||
transactionProb = 0.5
|
||||
enableBlockCache = false
|
||||
enableCompression = false
|
||||
enableBufferPool = false
|
||||
|
||||
wg = new(sync.WaitGroup)
|
||||
done, fail uint32
|
||||
|
||||
bpool *util.BufferPool
|
||||
)
|
||||
|
||||
type arrayInt []int
|
||||
|
||||
func (a arrayInt) String() string {
|
||||
var str string
|
||||
for i, n := range a {
|
||||
if i > 0 {
|
||||
str += ","
|
||||
}
|
||||
str += strconv.Itoa(n)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
func (a *arrayInt) Set(str string) error {
|
||||
var na arrayInt
|
||||
for _, s := range strings.Split(str, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
if s != "" {
|
||||
n, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
na = append(na, n)
|
||||
}
|
||||
}
|
||||
*a = na
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&dbPath, "db", dbPath, "testdb path")
|
||||
flag.IntVar(&openFilesCacheCapacity, "openfilescachecap", openFilesCacheCapacity, "open files cache capacity")
|
||||
flag.IntVar(&keyLen, "keylen", keyLen, "key length")
|
||||
flag.IntVar(&valueLen, "valuelen", valueLen, "value length")
|
||||
flag.Var(&numKeys, "numkeys", "num keys")
|
||||
flag.StringVar(&httpProf, "httpprof", httpProf, "http pprof listen addr")
|
||||
flag.Float64Var(&transactionProb, "transactionprob", transactionProb, "probablity of writes using transaction")
|
||||
flag.BoolVar(&enableBufferPool, "enablebufferpool", enableBufferPool, "enable buffer pool")
|
||||
flag.BoolVar(&enableBlockCache, "enableblockcache", enableBlockCache, "enable block cache")
|
||||
flag.BoolVar(&enableCompression, "enablecompression", enableCompression, "enable block compression")
|
||||
}
|
||||
|
||||
func randomData(dst []byte, ns, prefix byte, i uint32, dataLen int) []byte {
|
||||
if dataLen < (2+4+4)*2+4 {
|
||||
panic("dataLen is too small")
|
||||
}
|
||||
if cap(dst) < dataLen {
|
||||
dst = make([]byte, dataLen)
|
||||
} else {
|
||||
dst = dst[:dataLen]
|
||||
}
|
||||
half := (dataLen - 4) / 2
|
||||
if _, err := rand.Reader.Read(dst[2 : half-8]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dst[0] = ns
|
||||
dst[1] = prefix
|
||||
binary.LittleEndian.PutUint32(dst[half-8:], i)
|
||||
binary.LittleEndian.PutUint32(dst[half-8:], i)
|
||||
binary.LittleEndian.PutUint32(dst[half-4:], util.NewCRC(dst[:half-4]).Value())
|
||||
full := half * 2
|
||||
copy(dst[half:full], dst[:half])
|
||||
if full < dataLen-4 {
|
||||
if _, err := rand.Reader.Read(dst[full : dataLen-4]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
binary.LittleEndian.PutUint32(dst[dataLen-4:], util.NewCRC(dst[:dataLen-4]).Value())
|
||||
return dst
|
||||
}
|
||||
|
||||
func dataSplit(data []byte) (data0, data1 []byte) {
|
||||
n := (len(data) - 4) / 2
|
||||
return data[:n], data[n : n+n]
|
||||
}
|
||||
|
||||
func dataNS(data []byte) byte {
|
||||
return data[0]
|
||||
}
|
||||
|
||||
func dataPrefix(data []byte) byte {
|
||||
return data[1]
|
||||
}
|
||||
|
||||
func dataI(data []byte) uint32 {
|
||||
return binary.LittleEndian.Uint32(data[(len(data)-4)/2-8:])
|
||||
}
|
||||
|
||||
func dataChecksum(data []byte) (uint32, uint32) {
|
||||
checksum0 := binary.LittleEndian.Uint32(data[len(data)-4:])
|
||||
checksum1 := util.NewCRC(data[:len(data)-4]).Value()
|
||||
return checksum0, checksum1
|
||||
}
|
||||
|
||||
func dataPrefixSlice(ns, prefix byte) *util.Range {
|
||||
return util.BytesPrefix([]byte{ns, prefix})
|
||||
}
|
||||
|
||||
func dataNsSlice(ns byte) *util.Range {
|
||||
return util.BytesPrefix([]byte{ns})
|
||||
}
|
||||
|
||||
type testingStorage struct {
|
||||
storage.Storage
|
||||
}
|
||||
|
||||
func (ts *testingStorage) scanTable(fd storage.FileDesc, checksum bool) (corrupted bool) {
|
||||
r, err := ts.Open(fd)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
size, err := r.Seek(0, os.SEEK_END)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
o := &opt.Options{
|
||||
DisableLargeBatchTransaction: true,
|
||||
Strict: opt.NoStrict,
|
||||
}
|
||||
if checksum {
|
||||
o.Strict = opt.StrictBlockChecksum | opt.StrictReader
|
||||
}
|
||||
tr, err := table.NewReader(r, size, fd, nil, bpool, o)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer tr.Release()
|
||||
|
||||
checkData := func(i int, t string, data []byte) bool {
|
||||
if len(data) == 0 {
|
||||
panic(fmt.Sprintf("[%v] nil data: i=%d t=%s", fd, i, t))
|
||||
}
|
||||
|
||||
checksum0, checksum1 := dataChecksum(data)
|
||||
if checksum0 != checksum1 {
|
||||
atomic.StoreUint32(&fail, 1)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
corrupted = true
|
||||
|
||||
data0, data1 := dataSplit(data)
|
||||
data0c0, data0c1 := dataChecksum(data0)
|
||||
data1c0, data1c1 := dataChecksum(data1)
|
||||
log.Printf("FATAL: [%v] Corrupted data i=%d t=%s (%#x != %#x): %x(%v) vs %x(%v)",
|
||||
fd, i, t, checksum0, checksum1, data0, data0c0 == data0c1, data1, data1c0 == data1c1)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
iter := tr.NewIterator(nil, nil)
|
||||
defer iter.Release()
|
||||
for i := 0; iter.Next(); i++ {
|
||||
ukey, _, kt, kerr := parseIkey(iter.Key())
|
||||
if kerr != nil {
|
||||
atomic.StoreUint32(&fail, 1)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
corrupted = true
|
||||
|
||||
log.Printf("FATAL: [%v] Corrupted ikey i=%d: %v", fd, i, kerr)
|
||||
return
|
||||
}
|
||||
if checkData(i, "key", ukey) {
|
||||
return
|
||||
}
|
||||
if kt == ktVal && checkData(i, "value", iter.Value()) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := iter.Error(); err != nil {
|
||||
if errors.IsCorrupted(err) {
|
||||
atomic.StoreUint32(&fail, 1)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
corrupted = true
|
||||
|
||||
log.Printf("FATAL: [%v] Corruption detected: %v", fd, err)
|
||||
} else {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (ts *testingStorage) Remove(fd storage.FileDesc) error {
|
||||
if atomic.LoadUint32(&fail) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if fd.Type == storage.TypeTable {
|
||||
if ts.scanTable(fd, true) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return ts.Storage.Remove(fd)
|
||||
}
|
||||
|
||||
type latencyStats struct {
|
||||
mark time.Time
|
||||
dur, min, max time.Duration
|
||||
num int
|
||||
}
|
||||
|
||||
func (s *latencyStats) start() {
|
||||
s.mark = time.Now()
|
||||
}
|
||||
|
||||
func (s *latencyStats) record(n int) {
|
||||
if s.mark.IsZero() {
|
||||
panic("not started")
|
||||
}
|
||||
dur := time.Now().Sub(s.mark)
|
||||
dur1 := dur / time.Duration(n)
|
||||
if dur1 < s.min || s.min == 0 {
|
||||
s.min = dur1
|
||||
}
|
||||
if dur1 > s.max {
|
||||
s.max = dur1
|
||||
}
|
||||
s.dur += dur
|
||||
s.num += n
|
||||
s.mark = time.Time{}
|
||||
}
|
||||
|
||||
func (s *latencyStats) ratePerSec() int {
|
||||
durSec := s.dur / time.Second
|
||||
if durSec > 0 {
|
||||
return s.num / int(durSec)
|
||||
}
|
||||
return s.num
|
||||
}
|
||||
|
||||
func (s *latencyStats) avg() time.Duration {
|
||||
if s.num > 0 {
|
||||
return s.dur / time.Duration(s.num)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *latencyStats) add(x *latencyStats) {
|
||||
if x.min < s.min || s.min == 0 {
|
||||
s.min = x.min
|
||||
}
|
||||
if x.max > s.max {
|
||||
s.max = x.max
|
||||
}
|
||||
s.dur += x.dur
|
||||
s.num += x.num
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if enableBufferPool {
|
||||
bpool = util.NewBufferPool(opt.DefaultBlockSize + 128)
|
||||
}
|
||||
|
||||
log.Printf("Test DB stored at %q", dbPath)
|
||||
if httpProf != "" {
|
||||
log.Printf("HTTP pprof listening at %q", httpProf)
|
||||
runtime.SetBlockProfileRate(1)
|
||||
go func() {
|
||||
if err := http.ListenAndServe(httpProf, nil); err != nil {
|
||||
log.Fatalf("HTTPPROF: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
os.RemoveAll(dbPath)
|
||||
stor, err := storage.OpenFile(dbPath, false)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
tstor := &testingStorage{stor}
|
||||
defer tstor.Close()
|
||||
|
||||
fatalf := func(err error, format string, v ...interface{}) {
|
||||
atomic.StoreUint32(&fail, 1)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
log.Printf("FATAL: "+format, v...)
|
||||
if err != nil && errors.IsCorrupted(err) {
|
||||
cerr := err.(*errors.ErrCorrupted)
|
||||
if !cerr.Fd.Nil() && cerr.Fd.Type == storage.TypeTable {
|
||||
log.Print("FATAL: corruption detected, scanning...")
|
||||
if !tstor.scanTable(storage.FileDesc{Type: storage.TypeTable, Num: cerr.Fd.Num}, false) {
|
||||
log.Printf("FATAL: unable to find corrupted key/value pair in table %v", cerr.Fd)
|
||||
}
|
||||
}
|
||||
}
|
||||
runtime.Goexit()
|
||||
}
|
||||
|
||||
if openFilesCacheCapacity == 0 {
|
||||
openFilesCacheCapacity = -1
|
||||
}
|
||||
o := &opt.Options{
|
||||
OpenFilesCacheCapacity: openFilesCacheCapacity,
|
||||
DisableBufferPool: !enableBufferPool,
|
||||
DisableBlockCache: !enableBlockCache,
|
||||
ErrorIfExist: true,
|
||||
Compression: opt.NoCompression,
|
||||
}
|
||||
if enableCompression {
|
||||
o.Compression = opt.DefaultCompression
|
||||
}
|
||||
|
||||
db, err := leveldb.Open(tstor, o)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var (
|
||||
mu = &sync.Mutex{}
|
||||
gGetStat = &latencyStats{}
|
||||
gIterStat = &latencyStats{}
|
||||
gWriteStat = &latencyStats{}
|
||||
gTrasactionStat = &latencyStats{}
|
||||
startTime = time.Now()
|
||||
|
||||
writeReq = make(chan *leveldb.Batch)
|
||||
writeAck = make(chan error)
|
||||
writeAckAck = make(chan struct{})
|
||||
)
|
||||
|
||||
go func() {
|
||||
for b := range writeReq {
|
||||
|
||||
var err error
|
||||
if mrand.Float64() < transactionProb {
|
||||
log.Print("> Write using transaction")
|
||||
gTrasactionStat.start()
|
||||
var tr *leveldb.Transaction
|
||||
if tr, err = db.OpenTransaction(); err == nil {
|
||||
if err = tr.Write(b, nil); err == nil {
|
||||
if err = tr.Commit(); err == nil {
|
||||
gTrasactionStat.record(b.Len())
|
||||
}
|
||||
} else {
|
||||
tr.Discard()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
gWriteStat.start()
|
||||
if err = db.Write(b, nil); err == nil {
|
||||
gWriteStat.record(b.Len())
|
||||
}
|
||||
}
|
||||
writeAck <- err
|
||||
<-writeAckAck
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
log.Print("------------------------")
|
||||
|
||||
log.Printf("> Elapsed=%v", time.Now().Sub(startTime))
|
||||
mu.Lock()
|
||||
log.Printf("> GetLatencyMin=%v GetLatencyMax=%v GetLatencyAvg=%v GetRatePerSec=%d",
|
||||
gGetStat.min, gGetStat.max, gGetStat.avg(), gGetStat.ratePerSec())
|
||||
log.Printf("> IterLatencyMin=%v IterLatencyMax=%v IterLatencyAvg=%v IterRatePerSec=%d",
|
||||
gIterStat.min, gIterStat.max, gIterStat.avg(), gIterStat.ratePerSec())
|
||||
log.Printf("> WriteLatencyMin=%v WriteLatencyMax=%v WriteLatencyAvg=%v WriteRatePerSec=%d",
|
||||
gWriteStat.min, gWriteStat.max, gWriteStat.avg(), gWriteStat.ratePerSec())
|
||||
log.Printf("> TransactionLatencyMin=%v TransactionLatencyMax=%v TransactionLatencyAvg=%v TransactionRatePerSec=%d",
|
||||
gTrasactionStat.min, gTrasactionStat.max, gTrasactionStat.avg(), gTrasactionStat.ratePerSec())
|
||||
mu.Unlock()
|
||||
|
||||
cachedblock, _ := db.GetProperty("leveldb.cachedblock")
|
||||
openedtables, _ := db.GetProperty("leveldb.openedtables")
|
||||
alivesnaps, _ := db.GetProperty("leveldb.alivesnaps")
|
||||
aliveiters, _ := db.GetProperty("leveldb.aliveiters")
|
||||
blockpool, _ := db.GetProperty("leveldb.blockpool")
|
||||
log.Printf("> BlockCache=%s OpenedTables=%s AliveSnaps=%s AliveIter=%s BlockPool=%q",
|
||||
cachedblock, openedtables, alivesnaps, aliveiters, blockpool)
|
||||
|
||||
log.Print("------------------------")
|
||||
}
|
||||
}()
|
||||
|
||||
for ns, numKey := range numKeys {
|
||||
func(ns, numKey int) {
|
||||
log.Printf("[%02d] STARTING: numKey=%d", ns, numKey)
|
||||
|
||||
keys := make([][]byte, numKey)
|
||||
for i := range keys {
|
||||
keys[i] = randomData(nil, byte(ns), 1, uint32(i), keyLen)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
var wi uint32
|
||||
defer func() {
|
||||
log.Printf("[%02d] WRITER DONE #%d", ns, wi)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
var (
|
||||
b = new(leveldb.Batch)
|
||||
k2, v2 []byte
|
||||
nReader int32
|
||||
)
|
||||
for atomic.LoadUint32(&done) == 0 {
|
||||
log.Printf("[%02d] WRITER #%d", ns, wi)
|
||||
|
||||
b.Reset()
|
||||
for _, k1 := range keys {
|
||||
k2 = randomData(k2, byte(ns), 2, wi, keyLen)
|
||||
v2 = randomData(v2, byte(ns), 3, wi, valueLen)
|
||||
b.Put(k2, v2)
|
||||
b.Put(k1, k2)
|
||||
}
|
||||
writeReq <- b
|
||||
if err := <-writeAck; err != nil {
|
||||
writeAckAck <- struct{}{}
|
||||
fatalf(err, "[%02d] WRITER #%d db.Write: %v", ns, wi, err)
|
||||
}
|
||||
|
||||
snap, err := db.GetSnapshot()
|
||||
if err != nil {
|
||||
writeAckAck <- struct{}{}
|
||||
fatalf(err, "[%02d] WRITER #%d db.GetSnapshot: %v", ns, wi, err)
|
||||
}
|
||||
|
||||
writeAckAck <- struct{}{}
|
||||
|
||||
wg.Add(1)
|
||||
atomic.AddInt32(&nReader, 1)
|
||||
go func(snapwi uint32, snap *leveldb.Snapshot) {
|
||||
var (
|
||||
ri int
|
||||
iterStat = &latencyStats{}
|
||||
getStat = &latencyStats{}
|
||||
)
|
||||
defer func() {
|
||||
mu.Lock()
|
||||
gGetStat.add(getStat)
|
||||
gIterStat.add(iterStat)
|
||||
mu.Unlock()
|
||||
|
||||
atomic.AddInt32(&nReader, -1)
|
||||
log.Printf("[%02d] READER #%d.%d DONE Snap=%v Alive=%d IterLatency=%v GetLatency=%v", ns, snapwi, ri, snap, atomic.LoadInt32(&nReader), iterStat.avg(), getStat.avg())
|
||||
snap.Release()
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
stopi := snapwi + 3
|
||||
for (ri < 3 || atomic.LoadUint32(&wi) < stopi) && atomic.LoadUint32(&done) == 0 {
|
||||
var n int
|
||||
iter := snap.NewIterator(dataPrefixSlice(byte(ns), 1), nil)
|
||||
iterStat.start()
|
||||
for iter.Next() {
|
||||
k1 := iter.Key()
|
||||
k2 := iter.Value()
|
||||
iterStat.record(1)
|
||||
|
||||
if dataNS(k2) != byte(ns) {
|
||||
fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key NS: want=%d got=%d", ns, snapwi, ri, n, ns, dataNS(k2))
|
||||
}
|
||||
|
||||
kwritei := dataI(k2)
|
||||
if kwritei != snapwi {
|
||||
fatalf(nil, "[%02d] READER #%d.%d K%d invalid in-key iter num: %d", ns, snapwi, ri, n, kwritei)
|
||||
}
|
||||
|
||||
getStat.start()
|
||||
v2, err := snap.Get(k2, nil)
|
||||
if err != nil {
|
||||
fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
|
||||
}
|
||||
getStat.record(1)
|
||||
|
||||
if checksum0, checksum1 := dataChecksum(v2); checksum0 != checksum1 {
|
||||
err := &errors.ErrCorrupted{Fd: storage.FileDesc{0xff, 0}, Err: fmt.Errorf("v2: %x: checksum mismatch: %v vs %v", v2, checksum0, checksum1)}
|
||||
fatalf(err, "[%02d] READER #%d.%d K%d snap.Get: %v\nk1: %x\n -> k2: %x", ns, snapwi, ri, n, err, k1, k2)
|
||||
}
|
||||
|
||||
n++
|
||||
iterStat.start()
|
||||
}
|
||||
iter.Release()
|
||||
if err := iter.Error(); err != nil {
|
||||
fatalf(err, "[%02d] READER #%d.%d K%d iter.Error: %v", ns, snapwi, ri, numKey, err)
|
||||
}
|
||||
if n != numKey {
|
||||
fatalf(nil, "[%02d] READER #%d.%d missing keys: want=%d got=%d", ns, snapwi, ri, numKey, n)
|
||||
}
|
||||
|
||||
ri++
|
||||
}
|
||||
}(wi, snap)
|
||||
|
||||
atomic.AddUint32(&wi, 1)
|
||||
}
|
||||
}()
|
||||
|
||||
delB := new(leveldb.Batch)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
var (
|
||||
i int
|
||||
iterStat = &latencyStats{}
|
||||
)
|
||||
defer func() {
|
||||
log.Printf("[%02d] SCANNER DONE #%d", ns, i)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
for atomic.LoadUint32(&done) == 0 {
|
||||
var n int
|
||||
delB.Reset()
|
||||
iter := db.NewIterator(dataNsSlice(byte(ns)), nil)
|
||||
iterStat.start()
|
||||
for iter.Next() && atomic.LoadUint32(&done) == 0 {
|
||||
k := iter.Key()
|
||||
v := iter.Value()
|
||||
iterStat.record(1)
|
||||
|
||||
for ci, x := range [...][]byte{k, v} {
|
||||
checksum0, checksum1 := dataChecksum(x)
|
||||
if checksum0 != checksum1 {
|
||||
if ci == 0 {
|
||||
fatalf(nil, "[%02d] SCANNER %d.%d invalid key checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
|
||||
} else {
|
||||
fatalf(nil, "[%02d] SCANNER %d.%d invalid value checksum: want %d, got %d\n%x -> %x", ns, i, n, checksum0, checksum1, k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if dataPrefix(k) == 2 || mrand.Int()%999 == 0 {
|
||||
delB.Delete(k)
|
||||
}
|
||||
|
||||
n++
|
||||
iterStat.start()
|
||||
}
|
||||
iter.Release()
|
||||
if err := iter.Error(); err != nil {
|
||||
fatalf(err, "[%02d] SCANNER #%d.%d iter.Error: %v", ns, i, n, err)
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
log.Printf("[%02d] SCANNER #%d IterLatency=%v", ns, i, iterStat.avg())
|
||||
}
|
||||
|
||||
if delB.Len() > 0 && atomic.LoadUint32(&done) == 0 {
|
||||
t := time.Now()
|
||||
writeReq <- delB
|
||||
if err := <-writeAck; err != nil {
|
||||
writeAckAck <- struct{}{}
|
||||
fatalf(err, "[%02d] SCANNER #%d db.Write: %v", ns, i, err)
|
||||
} else {
|
||||
writeAckAck <- struct{}{}
|
||||
}
|
||||
log.Printf("[%02d] SCANNER #%d Deleted=%d Time=%v", ns, i, delB.Len(), time.Now().Sub(t))
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
}()
|
||||
}(ns, numKey)
|
||||
}
|
||||
|
||||
go func() {
|
||||
sig := make(chan os.Signal)
|
||||
signal.Notify(sig, os.Interrupt, os.Kill)
|
||||
log.Printf("Got signal: %v, exiting...", <-sig)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
85
_vendor/vendor/github.com/pingcap/goleveldb/manualtest/filelock/main.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pingcap/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
filename string
|
||||
child bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&filename, "filename", filepath.Join(os.TempDir(), "goleveldb_filelock_test"), "Filename used for testing")
|
||||
flag.BoolVar(&child, "child", false, "This is the child")
|
||||
}
|
||||
|
||||
func runChild() error {
|
||||
var args []string
|
||||
args = append(args, os.Args[1:]...)
|
||||
args = append(args, "-child")
|
||||
cmd := exec.Command(os.Args[0], args...)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
err := cmd.Run()
|
||||
r := bufio.NewReader(&out)
|
||||
for {
|
||||
line, _, e1 := r.ReadLine()
|
||||
if e1 != nil {
|
||||
break
|
||||
}
|
||||
fmt.Println("[Child]", string(line))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
fmt.Printf("Using path: %s\n", filename)
|
||||
if child {
|
||||
fmt.Println("Child flag set.")
|
||||
}
|
||||
|
||||
stor, err := storage.OpenFile(filename, false)
|
||||
if err != nil {
|
||||
fmt.Printf("Could not open storage: %s", err)
|
||||
os.Exit(10)
|
||||
}
|
||||
|
||||
if !child {
|
||||
fmt.Println("Executing child -- first test (expecting error)")
|
||||
err := runChild()
|
||||
if err == nil {
|
||||
fmt.Println("Expecting error from child")
|
||||
} else if err.Error() != "exit status 10" {
|
||||
fmt.Println("Got unexpected error from child:", err)
|
||||
} else {
|
||||
fmt.Printf("Got error from child: %s (expected)\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = stor.Close()
|
||||
if err != nil {
|
||||
fmt.Printf("Error when closing storage: %s", err)
|
||||
os.Exit(11)
|
||||
}
|
||||
|
||||
if !child {
|
||||
fmt.Println("Executing child -- second test")
|
||||
err := runChild()
|
||||
if err != nil {
|
||||
fmt.Println("Got unexpected error from child:", err)
|
||||
}
|
||||
}
|
||||
|
||||
os.RemoveAll(filename)
|
||||
}
|
||||
100
_vendor/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
generated
vendored
@ -1,100 +0,0 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// Reader is the interface that wraps basic Get and NewIterator methods.
|
||||
// This interface implemented by both DB and Snapshot.
|
||||
type Reader interface {
|
||||
Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
|
||||
NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
|
||||
}
|
||||
|
||||
type Sizes []uint64
|
||||
|
||||
// Sum returns sum of the sizes.
|
||||
func (p Sizes) Sum() (n uint64) {
|
||||
for _, s := range p {
|
||||
n += s
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Logging.
|
||||
func (db *DB) log(v ...interface{}) { db.s.log(v...) }
|
||||
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
|
||||
|
||||
// Check and clean files.
|
||||
func (db *DB) checkAndCleanFiles() error {
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
|
||||
tablesMap := make(map[uint64]bool)
|
||||
for _, tables := range v.tables {
|
||||
for _, t := range tables {
|
||||
tablesMap[t.file.Num()] = false
|
||||
}
|
||||
}
|
||||
|
||||
files, err := db.s.getFiles(storage.TypeAll)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var nTables int
|
||||
var rem []storage.File
|
||||
for _, f := range files {
|
||||
keep := true
|
||||
switch f.Type() {
|
||||
case storage.TypeManifest:
|
||||
keep = f.Num() >= db.s.manifestFile.Num()
|
||||
case storage.TypeJournal:
|
||||
if db.frozenJournalFile != nil {
|
||||
keep = f.Num() >= db.frozenJournalFile.Num()
|
||||
} else {
|
||||
keep = f.Num() >= db.journalFile.Num()
|
||||
}
|
||||
case storage.TypeTable:
|
||||
_, keep = tablesMap[f.Num()]
|
||||
if keep {
|
||||
tablesMap[f.Num()] = true
|
||||
nTables++
|
||||
}
|
||||
}
|
||||
|
||||
if !keep {
|
||||
rem = append(rem, f)
|
||||
}
|
||||
}
|
||||
|
||||
if nTables != len(tablesMap) {
|
||||
var missing []*storage.FileInfo
|
||||
for num, present := range tablesMap {
|
||||
if !present {
|
||||
missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
|
||||
db.logf("db@janitor table missing @%d", num)
|
||||
}
|
||||
}
|
||||
return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
|
||||
}
|
||||
|
||||
db.logf("db@janitor F·%d G·%d", len(files), len(rem))
|
||||
for _, f := range rem {
|
||||
db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
|
||||
if err := f.Remove(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
543
_vendor/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
generated
vendored
@ -1,543 +0,0 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reservefs.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var errFileOpen = errors.New("leveldb/storage: file still open")
|
||||
|
||||
type fileLock interface {
|
||||
release() error
|
||||
}
|
||||
|
||||
type fileStorageLock struct {
|
||||
fs *fileStorage
|
||||
}
|
||||
|
||||
func (lock *fileStorageLock) Release() {
|
||||
fs := lock.fs
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.slock == lock {
|
||||
fs.slock = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// fileStorage is a file-system backed storage.
|
||||
type fileStorage struct {
|
||||
path string
|
||||
|
||||
mu sync.Mutex
|
||||
flock fileLock
|
||||
slock *fileStorageLock
|
||||
logw *os.File
|
||||
buf []byte
|
||||
// Opened file counter; if open < 0 means closed.
|
||||
open int
|
||||
day int
|
||||
}
|
||||
|
||||
// OpenFile returns a new filesytem-backed storage implementation with the given
|
||||
// path. This also hold a file lock, so any subsequent attempt to open the same
|
||||
// path will fail.
|
||||
//
|
||||
// The storage must be closed after use, by calling Close method.
|
||||
func OpenFile(path string) (Storage, error) {
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
flock, err := newFileLock(filepath.Join(path, "LOCK"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
flock.release()
|
||||
}
|
||||
}()
|
||||
|
||||
rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old"))
|
||||
logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs := &fileStorage{path: path, flock: flock, logw: logw}
|
||||
runtime.SetFinalizer(fs, (*fileStorage).Close)
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Lock() (util.Releaser, error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
if fs.slock != nil {
|
||||
return nil, ErrLocked
|
||||
}
|
||||
fs.slock = &fileStorageLock{fs: fs}
|
||||
return fs.slock, nil
|
||||
}
|
||||
|
||||
func itoa(buf []byte, i int, wid int) []byte {
|
||||
var u uint = uint(i)
|
||||
if u == 0 && wid <= 1 {
|
||||
return append(buf, '0')
|
||||
}
|
||||
|
||||
// Assemble decimal in reverse order.
|
||||
var b [32]byte
|
||||
bp := len(b)
|
||||
for ; u > 0 || wid > 0; u /= 10 {
|
||||
bp--
|
||||
wid--
|
||||
b[bp] = byte(u%10) + '0'
|
||||
}
|
||||
return append(buf, b[bp:]...)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) printDay(t time.Time) {
|
||||
if fs.day == t.Day() {
|
||||
return
|
||||
}
|
||||
fs.day = t.Day()
|
||||
fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
|
||||
}
|
||||
|
||||
func (fs *fileStorage) doLog(t time.Time, str string) {
|
||||
fs.printDay(t)
|
||||
hour, min, sec := t.Clock()
|
||||
msec := t.Nanosecond() / 1e3
|
||||
// time
|
||||
fs.buf = itoa(fs.buf[:0], hour, 2)
|
||||
fs.buf = append(fs.buf, ':')
|
||||
fs.buf = itoa(fs.buf, min, 2)
|
||||
fs.buf = append(fs.buf, ':')
|
||||
fs.buf = itoa(fs.buf, sec, 2)
|
||||
fs.buf = append(fs.buf, '.')
|
||||
fs.buf = itoa(fs.buf, msec, 6)
|
||||
fs.buf = append(fs.buf, ' ')
|
||||
// write
|
||||
fs.buf = append(fs.buf, []byte(str)...)
|
||||
fs.buf = append(fs.buf, '\n')
|
||||
fs.logw.Write(fs.buf)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Log(str string) {
|
||||
t := time.Now()
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return
|
||||
}
|
||||
fs.doLog(t, str)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) log(str string) {
|
||||
fs.doLog(time.Now(), str)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) GetFile(num uint64, t FileType) File {
|
||||
return &file{fs: fs, num: num, t: t}
|
||||
}
|
||||
|
||||
func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
dir, err := os.Open(fs.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fnn, err := dir.Readdirnames(0)
|
||||
// Close the dir first before checking for Readdirnames error.
|
||||
if err := dir.Close(); err != nil {
|
||||
fs.log(fmt.Sprintf("close dir: %v", err))
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
f := &file{fs: fs}
|
||||
for _, fn := range fnn {
|
||||
if f.parse(fn) && (f.t&t) != 0 {
|
||||
ff = append(ff, f)
|
||||
f = &file{fs: fs}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *fileStorage) GetManifest() (f File, err error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
dir, err := os.Open(fs.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fnn, err := dir.Readdirnames(0)
|
||||
// Close the dir first before checking for Readdirnames error.
|
||||
if err := dir.Close(); err != nil {
|
||||
fs.log(fmt.Sprintf("close dir: %v", err))
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Find latest CURRENT file.
|
||||
var rem []string
|
||||
var pend bool
|
||||
var cerr error
|
||||
for _, fn := range fnn {
|
||||
if strings.HasPrefix(fn, "CURRENT") {
|
||||
pend1 := len(fn) > 7
|
||||
// Make sure it is valid name for a CURRENT file, otherwise skip it.
|
||||
if pend1 {
|
||||
if fn[7] != '.' || len(fn) < 9 {
|
||||
fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
|
||||
continue
|
||||
}
|
||||
if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
|
||||
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
|
||||
continue
|
||||
}
|
||||
}
|
||||
path := filepath.Join(fs.path, fn)
|
||||
r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
|
||||
if e1 != nil {
|
||||
return nil, e1
|
||||
}
|
||||
b, e1 := ioutil.ReadAll(r)
|
||||
if e1 != nil {
|
||||
r.Close()
|
||||
return nil, e1
|
||||
}
|
||||
f1 := &file{fs: fs}
|
||||
if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) {
|
||||
fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn))
|
||||
if pend1 {
|
||||
rem = append(rem, fn)
|
||||
}
|
||||
if !pend1 || cerr == nil {
|
||||
cerr = &ErrCorrupted{
|
||||
File: fsParseName(filepath.Base(fn)),
|
||||
Err: errors.New("leveldb/storage: corrupted or incomplete manifest file"),
|
||||
}
|
||||
}
|
||||
} else if f != nil && f1.Num() < f.Num() {
|
||||
fs.log(fmt.Sprintf("skipping %s: obsolete", fn))
|
||||
if pend1 {
|
||||
rem = append(rem, fn)
|
||||
}
|
||||
} else {
|
||||
f = f1
|
||||
pend = pend1
|
||||
}
|
||||
if err := r.Close(); err != nil {
|
||||
fs.log(fmt.Sprintf("close %s: %v", fn, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
// Don't remove any files if there is no valid CURRENT file.
|
||||
if f == nil {
|
||||
if cerr != nil {
|
||||
err = cerr
|
||||
} else {
|
||||
err = os.ErrNotExist
|
||||
}
|
||||
return
|
||||
}
|
||||
// Rename pending CURRENT file to an effective CURRENT.
|
||||
if pend {
|
||||
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num())
|
||||
if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
|
||||
fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err))
|
||||
}
|
||||
}
|
||||
// Remove obsolete or incomplete pending CURRENT files.
|
||||
for _, fn := range rem {
|
||||
path := filepath.Join(fs.path, fn)
|
||||
if err := os.Remove(path); err != nil {
|
||||
fs.log(fmt.Sprintf("remove %s: %v", fn, err))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *fileStorage) SetManifest(f File) (err error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
f2, ok := f.(*file)
|
||||
if !ok || f2.t != TypeManifest {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
fs.log(fmt.Sprintf("CURRENT: %v", err))
|
||||
}
|
||||
}()
|
||||
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num())
|
||||
w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = fmt.Fprintln(w, f2.name())
|
||||
// Close the file first.
|
||||
if err := w.Close(); err != nil {
|
||||
fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return rename(path, filepath.Join(fs.path, "CURRENT"))
|
||||
}
|
||||
|
||||
func (fs *fileStorage) Close() error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
// Clear the finalizer.
|
||||
runtime.SetFinalizer(fs, nil)
|
||||
|
||||
if fs.open > 0 {
|
||||
fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
|
||||
}
|
||||
fs.open = -1
|
||||
e1 := fs.logw.Close()
|
||||
err := fs.flock.release()
|
||||
if err == nil {
|
||||
err = e1
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type fileWrap struct {
|
||||
*os.File
|
||||
f *file
|
||||
}
|
||||
|
||||
func (fw fileWrap) Sync() error {
|
||||
if err := fw.File.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if fw.f.Type() == TypeManifest {
|
||||
// Also sync parent directory if file type is manifest.
|
||||
// See: https://code.google.com/p/leveldb/issues/detail?id=190.
|
||||
if err := syncDir(fw.f.fs.path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fw fileWrap) Close() error {
|
||||
f := fw.f
|
||||
f.fs.mu.Lock()
|
||||
defer f.fs.mu.Unlock()
|
||||
if !f.open {
|
||||
return ErrClosed
|
||||
}
|
||||
f.open = false
|
||||
f.fs.open--
|
||||
err := fw.File.Close()
|
||||
if err != nil {
|
||||
f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type file struct {
|
||||
fs *fileStorage
|
||||
num uint64
|
||||
t FileType
|
||||
open bool
|
||||
}
|
||||
|
||||
func (f *file) Open() (Reader, error) {
|
||||
f.fs.mu.Lock()
|
||||
defer f.fs.mu.Unlock()
|
||||
if f.fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
if f.open {
|
||||
return nil, errFileOpen
|
||||
}
|
||||
of, err := os.OpenFile(f.path(), os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
if f.hasOldName() && os.IsNotExist(err) {
|
||||
of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0)
|
||||
if err == nil {
|
||||
goto ok
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
ok:
|
||||
f.open = true
|
||||
f.fs.open++
|
||||
return fileWrap{of, f}, nil
|
||||
}
|
||||
|
||||
func (f *file) Create() (Writer, error) {
|
||||
f.fs.mu.Lock()
|
||||
defer f.fs.mu.Unlock()
|
||||
if f.fs.open < 0 {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
if f.open {
|
||||
return nil, errFileOpen
|
||||
}
|
||||
of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.open = true
|
||||
f.fs.open++
|
||||
return fileWrap{of, f}, nil
|
||||
}
|
||||
|
||||
func (f *file) Replace(newfile File) error {
|
||||
f.fs.mu.Lock()
|
||||
defer f.fs.mu.Unlock()
|
||||
if f.fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
newfile2, ok := newfile.(*file)
|
||||
if !ok {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
if f.open || newfile2.open {
|
||||
return errFileOpen
|
||||
}
|
||||
return rename(newfile2.path(), f.path())
|
||||
}
|
||||
|
||||
func (f *file) Type() FileType {
|
||||
return f.t
|
||||
}
|
||||
|
||||
func (f *file) Num() uint64 {
|
||||
return f.num
|
||||
}
|
||||
|
||||
func (f *file) Remove() error {
|
||||
f.fs.mu.Lock()
|
||||
defer f.fs.mu.Unlock()
|
||||
if f.fs.open < 0 {
|
||||
return ErrClosed
|
||||
}
|
||||
if f.open {
|
||||
return errFileOpen
|
||||
}
|
||||
err := os.Remove(f.path())
|
||||
if err != nil {
|
||||
f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err))
|
||||
}
|
||||
// Also try remove file with old name, just in case.
|
||||
if f.hasOldName() {
|
||||
if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) {
|
||||
f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err))
|
||||
err = e1
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *file) hasOldName() bool {
|
||||
return f.t == TypeTable
|
||||
}
|
||||
|
||||
func (f *file) oldName() string {
|
||||
switch f.t {
|
||||
case TypeTable:
|
||||
return fmt.Sprintf("%06d.sst", f.num)
|
||||
}
|
||||
return f.name()
|
||||
}
|
||||
|
||||
func (f *file) oldPath() string {
|
||||
return filepath.Join(f.fs.path, f.oldName())
|
||||
}
|
||||
|
||||
func (f *file) name() string {
|
||||
switch f.t {
|
||||
case TypeManifest:
|
||||
return fmt.Sprintf("MANIFEST-%06d", f.num)
|
||||
case TypeJournal:
|
||||
return fmt.Sprintf("%06d.log", f.num)
|
||||
case TypeTable:
|
||||
return fmt.Sprintf("%06d.ldb", f.num)
|
||||
case TypeTemp:
|
||||
return fmt.Sprintf("%06d.tmp", f.num)
|
||||
default:
|
||||
panic("invalid file type")
|
||||
}
|
||||
}
|
||||
|
||||
func (f *file) path() string {
|
||||
return filepath.Join(f.fs.path, f.name())
|
||||
}
|
||||
|
||||
func fsParseName(name string) *FileInfo {
|
||||
fi := &FileInfo{}
|
||||
var tail string
|
||||
_, err := fmt.Sscanf(name, "%d.%s", &fi.Num, &tail)
|
||||
if err == nil {
|
||||
switch tail {
|
||||
case "log":
|
||||
fi.Type = TypeJournal
|
||||
case "ldb", "sst":
|
||||
fi.Type = TypeTable
|
||||
case "tmp":
|
||||
fi.Type = TypeTemp
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
return fi
|
||||
}
|
||||
n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fi.Num, &tail)
|
||||
if n == 1 {
|
||||
fi.Type = TypeManifest
|
||||
return fi
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *file) parse(name string) bool {
|
||||
fi := fsParseName(name)
|
||||
if fi == nil {
|
||||
return false
|
||||
}
|
||||
f.t = fi.Type
|
||||
f.num = fi.Num
|
||||
return true
|
||||
}
|
||||
203
_vendor/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
generated
vendored
@ -1,203 +0,0 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
const typeShift = 3
|
||||
|
||||
type memStorageLock struct {
|
||||
ms *memStorage
|
||||
}
|
||||
|
||||
func (lock *memStorageLock) Release() {
|
||||
ms := lock.ms
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.slock == lock {
|
||||
ms.slock = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// memStorage is a memory-backed storage.
|
||||
type memStorage struct {
|
||||
mu sync.Mutex
|
||||
slock *memStorageLock
|
||||
files map[uint64]*memFile
|
||||
manifest *memFilePtr
|
||||
}
|
||||
|
||||
// NewMemStorage returns a new memory-backed storage implementation.
|
||||
func NewMemStorage() Storage {
|
||||
return &memStorage{
|
||||
files: make(map[uint64]*memFile),
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *memStorage) Lock() (util.Releaser, error) {
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.slock != nil {
|
||||
return nil, ErrLocked
|
||||
}
|
||||
ms.slock = &memStorageLock{ms: ms}
|
||||
return ms.slock, nil
|
||||
}
|
||||
|
||||
func (*memStorage) Log(str string) {}
|
||||
|
||||
func (ms *memStorage) GetFile(num uint64, t FileType) File {
|
||||
return &memFilePtr{ms: ms, num: num, t: t}
|
||||
}
|
||||
|
||||
func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
|
||||
ms.mu.Lock()
|
||||
var ff []File
|
||||
for x, _ := range ms.files {
|
||||
num, mt := x>>typeShift, FileType(x)&TypeAll
|
||||
if mt&t == 0 {
|
||||
continue
|
||||
}
|
||||
ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
|
||||
}
|
||||
ms.mu.Unlock()
|
||||
return ff, nil
|
||||
}
|
||||
|
||||
func (ms *memStorage) GetManifest() (File, error) {
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if ms.manifest == nil {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return ms.manifest, nil
|
||||
}
|
||||
|
||||
func (ms *memStorage) SetManifest(f File) error {
|
||||
fm, ok := f.(*memFilePtr)
|
||||
if !ok || fm.t != TypeManifest {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
ms.mu.Lock()
|
||||
ms.manifest = fm
|
||||
ms.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*memStorage) Close() error { return nil }
|
||||
|
||||
type memReader struct {
|
||||
*bytes.Reader
|
||||
m *memFile
|
||||
}
|
||||
|
||||
func (mr *memReader) Close() error {
|
||||
return mr.m.Close()
|
||||
}
|
||||
|
||||
type memFile struct {
|
||||
bytes.Buffer
|
||||
ms *memStorage
|
||||
open bool
|
||||
}
|
||||
|
||||
func (*memFile) Sync() error { return nil }
|
||||
func (m *memFile) Close() error {
|
||||
m.ms.mu.Lock()
|
||||
m.open = false
|
||||
m.ms.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
type memFilePtr struct {
|
||||
ms *memStorage
|
||||
num uint64
|
||||
t FileType
|
||||
}
|
||||
|
||||
func (p *memFilePtr) x() uint64 {
|
||||
return p.Num()<<typeShift | uint64(p.Type())
|
||||
}
|
||||
|
||||
func (p *memFilePtr) Open() (Reader, error) {
|
||||
ms := p.ms
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if m, exist := ms.files[p.x()]; exist {
|
||||
if m.open {
|
||||
return nil, errFileOpen
|
||||
}
|
||||
m.open = true
|
||||
return &memReader{Reader: bytes.NewReader(m.Bytes()), m: m}, nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
func (p *memFilePtr) Create() (Writer, error) {
|
||||
ms := p.ms
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
m, exist := ms.files[p.x()]
|
||||
if exist {
|
||||
if m.open {
|
||||
return nil, errFileOpen
|
||||
}
|
||||
m.Reset()
|
||||
} else {
|
||||
m = &memFile{ms: ms}
|
||||
ms.files[p.x()] = m
|
||||
}
|
||||
m.open = true
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (p *memFilePtr) Replace(newfile File) error {
|
||||
p1, ok := newfile.(*memFilePtr)
|
||||
if !ok {
|
||||
return ErrInvalidFile
|
||||
}
|
||||
ms := p.ms
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
m1, exist := ms.files[p1.x()]
|
||||
if !exist {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
m0, exist := ms.files[p.x()]
|
||||
if (exist && m0.open) || m1.open {
|
||||
return errFileOpen
|
||||
}
|
||||
delete(ms.files, p1.x())
|
||||
ms.files[p.x()] = m1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *memFilePtr) Type() FileType {
|
||||
return p.t
|
||||
}
|
||||
|
||||
func (p *memFilePtr) Num() uint64 {
|
||||
return p.num
|
||||
}
|
||||
|
||||
func (p *memFilePtr) Remove() error {
|
||||
ms := p.ms
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
if _, exist := ms.files[p.x()]; exist {
|
||||
delete(ms.files, p.x())
|
||||
return nil
|
||||
}
|
||||
return os.ErrNotExist
|
||||
}
|
||||
21
_vendor/vendor/github.com/syndtr/goleveldb/leveldb/util/pool.go
generated
vendored
@ -1,21 +0,0 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build go1.3
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Pool struct {
|
||||
sync.Pool
|
||||
}
|
||||
|
||||
func NewPool(cap int) *Pool {
|
||||
return &Pool{}
|
||||
}
|
||||
33
_vendor/vendor/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
generated
vendored
@ -1,33 +0,0 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !go1.3
|
||||
|
||||
package util
|
||||
|
||||
type Pool struct {
|
||||
pool chan interface{}
|
||||
}
|
||||
|
||||
func (p *Pool) Get() interface{} {
|
||||
select {
|
||||
case x := <-p.pool:
|
||||
return x
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pool) Put(x interface{}) {
|
||||
select {
|
||||
case p.pool <- x:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func NewPool(cap int) *Pool {
|
||||
return &Pool{pool: make(chan interface{}, cap)}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff