package checkpoints
|
|
|
|
import (
|
|
"path/filepath"
|
|
"testing"
|
|
|
|
. "github.com/pingcap/check"
|
|
"github.com/pingcap/tidb/br/pkg/lightning/checkpoints/checkpointspb"
|
|
"github.com/pingcap/tidb/br/pkg/lightning/mydump"
|
|
"github.com/pingcap/tidb/br/pkg/lightning/verification"
|
|
)
|
|
|
|
// Test hooks the gocheck suite runner into the standard `go test` framework;
// gocheck then runs every Test* method on the registered suites.
func Test(t *testing.T) {
	TestingT(t)
}
|
|
|
|
// Register the suite with gocheck so its Test* methods are discovered and run.
var _ = Suite(&checkpointSuite{})
|
|
|
|
// checkpointSuite groups the checkpoint-diff tests for the gocheck runner.
// It carries no state; every test builds its own fixtures.
type checkpointSuite struct{}
|
|
|
|
// TestMergeStatusCheckpoint verifies that StatusCheckpointMerger records a
// per-engine status under engines[EngineID], and that merging with
// WholeTableEngineID sets the table-level status instead. Each assertion
// checks the cumulative diff after one more merge.
func (s *checkpointSuite) TestMergeStatusCheckpoint(c *C) {
	cpd := NewTableCheckpointDiff()

	// Merge a status for engine 0: recorded under engines[0] only; the
	// table-level status remains unset (hasStatus: false).
	m := StatusCheckpointMerger{EngineID: 0, Status: CheckpointStatusImported}
	m.MergeInto(cpd)

	c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
		hasStatus: false,
		engines: map[int32]engineCheckpointDiff{
			0: {
				hasStatus: true,
				status:    CheckpointStatusImported,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
		},
	})

	// Merge a status for a different engine (-1): added alongside engine 0
	// without disturbing it.
	m = StatusCheckpointMerger{EngineID: -1, Status: CheckpointStatusLoaded}
	m.MergeInto(cpd)

	c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
		hasStatus: false,
		engines: map[int32]engineCheckpointDiff{
			0: {
				hasStatus: true,
				status:    CheckpointStatusImported,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
			-1: {
				hasStatus: true,
				status:    CheckpointStatusLoaded,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
		},
	})

	// Merge with WholeTableEngineID: sets the table-level status/hasStatus,
	// leaving all per-engine entries untouched.
	m = StatusCheckpointMerger{EngineID: WholeTableEngineID, Status: CheckpointStatusClosed}
	m.MergeInto(cpd)

	c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
		hasStatus: true,
		status:    CheckpointStatusClosed,
		engines: map[int32]engineCheckpointDiff{
			0: {
				hasStatus: true,
				status:    CheckpointStatusImported,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
			-1: {
				hasStatus: true,
				status:    CheckpointStatusLoaded,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
		},
	})

	// Merge a new status for an existing engine (-1): overwrites that
	// engine's entry; the table-level status stays at Closed.
	m = StatusCheckpointMerger{EngineID: -1, Status: CheckpointStatusAllWritten}
	m.MergeInto(cpd)

	c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
		hasStatus: true,
		status:    CheckpointStatusClosed,
		engines: map[int32]engineCheckpointDiff{
			0: {
				hasStatus: true,
				status:    CheckpointStatusImported,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
			-1: {
				hasStatus: true,
				status:    CheckpointStatusAllWritten,
				chunks:    make(map[ChunkCheckpointKey]chunkCheckpointDiff),
			},
		},
	})
}
|
|
|
|
func (s *checkpointSuite) TestMergeInvalidStatusCheckpoint(c *C) {
|
|
cpd := NewTableCheckpointDiff()
|
|
|
|
m := StatusCheckpointMerger{EngineID: 0, Status: CheckpointStatusLoaded}
|
|
m.MergeInto(cpd)
|
|
|
|
m = StatusCheckpointMerger{EngineID: -1, Status: CheckpointStatusAllWritten}
|
|
m.SetInvalid()
|
|
m.MergeInto(cpd)
|
|
|
|
c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
|
|
hasStatus: true,
|
|
status: CheckpointStatusAllWritten / 10,
|
|
engines: map[int32]engineCheckpointDiff{
|
|
0: {
|
|
hasStatus: true,
|
|
status: CheckpointStatusLoaded,
|
|
chunks: make(map[ChunkCheckpointKey]chunkCheckpointDiff),
|
|
},
|
|
-1: {
|
|
hasStatus: true,
|
|
status: CheckpointStatusAllWritten / 10,
|
|
chunks: make(map[ChunkCheckpointKey]chunkCheckpointDiff),
|
|
},
|
|
},
|
|
})
|
|
}
|
|
|
|
func (s *checkpointSuite) TestMergeChunkCheckpoint(c *C) {
|
|
cpd := NewTableCheckpointDiff()
|
|
|
|
key := ChunkCheckpointKey{Path: "/tmp/path/1.sql", Offset: 0}
|
|
|
|
m := ChunkCheckpointMerger{
|
|
EngineID: 2,
|
|
Key: key,
|
|
Checksum: verification.MakeKVChecksum(700, 15, 1234567890),
|
|
Pos: 1055,
|
|
RowID: 31,
|
|
}
|
|
m.MergeInto(cpd)
|
|
|
|
c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
|
|
engines: map[int32]engineCheckpointDiff{
|
|
2: {
|
|
chunks: map[ChunkCheckpointKey]chunkCheckpointDiff{
|
|
key: {
|
|
pos: 1055,
|
|
rowID: 31,
|
|
checksum: verification.MakeKVChecksum(700, 15, 1234567890),
|
|
},
|
|
},
|
|
},
|
|
},
|
|
})
|
|
|
|
m = ChunkCheckpointMerger{
|
|
EngineID: 2,
|
|
Key: key,
|
|
Checksum: verification.MakeKVChecksum(800, 20, 1357924680),
|
|
Pos: 1080,
|
|
RowID: 42,
|
|
}
|
|
m.MergeInto(cpd)
|
|
|
|
c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
|
|
engines: map[int32]engineCheckpointDiff{
|
|
2: {
|
|
chunks: map[ChunkCheckpointKey]chunkCheckpointDiff{
|
|
key: {
|
|
pos: 1080,
|
|
rowID: 42,
|
|
checksum: verification.MakeKVChecksum(800, 20, 1357924680),
|
|
},
|
|
},
|
|
},
|
|
},
|
|
})
|
|
}
|
|
|
|
func (s *checkpointSuite) TestRebaseCheckpoint(c *C) {
|
|
cpd := NewTableCheckpointDiff()
|
|
|
|
m := RebaseCheckpointMerger{AllocBase: 10000}
|
|
m.MergeInto(cpd)
|
|
|
|
c.Assert(cpd, DeepEquals, &TableCheckpointDiff{
|
|
hasRebase: true,
|
|
allocBase: 10000,
|
|
engines: make(map[int32]engineCheckpointDiff),
|
|
})
|
|
}
|
|
|
|
// TestApplyDiff verifies TableCheckpoint.Apply: merged entries targeting
// engines or chunks that exist in the checkpoint are applied, while entries
// targeting unknown engines (1234, 5678) or unknown chunk keys (/tmp/03.sql,
// /tmp/10.sql) are silently dropped — the expected result below shows them
// absent/unchanged.
func (s *checkpointSuite) TestApplyDiff(c *C) {
	// Starting checkpoint: two engines (-1 and 0); engine 0 owns two chunks.
	cp := TableCheckpoint{
		Status:    CheckpointStatusLoaded,
		AllocBase: 123,
		Engines: map[int32]*EngineCheckpoint{
			-1: {
				Status: CheckpointStatusLoaded,
			},
			0: {
				Status: CheckpointStatusLoaded,
				Chunks: []*ChunkCheckpoint{
					{
						Key: ChunkCheckpointKey{Path: "/tmp/01.sql"},
						Chunk: mydump.Chunk{
							Offset:       0,
							EndOffset:    20000,
							PrevRowIDMax: 0,
							RowIDMax:     1000,
						},
					},
					{
						Key: ChunkCheckpointKey{Path: "/tmp/04.sql"},
						Chunk: mydump.Chunk{
							Offset:       0,
							EndOffset:    15000,
							PrevRowIDMax: 1000,
							RowIDMax:     1300,
						},
					},
				},
			},
		},
	}

	cpd := NewTableCheckpointDiff()
	// Applied: engine -1 exists, so its status becomes Imported.
	(&StatusCheckpointMerger{EngineID: -1, Status: CheckpointStatusImported}).MergeInto(cpd)
	// Applied: WholeTableEngineID updates the table-level Status.
	(&StatusCheckpointMerger{EngineID: WholeTableEngineID, Status: CheckpointStatusAllWritten}).MergeInto(cpd)
	// Dropped: engine 1234 is not in cp.Engines.
	(&StatusCheckpointMerger{EngineID: 1234, Status: CheckpointStatusAnalyzeSkipped}).MergeInto(cpd)
	// Applied: AllocBase is rebased to 11111.
	(&RebaseCheckpointMerger{AllocBase: 11111}).MergeInto(cpd)
	// Applied: engine 0 has a chunk keyed /tmp/01.sql; its Offset,
	// PrevRowIDMax and Checksum are updated in the expected result.
	(&ChunkCheckpointMerger{
		EngineID: 0,
		Key:      ChunkCheckpointKey{Path: "/tmp/01.sql"},
		Checksum: verification.MakeKVChecksum(3333, 4444, 5555),
		Pos:      6666,
		RowID:    777,
	}).MergeInto(cpd)
	// Dropped: engine 5678 does not exist (so /tmp/04.sql stays unchanged).
	(&ChunkCheckpointMerger{
		EngineID: 5678,
		Key:      ChunkCheckpointKey{Path: "/tmp/04.sql"},
		Pos:      9999,
		RowID:    888,
	}).MergeInto(cpd)
	// Dropped: engine 0 has no chunk keyed /tmp/03.sql.
	(&ChunkCheckpointMerger{
		EngineID: 0,
		Key:      ChunkCheckpointKey{Path: "/tmp/03.sql"},
		Pos:      3636,
		RowID:    2222,
	}).MergeInto(cpd)
	// Dropped: engine 0 has no chunk keyed /tmp/10.sql.
	(&ChunkCheckpointMerger{
		EngineID: 0,
		Key:      ChunkCheckpointKey{Path: "/tmp/10.sql"},
		Pos:      4949,
		RowID:    444,
	}).MergeInto(cpd)

	cp.Apply(cpd)

	c.Assert(cp, DeepEquals, TableCheckpoint{
		Status:    CheckpointStatusAllWritten,
		AllocBase: 11111,
		Engines: map[int32]*EngineCheckpoint{
			-1: {
				Status: CheckpointStatusImported,
			},
			0: {
				Status: CheckpointStatusLoaded,
				Chunks: []*ChunkCheckpoint{
					{
						Key: ChunkCheckpointKey{Path: "/tmp/01.sql"},
						Chunk: mydump.Chunk{
							Offset:       6666,
							EndOffset:    20000,
							PrevRowIDMax: 777,
							RowIDMax:     1000,
						},
						Checksum: verification.MakeKVChecksum(3333, 4444, 5555),
					},
					{
						Key: ChunkCheckpointKey{Path: "/tmp/04.sql"},
						Chunk: mydump.Chunk{
							Offset:       0,
							EndOffset:    15000,
							PrevRowIDMax: 1000,
							RowIDMax:     1300,
						},
					},
				},
			},
		},
	})
}
|
|
|
|
func (s *checkpointSuite) TestCheckpointMarshallUnmarshall(c *C) {
|
|
path := filepath.Join(c.MkDir(), "filecheckpoint")
|
|
fileChkp := NewFileCheckpointsDB(path)
|
|
fileChkp.checkpoints.Checkpoints["a"] = &checkpointspb.TableCheckpointModel{
|
|
Status: uint32(CheckpointStatusLoaded),
|
|
Engines: map[int32]*checkpointspb.EngineCheckpointModel{},
|
|
}
|
|
fileChkp.Close()
|
|
|
|
fileChkp2 := NewFileCheckpointsDB(path)
|
|
// if not recover empty map explicitly, it will become nil
|
|
c.Assert(fileChkp2.checkpoints.Checkpoints["a"].Engines, NotNil)
|
|
}
|