feat: update restore to support InfluxDB 2.0.x (#185)

This commit is contained in:
Daniel Moran
2021-07-09 15:36:44 -04:00
committed by GitHub
parent 95f190bf64
commit c3feea5900
17 changed files with 380 additions and 161 deletions

View File

@@ -11,8 +11,6 @@ import (
"mime/multipart"
"os"
"path/filepath"
"regexp"
"strconv"
"time"
"github.com/influxdata/influx-cli/v2/api"
@@ -74,7 +72,7 @@ func (c *Client) Backup(ctx context.Context, params *Params) error {
c.baseName = time.Now().UTC().Format(backupFilenamePattern)
// The APIs we use to back up metadata depend on the server's version.
legacyServer, err := c.serverIsLegacy(ctx)
legacyServer, err := br.ServerIsLegacy(ctx, c.HealthApi)
if err != nil {
return err
}
@@ -96,40 +94,6 @@ func (c *Client) Backup(ctx context.Context, params *Params) error {
return nil
}
var semverRegex = regexp.MustCompile(`(\d+)\.(\d+)\.(\d+).*`)
// serverIsLegacy checks if the InfluxDB server targeted by the backup is running v2.0.x,
// which used different APIs for backups.
func (c Client) serverIsLegacy(ctx context.Context) (bool, error) {
res, err := c.GetHealth(ctx).Execute()
if err != nil {
return false, fmt.Errorf("API compatibility check failed: %w", err)
}
var version string
if res.Version != nil {
version = *res.Version
}
matches := semverRegex.FindSubmatch([]byte(version))
if matches == nil {
// Assume non-semver versions are only reported by nightlies & dev builds, which
// should now support the new APIs.
log.Printf("WARN: Couldn't parse version %q reported by server, assuming latest backup APIs are supported", version)
return false, nil
}
// matches[0] is the entire matched string, capture groups start at 1.
majorStr, minorStr := matches[1], matches[2]
// Ignore the err values here because the regex-match ensures we can parse the captured
// groups as integers.
major, _ := strconv.Atoi(string(majorStr))
minor, _ := strconv.Atoi(string(minorStr))
if major < 2 {
return false, fmt.Errorf("InfluxDB v%d does not support the APIs required for backups", major)
}
return minor == 0, nil
}
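
The relocated helper is now invoked above as `br.ServerIsLegacy(ctx, c.HealthApi)`. A minimal sketch of what that shared function in `internal/backup_restore` might look like, assuming it keeps the semver-parsing logic of the removed method and simply takes the health API as a parameter (the package layout and everything beyond the visible call site are assumptions):

```go
// Hedged sketch only: mirrors the removed Client method above; the real file
// in internal/backup_restore may differ in layout and naming.
package backup_restore

import (
	"context"
	"fmt"
	"log"
	"regexp"
	"strconv"

	"github.com/influxdata/influx-cli/v2/api"
)

var semverRegex = regexp.MustCompile(`(\d+)\.(\d+)\.(\d+).*`)

// ServerIsLegacy checks if the targeted InfluxDB server is running v2.0.x,
// which used different APIs for backups and restores.
func ServerIsLegacy(ctx context.Context, client api.HealthApi) (bool, error) {
	res, err := client.GetHealth(ctx).Execute()
	if err != nil {
		return false, fmt.Errorf("API compatibility check failed: %w", err)
	}
	var version string
	if res.Version != nil {
		version = *res.Version
	}
	matches := semverRegex.FindSubmatch([]byte(version))
	if matches == nil {
		// Non-semver versions should only come from nightlies & dev builds,
		// which support the new APIs.
		log.Printf("WARN: Couldn't parse version %q reported by server, assuming latest backup APIs are supported", version)
		return false, nil
	}
	// matches[0] is the entire match; capture groups start at 1. The regex
	// guarantees both captured groups parse as integers.
	major, _ := strconv.Atoi(string(matches[1]))
	minor, _ := strconv.Atoi(string(matches[2]))
	if major < 2 {
		return false, fmt.Errorf("InfluxDB v%d does not support the APIs required for backups", major)
	}
	return minor == 0, nil
}
```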
// downloadMetadata downloads a snapshot of the KV store, SQL DB, and bucket
// manifests from the server. KV and SQL are written to local files. Bucket manifests
// are parsed into a slice for additional processing.

View File

@@ -337,61 +337,3 @@ func (e *notFoundErr) Error() string {
func (e *notFoundErr) ErrorCode() api.ErrorCode {
return api.ERRORCODE_NOT_FOUND
}
func TestBackup_ServerIsLegacy(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
versionStr *string
legacy bool
wantErr string
}{
{
name: "2.0.x",
versionStr: api.PtrString("2.0.7"),
legacy: true,
},
{
name: "2.1.x",
versionStr: api.PtrString("2.1.0-RC1"),
},
{
name: "nightly",
versionStr: api.PtrString("nightly-2020-01-01"),
},
{
name: "dev",
versionStr: api.PtrString("some.custom-version.2"),
},
{
name: "1.x",
versionStr: api.PtrString("1.9.3"),
wantErr: "InfluxDB v1 does not support the APIs",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
healthApi := mock.NewMockHealthApi(ctrl)
healthApi.EXPECT().GetHealth(gomock.Any()).Return(api.ApiGetHealthRequest{ApiService: healthApi})
healthApi.EXPECT().GetHealthExecute(gomock.Any()).Return(api.HealthCheck{Version: tc.versionStr}, nil)
client := Client{HealthApi: healthApi}
isLegacy, err := client.serverIsLegacy(context.Background())
if tc.wantErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.wantErr)
return
}
require.NoError(t, err)
require.Equal(t, tc.legacy, isLegacy)
})
}
}
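
The deleted table test presumably moves alongside the helper. A hedged sketch of a single case exercising the relocated function directly, reusing the mock setup visible above (the test package name and import paths are assumptions inferred from the identifiers used in the deleted test):

```go
// Hedged sketch only: one case of the deleted table test, retargeted at the
// shared helper; import paths are inferred, not taken from this diff.
package backup_restore_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/influxdata/influx-cli/v2/api"
	br "github.com/influxdata/influx-cli/v2/internal/backup_restore"
	"github.com/influxdata/influx-cli/v2/internal/mock"
	"github.com/stretchr/testify/require"
)

func TestServerIsLegacy_20x(t *testing.T) {
	ctrl := gomock.NewController(t)
	healthApi := mock.NewMockHealthApi(ctrl)
	healthApi.EXPECT().GetHealth(gomock.Any()).Return(api.ApiGetHealthRequest{ApiService: healthApi})
	healthApi.EXPECT().GetHealthExecute(gomock.Any()).Return(api.HealthCheck{Version: api.PtrString("2.0.7")}, nil)

	// A 2.0.x version string should be detected as a legacy server.
	isLegacy, err := br.ServerIsLegacy(context.Background(), healthApi)
	require.NoError(t, err)
	require.True(t, isLegacy)
}
```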

View File

@@ -8,7 +8,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influx-cli/v2/api"
"github.com/influxdata/influx-cli/v2/clients/backup/internal"
br "github.com/influxdata/influx-cli/v2/internal/backup_restore"
"go.etcd.io/bbolt"
)
@@ -160,7 +160,7 @@ func extractBucketManifest(boltPath string) ([]api.BucketMetadataManifest, error
return errors.New("v1 database info not found in local KV store")
}
var pb internal.Data
var pb br.Data
if err := proto.Unmarshal(fullMeta, &pb); err != nil {
return fmt.Errorf("failed to unmarshal v1 database info: %w", err)
}
@@ -193,7 +193,7 @@ func extractBucketManifest(boltPath string) ([]api.BucketMetadataManifest, error
return manifests, nil
}
func unmarshalRawDBI(rawDBI internal.DatabaseInfo) influxdbV1DatabaseInfo {
func unmarshalRawDBI(rawDBI br.DatabaseInfo) influxdbV1DatabaseInfo {
dbi := influxdbV1DatabaseInfo{
Name: rawDBI.GetName(),
DefaultRetentionPolicy: rawDBI.GetDefaultRetentionPolicy(),
@@ -208,7 +208,7 @@ func unmarshalRawDBI(rawDBI internal.DatabaseInfo) influxdbV1DatabaseInfo {
return dbi
}
func unmarshalRawRPI(rawRPI internal.RetentionPolicyInfo) influxdbV1RetentionPolicyInfo {
func unmarshalRawRPI(rawRPI br.RetentionPolicyInfo) influxdbV1RetentionPolicyInfo {
rpi := influxdbV1RetentionPolicyInfo{
Name: rawRPI.GetName(),
ReplicaN: int32(rawRPI.GetReplicaN()),
@@ -236,7 +236,7 @@ func unmarshalRawRPI(rawRPI internal.RetentionPolicyInfo) influxdbV1RetentionPol
return rpi
}
func unmarshalRawSGI(rawSGI internal.ShardGroupInfo) influxdbV1ShardGroupInfo {
func unmarshalRawSGI(rawSGI br.ShardGroupInfo) influxdbV1ShardGroupInfo {
sgi := influxdbV1ShardGroupInfo{
ID: int64(rawSGI.GetID()),
StartTime: time.Unix(0, rawSGI.GetStartTime()).UTC(),
@@ -254,7 +254,7 @@ func unmarshalRawSGI(rawSGI internal.ShardGroupInfo) influxdbV1ShardGroupInfo {
return sgi
}
func unmarshalRawShard(rawShard internal.ShardInfo) influxdbV1ShardInfo {
func unmarshalRawShard(rawShard br.ShardInfo) influxdbV1ShardInfo {
si := influxdbV1ShardInfo{
ID: int64(rawShard.GetID()),
}

View File

@@ -1,12 +0,0 @@
# V1 Meta Protobufs
For compatibility with backups made via the v2.0.x `influx` CLI, we include logic
for opening & reading backed-up KV stores to derive bucket manifests. Part of that
process requires reading & unmarshalling V1 database info, serialized as protobuf.
To support that requirement, we've copied the `meta.proto` definition out of `influxdb`
and into this repository. This file isn't intended to be modified.
If `meta.pb.go` ever needs to be re-generated, follow these steps:
1. Install `protoc` (e.g. via `brew install protobuf`)
2. Run `go install github.com/gogo/protobuf/protoc-gen-gogo` from within this repository
3. Run `go generate <path to clients/backup>`
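
For reference, step 3 implies a `go:generate` directive somewhere under `clients/backup`; a hedged sketch of what such a directive might look like (the exact protoc flags and output path are assumptions, not taken from this repository):

```go
// Hypothetical directive for re-creating meta.pb.go with the gogo plugin from
// step 2; the real flags and paths in the repository may differ.
package internal

//go:generate protoc --gogo_out=. meta.proto
```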

File diff suppressed because it is too large

View File

@@ -1,397 +0,0 @@
// NOTE: This is a snapshot of the schema used to serialize V1 database info
// in the 2.0.x line of InfluxDB. The copy is here so we can support backing
// up from older DB versions; it's not intended to be kept up-to-date.
package internal;
//========================================================================
//
// Metadata
//
//========================================================================
message Data {
required uint64 Term = 1;
required uint64 Index = 2;
required uint64 ClusterID = 3;
repeated NodeInfo Nodes = 4;
repeated DatabaseInfo Databases = 5;
repeated UserInfo Users = 6;
required uint64 MaxNodeID = 7;
required uint64 MaxShardGroupID = 8;
required uint64 MaxShardID = 9;
// added for 0.10.0
repeated NodeInfo DataNodes = 10;
repeated NodeInfo MetaNodes = 11;
}
message NodeInfo {
required uint64 ID = 1;
required string Host = 2;
optional string TCPHost = 3;
}
message DatabaseInfo {
required string Name = 1;
required string DefaultRetentionPolicy = 2;
repeated RetentionPolicyInfo RetentionPolicies = 3;
repeated ContinuousQueryInfo ContinuousQueries = 4;
}
message RetentionPolicySpec {
optional string Name = 1;
optional int64 Duration = 2;
optional int64 ShardGroupDuration = 3;
optional uint32 ReplicaN = 4;
}
message RetentionPolicyInfo {
required string Name = 1;
required int64 Duration = 2;
required int64 ShardGroupDuration = 3;
required uint32 ReplicaN = 4;
repeated ShardGroupInfo ShardGroups = 5;
repeated SubscriptionInfo Subscriptions = 6;
}
message ShardGroupInfo {
required uint64 ID = 1;
required int64 StartTime = 2;
required int64 EndTime = 3;
required int64 DeletedAt = 4;
repeated ShardInfo Shards = 5;
optional int64 TruncatedAt = 6;
}
message ShardInfo {
required uint64 ID = 1;
repeated uint64 OwnerIDs = 2 [deprecated=true];
repeated ShardOwner Owners = 3;
}
message SubscriptionInfo{
required string Name = 1;
required string Mode = 2;
repeated string Destinations = 3;
}
message ShardOwner {
required uint64 NodeID = 1;
}
message ContinuousQueryInfo {
required string Name = 1;
required string Query = 2;
}
message UserInfo {
required string Name = 1;
required string Hash = 2;
required bool Admin = 3;
repeated UserPrivilege Privileges = 4;
}
message UserPrivilege {
required string Database = 1;
required int32 Privilege = 2;
}
//========================================================================
//
// COMMANDS
//
//========================================================================
message Command {
extensions 100 to max;
enum Type {
CreateNodeCommand = 1;
DeleteNodeCommand = 2;
CreateDatabaseCommand = 3;
DropDatabaseCommand = 4;
CreateRetentionPolicyCommand = 5;
DropRetentionPolicyCommand = 6;
SetDefaultRetentionPolicyCommand = 7;
UpdateRetentionPolicyCommand = 8;
CreateShardGroupCommand = 9;
DeleteShardGroupCommand = 10;
CreateContinuousQueryCommand = 11;
DropContinuousQueryCommand = 12;
CreateUserCommand = 13;
DropUserCommand = 14;
UpdateUserCommand = 15;
SetPrivilegeCommand = 16;
SetDataCommand = 17;
SetAdminPrivilegeCommand = 18;
UpdateNodeCommand = 19;
CreateSubscriptionCommand = 21;
DropSubscriptionCommand = 22;
RemovePeerCommand = 23;
CreateMetaNodeCommand = 24;
CreateDataNodeCommand = 25;
UpdateDataNodeCommand = 26;
DeleteMetaNodeCommand = 27;
DeleteDataNodeCommand = 28;
SetMetaNodeCommand = 29;
DropShardCommand = 30;
}
required Type type = 1;
}
// This isn't used in >= 0.10.0. Kept around for upgrade purposes. Instead
// look at CreateDataNodeCommand and CreateMetaNodeCommand
message CreateNodeCommand {
extend Command {
optional CreateNodeCommand command = 101;
}
required string Host = 1;
required uint64 Rand = 2;
}
message DeleteNodeCommand {
extend Command {
optional DeleteNodeCommand command = 102;
}
required uint64 ID = 1;
required bool Force = 2;
}
message CreateDatabaseCommand {
extend Command {
optional CreateDatabaseCommand command = 103;
}
required string Name = 1;
optional RetentionPolicyInfo RetentionPolicy = 2;
}
message DropDatabaseCommand {
extend Command {
optional DropDatabaseCommand command = 104;
}
required string Name = 1;
}
message CreateRetentionPolicyCommand {
extend Command {
optional CreateRetentionPolicyCommand command = 105;
}
required string Database = 1;
required RetentionPolicyInfo RetentionPolicy = 2;
}
message DropRetentionPolicyCommand {
extend Command {
optional DropRetentionPolicyCommand command = 106;
}
required string Database = 1;
required string Name = 2;
}
message SetDefaultRetentionPolicyCommand {
extend Command {
optional SetDefaultRetentionPolicyCommand command = 107;
}
required string Database = 1;
required string Name = 2;
}
message UpdateRetentionPolicyCommand {
extend Command {
optional UpdateRetentionPolicyCommand command = 108;
}
required string Database = 1;
required string Name = 2;
optional string NewName = 3;
optional int64 Duration = 4;
optional uint32 ReplicaN = 5;
}
message CreateShardGroupCommand {
extend Command {
optional CreateShardGroupCommand command = 109;
}
required string Database = 1;
required string Policy = 2;
required int64 Timestamp = 3;
}
message DeleteShardGroupCommand {
extend Command {
optional DeleteShardGroupCommand command = 110;
}
required string Database = 1;
required string Policy = 2;
required uint64 ShardGroupID = 3;
}
message CreateContinuousQueryCommand {
extend Command {
optional CreateContinuousQueryCommand command = 111;
}
required string Database = 1;
required string Name = 2;
required string Query = 3;
}
message DropContinuousQueryCommand {
extend Command {
optional DropContinuousQueryCommand command = 112;
}
required string Database = 1;
required string Name = 2;
}
message CreateUserCommand {
extend Command {
optional CreateUserCommand command = 113;
}
required string Name = 1;
required string Hash = 2;
required bool Admin = 3;
}
message DropUserCommand {
extend Command {
optional DropUserCommand command = 114;
}
required string Name = 1;
}
message UpdateUserCommand {
extend Command {
optional UpdateUserCommand command = 115;
}
required string Name = 1;
required string Hash = 2;
}
message SetPrivilegeCommand {
extend Command {
optional SetPrivilegeCommand command = 116;
}
required string Username = 1;
required string Database = 2;
required int32 Privilege = 3;
}
message SetDataCommand {
extend Command {
optional SetDataCommand command = 117;
}
required Data Data = 1;
}
message SetAdminPrivilegeCommand {
extend Command {
optional SetAdminPrivilegeCommand command = 118;
}
required string Username = 1;
required bool Admin = 2;
}
message UpdateNodeCommand {
extend Command {
optional UpdateNodeCommand command = 119;
}
required uint64 ID = 1;
required string Host = 2;
}
message CreateSubscriptionCommand {
extend Command {
optional CreateSubscriptionCommand command = 121;
}
required string Name = 1;
required string Database = 2;
required string RetentionPolicy = 3;
required string Mode = 4;
repeated string Destinations = 5;
}
message DropSubscriptionCommand {
extend Command {
optional DropSubscriptionCommand command = 122;
}
required string Name = 1;
required string Database = 2;
required string RetentionPolicy = 3;
}
message RemovePeerCommand {
extend Command {
optional RemovePeerCommand command = 123;
}
optional uint64 ID = 1;
required string Addr = 2;
}
message CreateMetaNodeCommand {
extend Command {
optional CreateMetaNodeCommand command = 124;
}
required string HTTPAddr = 1;
required string TCPAddr = 2;
required uint64 Rand = 3;
}
message CreateDataNodeCommand {
extend Command {
optional CreateDataNodeCommand command = 125;
}
required string HTTPAddr = 1;
required string TCPAddr = 2;
}
message UpdateDataNodeCommand {
extend Command {
optional UpdateDataNodeCommand command = 126;
}
required uint64 ID = 1;
required string Host = 2;
required string TCPHost = 3;
}
message DeleteMetaNodeCommand {
extend Command {
optional DeleteMetaNodeCommand command = 127;
}
required uint64 ID = 1;
}
message DeleteDataNodeCommand {
extend Command {
optional DeleteDataNodeCommand command = 128;
}
required uint64 ID = 1;
}
message Response {
required bool OK = 1;
optional string Error = 2;
optional uint64 Index = 3;
}
// SetMetaNodeCommand is for the initial metanode in a cluster or
// if the single host restarts and its hostname changes, this will update it
message SetMetaNodeCommand {
extend Command {
optional SetMetaNodeCommand command = 129;
}
required string HTTPAddr = 1;
required string TCPAddr = 2;
required uint64 Rand = 3;
}
message DropShardCommand {
extend Command {
optional DropShardCommand command = 130;
}
required uint64 ID = 1;
}

View File

@@ -94,12 +94,12 @@ func ConvertShard(manifest api.ShardManifest, getShard func(shardId int64) (*br.
m := br.ManifestShardEntry{
ID: manifest.Id,
ShardOwners: make([]br.ShardOwner, len(manifest.ShardOwners)),
ShardOwners: make([]br.ShardOwnerEntry, len(manifest.ShardOwners)),
ManifestFileEntry: *shardFileInfo,
}
for i, o := range manifest.ShardOwners {
m.ShardOwners[i] = br.ShardOwner{
m.ShardOwners[i] = br.ShardOwnerEntry{
NodeID: o.NodeID,
}
}
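
The rename from `br.ShardOwner` to `br.ShardOwnerEntry` likely avoids a clash with the generated protobuf `ShardOwner` type that now lives in the same `internal/backup_restore` package. A rough sketch of the manifest types as implied by the fields used in this hunk (field types, JSON tags, and any additional fields are assumptions):

```go
// Hedged sketch only: shapes inferred from the fields referenced in this diff;
// the real definitions in internal/backup_restore may differ.
package backup_restore

type ShardOwnerEntry struct {
	NodeID int64 `json:"nodeID"`
}

type ManifestFileEntry struct {
	FileName string `json:"fileName"`
	Size     int64  `json:"size"`
}

type ManifestShardEntry struct {
	ID                int64             `json:"id"`
	ShardOwners       []ShardOwnerEntry `json:"shardOwners"`
	ManifestFileEntry                   // embedded: FileName, Size
}
```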

View File

@@ -123,7 +123,7 @@ func TestConvertBucketManifest(t *testing.T) {
Shards: []br.ManifestShardEntry{
{
ID: 10,
ShardOwners: []br.ShardOwner{{NodeID: 1}},
ShardOwners: []br.ShardOwnerEntry{{NodeID: 1}},
ManifestFileEntry: br.ManifestFileEntry{
FileName: "10.gz",
Size: 1000,
@@ -140,7 +140,7 @@ func TestConvertBucketManifest(t *testing.T) {
Shards: []br.ManifestShardEntry{
{
ID: 30,
ShardOwners: []br.ShardOwner{},
ShardOwners: []br.ShardOwnerEntry{},
ManifestFileEntry: br.ManifestFileEntry{
FileName: "30.gz",
Size: 3000,