feat: reimplement influx backup using new APIs (#116)

This commit is contained in:
Daniel Moran 2021-06-11 09:50:39 -04:00 committed by GitHub
parent ea2cbab67b
commit 943e8fba31
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 1462 additions and 50 deletions

View File

@ -558,7 +558,11 @@ func strlen(s string) int {
type GenericOpenAPIError struct {
body []byte
error string
model error
model ApiError
}
func (e GenericOpenAPIError) Body() []byte {
return e.body
}
// Error returns non-empty string if there was an error.
@ -569,12 +573,14 @@ func (e GenericOpenAPIError) Error() string {
return e.error
}
// Body returns the raw bytes of the response
func (e GenericOpenAPIError) Body() []byte {
return e.body
}
// Model returns the unpacked model of the error
func (e GenericOpenAPIError) Model() interface{} {
func (e GenericOpenAPIError) Model() ApiError {
return e.model
}
func (e GenericOpenAPIError) ErrorCode() ErrorCode {
if e.model == nil {
return ERRORCODE_INTERNAL_ERROR
}
return e.model.ErrorCode()
}

View File

@ -149,8 +149,12 @@ components:
$ref: "./openapi/src/common/schemas/WritePrecision.yml"
LineProtocolError:
$ref: "./openapi/src/common/schemas/LineProtocolError.yml"
LineProtocolErrorCode:
$ref: "./openapi/src/common/schemas/LineProtocolErrorCode.yml"
LineProtocolLengthError:
$ref: "./openapi/src/common/schemas/LineProtocolLengthError.yml"
LineProtocolLengthErrorCode:
$ref: "./openapi/src/common/schemas/LineProtocolLengthErrorCode.yml"
SchemaType:
$ref: "./openapi/src/common/schemas/SchemaType.yml"
ColumnDataType:

@ -1 +1 @@
Subproject commit f5b7af45fce2cfd074ba97bcafe907cba5f66d7f
Subproject commit 3d27d5c5b8d28a20607fe68f50e98e067e7e3371

View File

@ -5,6 +5,11 @@ import (
"strings"
)
// ApiError is the interface implemented by every error payload the Influx
// API can return: the standard error interface plus a machine-readable
// ErrorCode for programmatic handling.
type ApiError interface {
	error
	ErrorCode() ErrorCode
}
// Extensions to let our API error types be used as "standard" errors.
func (o *Error) Error() string {
@ -22,6 +27,10 @@ func (o *Error) Error() string {
return fmt.Sprintf("<%s>", o.Code)
}
// ErrorCode returns the machine-readable code carried by the API error,
// satisfying the ApiError interface.
func (o *Error) ErrorCode() ErrorCode {
	return o.Code
}
func (o *HealthCheck) Error() string {
if o.Status == HEALTHCHECKSTATUS_PASS {
// Make sure we aren't misusing HealthCheck responses.
@ -36,10 +45,47 @@ func (o *HealthCheck) Error() string {
return fmt.Sprintf("health check failed: %s", message)
}
// ErrorCode satisfies the ApiError interface for failed health checks.
// Health checks carry no specific error code, so a generic internal error
// is reported; using a passing check as an error is a programmer bug.
func (o *HealthCheck) ErrorCode() ErrorCode {
	if o.Status == HEALTHCHECKSTATUS_PASS {
		// Make sure we aren't misusing HealthCheck responses.
		panic("successful healthcheck used as an error!")
	}
	return ERRORCODE_INTERNAL_ERROR
}
// Error returns the human-readable message of the line-protocol error,
// satisfying the standard error interface.
func (o *LineProtocolError) Error() string {
	return o.Message
}
// ErrorCode translates the line-protocol-specific error code into the
// generic ErrorCode space used by the ApiError interface. The explicit
// "invalid" code and any unrecognized value both map to ERRORCODE_INVALID.
func (o *LineProtocolError) ErrorCode() ErrorCode {
	switch o.Code {
	case LINEPROTOCOLERRORCODE_INTERNAL_ERROR:
		return ERRORCODE_INTERNAL_ERROR
	case LINEPROTOCOLERRORCODE_NOT_FOUND:
		return ERRORCODE_NOT_FOUND
	case LINEPROTOCOLERRORCODE_CONFLICT:
		return ERRORCODE_CONFLICT
	case LINEPROTOCOLERRORCODE_EMPTY_VALUE:
		return ERRORCODE_EMPTY_VALUE
	case LINEPROTOCOLERRORCODE_UNAVAILABLE:
		return ERRORCODE_UNAVAILABLE
	}
	// LINEPROTOCOLERRORCODE_INVALID and anything unrecognized.
	return ERRORCODE_INVALID
}
// Error returns the human-readable message of the length error,
// satisfying the standard error interface.
func (o *LineProtocolLengthError) Error() string {
	return o.Message
}
// ErrorCode translates the length-specific error code into the generic
// ErrorCode space used by the ApiError interface.
//
// LINEPROTOCOLLENGTHERRORCODE_INVALID is the only code declared for this
// error type, and unrecognized codes degrade to the same value, so the
// original switch (whose single case fell through to an identical default)
// was dead logic; return the one possible value directly.
func (o *LineProtocolLengthError) ErrorCode() ErrorCode {
	return ERRORCODE_INVALID
}

View File

@ -16,8 +16,7 @@ import (
// LineProtocolError struct for LineProtocolError
type LineProtocolError struct {
// Code is the machine-readable error code.
Code string `json:"code"`
Code LineProtocolErrorCode `json:"code"`
// Message is a human-readable message.
Message string `json:"message"`
// Op describes the logical code operation during error. Useful for debugging.
@ -32,7 +31,7 @@ type LineProtocolError struct {
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewLineProtocolError(code string, message string, op string, err string) *LineProtocolError {
func NewLineProtocolError(code LineProtocolErrorCode, message string, op string, err string) *LineProtocolError {
this := LineProtocolError{}
this.Code = code
this.Message = message
@ -50,9 +49,9 @@ func NewLineProtocolErrorWithDefaults() *LineProtocolError {
}
// GetCode returns the Code field value
func (o *LineProtocolError) GetCode() string {
func (o *LineProtocolError) GetCode() LineProtocolErrorCode {
if o == nil {
var ret string
var ret LineProtocolErrorCode
return ret
}
@ -61,7 +60,7 @@ func (o *LineProtocolError) GetCode() string {
// GetCodeOk returns a tuple with the Code field value
// and a boolean to check if the value has been set.
func (o *LineProtocolError) GetCodeOk() (*string, bool) {
func (o *LineProtocolError) GetCodeOk() (*LineProtocolErrorCode, bool) {
if o == nil {
return nil, false
}
@ -69,7 +68,7 @@ func (o *LineProtocolError) GetCodeOk() (*string, bool) {
}
// SetCode sets field value
func (o *LineProtocolError) SetCode(v string) {
func (o *LineProtocolError) SetCode(v LineProtocolErrorCode) {
o.Code = v
}

View File

@ -0,0 +1,87 @@
/*
* Subset of Influx API covered by Influx CLI
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: 2.0.0
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package api
import (
"encoding/json"
"fmt"
)
// LineProtocolErrorCode Code is the machine-readable error code.
type LineProtocolErrorCode string

// List of LineProtocolErrorCode
const (
	LINEPROTOCOLERRORCODE_INTERNAL_ERROR LineProtocolErrorCode = "internal error"
	LINEPROTOCOLERRORCODE_NOT_FOUND      LineProtocolErrorCode = "not found"
	LINEPROTOCOLERRORCODE_CONFLICT       LineProtocolErrorCode = "conflict"
	LINEPROTOCOLERRORCODE_INVALID        LineProtocolErrorCode = "invalid"
	LINEPROTOCOLERRORCODE_EMPTY_VALUE    LineProtocolErrorCode = "empty value"
	LINEPROTOCOLERRORCODE_UNAVAILABLE    LineProtocolErrorCode = "unavailable"
)

// UnmarshalJSON decodes a JSON string into a LineProtocolErrorCode,
// rejecting any value that is not one of the declared enum constants.
// NOTE(review): generated by OpenAPI Generator — do not hand-edit; regenerate instead.
func (v *LineProtocolErrorCode) UnmarshalJSON(src []byte) error {
	var value string
	err := json.Unmarshal(src, &value)
	if err != nil {
		return err
	}
	enumTypeValue := LineProtocolErrorCode(value)
	// Membership check against the literal enum values duplicated by the generator.
	for _, existing := range []LineProtocolErrorCode{"internal error", "not found", "conflict", "invalid", "empty value", "unavailable"} {
		if existing == enumTypeValue {
			*v = enumTypeValue
			return nil
		}
	}
	return fmt.Errorf("%+v is not a valid LineProtocolErrorCode", value)
}

// Ptr returns reference to LineProtocolErrorCode value
func (v LineProtocolErrorCode) Ptr() *LineProtocolErrorCode {
	return &v
}

// NullableLineProtocolErrorCode wraps a LineProtocolErrorCode pointer with
// an explicit "has been set" flag, distinguishing unset from JSON null.
type NullableLineProtocolErrorCode struct {
	value *LineProtocolErrorCode
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableLineProtocolErrorCode) Get() *LineProtocolErrorCode {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableLineProtocolErrorCode) Set(val *LineProtocolErrorCode) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableLineProtocolErrorCode) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableLineProtocolErrorCode) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableLineProtocolErrorCode returns a wrapper already marked as set.
func NewNullableLineProtocolErrorCode(val *LineProtocolErrorCode) *NullableLineProtocolErrorCode {
	return &NullableLineProtocolErrorCode{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (JSON null when nil).
func (v NullableLineProtocolErrorCode) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it set,
// even when the payload is JSON null.
func (v *NullableLineProtocolErrorCode) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

View File

@ -16,8 +16,7 @@ import (
// LineProtocolLengthError struct for LineProtocolLengthError
type LineProtocolLengthError struct {
// Code is the machine-readable error code.
Code string `json:"code"`
Code LineProtocolLengthErrorCode `json:"code"`
// Message is a human-readable message.
Message string `json:"message"`
// Max length in bytes for a body of line-protocol.
@ -28,7 +27,7 @@ type LineProtocolLengthError struct {
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewLineProtocolLengthError(code string, message string, maxLength int32) *LineProtocolLengthError {
func NewLineProtocolLengthError(code LineProtocolLengthErrorCode, message string, maxLength int32) *LineProtocolLengthError {
this := LineProtocolLengthError{}
this.Code = code
this.Message = message
@ -45,9 +44,9 @@ func NewLineProtocolLengthErrorWithDefaults() *LineProtocolLengthError {
}
// GetCode returns the Code field value
func (o *LineProtocolLengthError) GetCode() string {
func (o *LineProtocolLengthError) GetCode() LineProtocolLengthErrorCode {
if o == nil {
var ret string
var ret LineProtocolLengthErrorCode
return ret
}
@ -56,7 +55,7 @@ func (o *LineProtocolLengthError) GetCode() string {
// GetCodeOk returns a tuple with the Code field value
// and a boolean to check if the value has been set.
func (o *LineProtocolLengthError) GetCodeOk() (*string, bool) {
func (o *LineProtocolLengthError) GetCodeOk() (*LineProtocolLengthErrorCode, bool) {
if o == nil {
return nil, false
}
@ -64,7 +63,7 @@ func (o *LineProtocolLengthError) GetCodeOk() (*string, bool) {
}
// SetCode sets field value
func (o *LineProtocolLengthError) SetCode(v string) {
func (o *LineProtocolLengthError) SetCode(v LineProtocolLengthErrorCode) {
o.Code = v
}

View File

@ -0,0 +1,82 @@
/*
* Subset of Influx API covered by Influx CLI
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: 2.0.0
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package api
import (
"encoding/json"
"fmt"
)
// LineProtocolLengthErrorCode Code is the machine-readable error code.
type LineProtocolLengthErrorCode string

// List of LineProtocolLengthErrorCode
const (
	LINEPROTOCOLLENGTHERRORCODE_INVALID LineProtocolLengthErrorCode = "invalid"
)

// UnmarshalJSON decodes a JSON string into a LineProtocolLengthErrorCode,
// rejecting any value that is not one of the declared enum constants.
// NOTE(review): generated by OpenAPI Generator — do not hand-edit; regenerate instead.
func (v *LineProtocolLengthErrorCode) UnmarshalJSON(src []byte) error {
	var value string
	err := json.Unmarshal(src, &value)
	if err != nil {
		return err
	}
	enumTypeValue := LineProtocolLengthErrorCode(value)
	// Membership check against the literal enum values duplicated by the generator.
	for _, existing := range []LineProtocolLengthErrorCode{"invalid"} {
		if existing == enumTypeValue {
			*v = enumTypeValue
			return nil
		}
	}
	return fmt.Errorf("%+v is not a valid LineProtocolLengthErrorCode", value)
}

// Ptr returns reference to LineProtocolLengthErrorCode value
func (v LineProtocolLengthErrorCode) Ptr() *LineProtocolLengthErrorCode {
	return &v
}

// NullableLineProtocolLengthErrorCode wraps the enum pointer with an explicit
// "has been set" flag, distinguishing unset from JSON null.
type NullableLineProtocolLengthErrorCode struct {
	value *LineProtocolLengthErrorCode
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableLineProtocolLengthErrorCode) Get() *LineProtocolLengthErrorCode {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableLineProtocolLengthErrorCode) Set(val *LineProtocolLengthErrorCode) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableLineProtocolLengthErrorCode) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableLineProtocolLengthErrorCode) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableLineProtocolLengthErrorCode returns a wrapper already marked as set.
func NewNullableLineProtocolLengthErrorCode(val *LineProtocolLengthErrorCode) *NullableLineProtocolLengthErrorCode {
	return &NullableLineProtocolLengthErrorCode{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (JSON null when nil).
func (v NullableLineProtocolLengthErrorCode) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it set,
// even when the payload is JSON null.
func (v *NullableLineProtocolLengthErrorCode) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

View File

@ -307,13 +307,13 @@ func (c *APIClient) prepareRequest(
// Generate a new request
if body != nil {
var b io.Reader = body
if enc, ok := headerParams["Content-Encoding"]; ok && enc == "gzip" {
b, err = compressWithGzip(b)
if err != nil {
return nil, err
}
}
var b io.Reader = body
if enc, ok := headerParams["Content-Encoding"]; ok && enc == "gzip" {
b, err = compressWithGzip(b)
if err != nil {
return nil, err
}
}
localVarRequest, err = http.NewRequest(method, url.String(), b)
} else {
localVarRequest, err = http.NewRequest(method, url.String(), nil)
@ -486,17 +486,17 @@ func setBody(body interface{}, contentType string) (bodyBuf *bytes.Buffer, err e
}
func compressWithGzip(data io.Reader) (io.Reader, error) {
pr, pw := io.Pipe()
gw := gzip.NewWriter(pw)
var err error
pr, pw := io.Pipe()
gw := gzip.NewWriter(pw)
var err error
go func() {
_, err = io.Copy(gw, data)
gw.Close()
pw.Close()
}()
go func() {
_, err = io.Copy(gw, data)
gw.Close()
pw.Close()
}()
return pr, err
return pr, err
}
// detectContentType method is used to figure out `Request.Body` content type for request header
@ -576,25 +576,31 @@ func strlen(s string) int {
// GenericOpenAPIError Provides access to the body, error and model on returned errors.
type GenericOpenAPIError struct {
body []byte
body []byte
error string
model error
model ApiError
}
// Error returns non-empty string if there was an error.
func (e GenericOpenAPIError) Error() string {
if e.model != nil {
return e.model.Error()
}
return e.error
}
// Body returns the raw bytes of the response
func (e GenericOpenAPIError) Body() []byte {
return e.body
}
// Error returns non-empty string if there was an error.
func (e GenericOpenAPIError) Error() string {
if e.model != nil {
return e.model.Error()
}
return e.error
}
// Model returns the unpacked model of the error
func (e GenericOpenAPIError) Model() interface{} {
func (e GenericOpenAPIError) Model() ApiError {
return e.model
}
func (e GenericOpenAPIError) ErrorCode() ErrorCode {
if e.model == nil {
return ERRORCODE_INTERNAL_ERROR
}
return e.model.ErrorCode()
}

283
clients/backup/backup.go Normal file
View File

@ -0,0 +1,283 @@
package backup
import (
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"log"
"mime"
"mime/multipart"
"os"
"path/filepath"
"time"
"github.com/influxdata/influx-cli/v2/api"
"github.com/influxdata/influx-cli/v2/clients"
)
// Client runs the backup process, combining CLI plumbing with the backup API
// and carrying local state across the steps of a single backup run.
type Client struct {
	clients.CLI
	api.BackupApi

	// Local state tracked across steps in the backup process.
	baseName       string                       // timestamp-derived prefix shared by every file this run writes
	bucketMetadata []api.BucketMetadataManifest // bucket manifests parsed from the metadata snapshot
	manifest       Manifest                     // aggregate manifest written at the end of the backup
}

// Params holds the CLI-provided filters and output options for a backup.
// Empty filter fields match everything (see matches).
type Params struct {
	// Organization containing TSM data to back up.
	// If not set, all orgs will be included.
	OrgID string
	Org   string
	// Bucket containing TSM data to back up.
	// If not set, all buckets within the org filter will be included.
	BucketID string
	Bucket   string

	// Path to the directory where backup files should be written.
	Path string

	// Compression to use for local copies of snapshot files.
	Compression FileCompression
}
// matches reports whether bkt passes the org/bucket filters. A filter field
// left empty matches everything; a set field must match exactly.
func (p *Params) matches(bkt api.BucketMetadataManifest) bool {
	filters := []struct {
		want, got string
	}{
		{p.OrgID, bkt.OrganizationID},
		{p.Org, bkt.OrganizationName},
		{p.BucketID, bkt.BucketID},
		{p.Bucket, bkt.BucketName},
	}
	for _, f := range filters {
		if f.want != "" && f.got != f.want {
			return false
		}
	}
	return true
}
// backupFilenamePattern is the time.Time reference layout used to derive the
// per-run file prefix from the backup's UTC start time.
const backupFilenamePattern = "20060102T150405Z"

// Backup runs the full backup flow: it downloads the metadata snapshot (KV,
// SQL, bucket manifests), then the TSM data for every bucket matching the
// filters, and finally writes the aggregate manifest describing every file
// downloaded.
func (c *Client) Backup(ctx context.Context, params *Params) error {
	if err := os.MkdirAll(params.Path, 0777); err != nil {
		return err
	}
	// All files from this run share a timestamp-derived prefix.
	c.baseName = time.Now().UTC().Format(backupFilenamePattern)

	if err := c.downloadMetadata(ctx, params); err != nil {
		return fmt.Errorf("failed to backup metadata: %w", err)
	}
	if err := c.downloadBucketData(ctx, params); err != nil {
		return fmt.Errorf("failed to backup bucket data: %w", err)
	}
	if err := c.writeManifest(params); err != nil {
		return fmt.Errorf("failed to write backup manifest: %w", err)
	}
	return nil
}
// downloadMetadata downloads a snapshot of the KV store, SQL DB, and bucket
// manifests from the server. KV and SQL are written to local files. Bucket manifests
// are parsed into a slice for additional processing.
func (c *Client) downloadMetadata(ctx context.Context, params *Params) error {
	log.Println("INFO: Downloading metadata snapshot")
	// Request gzip over the wire; GunzipIfNeeded below decompresses only if
	// the server actually honored the request.
	rawResp, err := c.GetBackupMetadata(ctx).AcceptEncoding("gzip").Execute()
	if err != nil {
		return fmt.Errorf("failed to download metadata snapshot: %w", err)
	}

	kvName := fmt.Sprintf("%s.bolt", c.baseName)
	sqlName := fmt.Sprintf("%s.sqlite", c.baseName)

	// The payload is multipart; the boundary comes from the Content-Type header.
	_, contentParams, err := mime.ParseMediaType(rawResp.Header.Get("Content-Type"))
	if err != nil {
		return err
	}
	body, err := api.GunzipIfNeeded(rawResp)
	if err != nil {
		return err
	}
	defer body.Close()

	// writeFile copies one multipart section to a local file (gzipping when
	// requested) and returns the manifest entry describing the written file.
	writeFile := func(from io.Reader, to string) (ManifestFileEntry, error) {
		toPath := filepath.Join(params.Path, to)
		if params.Compression == GzipCompression {
			toPath = toPath + ".gz"
		}

		// Closure here so we can clean up file resources via `defer` without
		// returning from the whole function.
		if err := func() error {
			out, err := os.OpenFile(toPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
			if err != nil {
				return err
			}
			defer out.Close()

			var outW io.Writer = out
			if params.Compression == GzipCompression {
				gw := gzip.NewWriter(out)
				defer gw.Close()
				outW = gw
			}
			_, err = io.Copy(outW, from)
			return err
		}(); err != nil {
			return ManifestFileEntry{}, err
		}

		// Stat runs after the closure's deferred closes, so Size reflects the
		// final (possibly compressed) byte count on disk.
		fi, err := os.Stat(toPath)
		if err != nil {
			return ManifestFileEntry{}, err
		}
		return ManifestFileEntry{
			FileName:    fi.Name(),
			Size:        fi.Size(),
			Compression: params.Compression,
		}, nil
	}

	// Dispatch each multipart section by its Content-Disposition name.
	mr := multipart.NewReader(body, contentParams["boundary"])
	for {
		part, err := mr.NextPart()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		_, partParams, err := mime.ParseMediaType(part.Header.Get("Content-Disposition"))
		if err != nil {
			return err
		}
		switch name := partParams["name"]; name {
		case "kv":
			fi, err := writeFile(part, kvName)
			if err != nil {
				return fmt.Errorf("failed to save local copy of KV backup to %q: %w", kvName, err)
			}
			c.manifest.KV = fi
		case "sql":
			fi, err := writeFile(part, sqlName)
			if err != nil {
				return fmt.Errorf("failed to save local copy of SQL backup to %q: %w", sqlName, err)
			}
			c.manifest.SQL = fi
		case "buckets":
			fi, err := json.NewDecoder(part), error(nil)
			_ = fi
			if err != nil {
				return err
			}
			if err := json.NewDecoder(part).Decode(&c.bucketMetadata); err != nil {
				return fmt.Errorf("failed to decode bucket manifest from backup: %w", err)
			}
		default:
			return fmt.Errorf("response contained unexpected part %q", name)
		}
	}
	return nil
}
// downloadBucketData downloads TSM snapshots for each shard in the buckets matching
// the filter parameters provided over the CLI. Snapshots are written to local files.
//
// Bucket metadata must be pre-seeded via downloadMetadata before this method is called.
func (c *Client) downloadBucketData(ctx context.Context, params *Params) error {
	c.manifest.Buckets = make([]ManifestBucketEntry, 0, len(c.bucketMetadata))
	// Hoist the shard-download callback out of the loop; it closes over the
	// request context and CLI params shared by every bucket.
	downloadShard := func(shardId int64) (*ManifestFileEntry, error) {
		return c.downloadShardData(ctx, params, shardId)
	}
	for i := range c.bucketMetadata {
		bkt := c.bucketMetadata[i]
		if !params.matches(bkt) {
			continue
		}
		entry, err := ConvertBucketManifest(bkt, downloadShard)
		if err != nil {
			return err
		}
		c.manifest.Buckets = append(c.manifest.Buckets, entry)
	}
	return nil
}
// downloadShardData downloads the TSM snapshot for a single shard. The snapshot is written
// to a local file, and its metadata is returned for aggregation. A nil entry with a nil
// error means the shard disappeared from the server mid-backup.
//
// NOTE(review): value receiver here while the other download steps use
// *Client — it only reads c.baseName so this works, but consider unifying
// receivers for consistency.
func (c Client) downloadShardData(ctx context.Context, params *Params, shardId int64) (*ManifestFileEntry, error) {
	log.Printf("INFO: Backing up TSM for shard %d", shardId)
	res, err := c.GetBackupShardId(ctx, shardId).AcceptEncoding("gzip").Execute()
	if err != nil {
		// A "not found" API error means the shard was deleted after the
		// metadata snapshot was taken; skip it instead of failing the backup.
		if apiError, ok := err.(api.ApiError); ok {
			if apiError.ErrorCode() == api.ERRORCODE_NOT_FOUND {
				log.Printf("WARN: Shard %d removed during backup", shardId)
				return nil, nil
			}
		}
		return nil, err
	}
	defer res.Body.Close()

	fileName := fmt.Sprintf("%s.%d.tar", c.baseName, shardId)
	if params.Compression == GzipCompression {
		fileName = fileName + ".gz"
	}
	path := filepath.Join(params.Path, fileName)

	// Closure here so we can clean up file resources via `defer` without
	// returning from the whole function.
	if err := func() error {
		f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
		if err != nil {
			return err
		}
		defer f.Close()

		var inR io.Reader = res.Body
		var outW io.Writer = f

		// Make sure the locally-written data is compressed according to the user's request.
		// When the response is already gzipped and gzip output is wanted,
		// neither branch fires and the bytes pass through untouched.
		if params.Compression == GzipCompression && res.Header.Get("Content-Encoding") != "gzip" {
			gzw := gzip.NewWriter(outW)
			defer gzw.Close()
			outW = gzw
		}
		if params.Compression == NoCompression && res.Header.Get("Content-Encoding") == "gzip" {
			gzr, err := gzip.NewReader(inR)
			if err != nil {
				return err
			}
			defer gzr.Close()
			inR = gzr
		}

		_, err = io.Copy(outW, inR)
		return err
	}(); err != nil {
		return nil, err
	}

	// Stat the finished file so the manifest records its final on-disk size.
	fi, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	return &ManifestFileEntry{
		FileName:    fi.Name(),
		Size:        fi.Size(),
		Compression: params.Compression,
	}, nil
}
// writeManifest writes a description of all files downloaded as part of the backup process
// to the backup folder, encoded as JSON.
func (c Client) writeManifest(params *Params) error {
	name := c.baseName + ".manifest"
	path := filepath.Join(params.Path, name)
	out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer out.Close()

	encoder := json.NewEncoder(out)
	encoder.SetIndent("", " ")
	return encoder.Encode(c.manifest)
}

View File

@ -0,0 +1,355 @@
package backup
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
"github.com/golang/mock/gomock"
"github.com/influxdata/influx-cli/v2/api"
"github.com/influxdata/influx-cli/v2/clients"
"github.com/influxdata/influx-cli/v2/internal/mock"
"github.com/stretchr/testify/require"
)
// TestBackup_DownloadMetadata exercises Client.downloadMetadata against a
// mocked backup API, covering every combination of server-side response
// compression and requested local-file compression.
func TestBackup_DownloadMetadata(t *testing.T) {
	t.Parallel()

	fakeKV := strings.Repeat("I'm the bolt DB\n", 1234)
	fakeSQL := strings.Repeat("I'm the SQL!\n", 1234)
	bucketMetadata := []api.BucketMetadataManifest{
		{
			OrganizationID:         "123",
			OrganizationName:       "org",
			BucketID:               "456",
			BucketName:             "bucket1",
			DefaultRetentionPolicy: "foo",
			RetentionPolicies: []api.RetentionPolicyManifest{
				{Name: "foo"},
				{Name: "bar"},
			},
		},
		{
			OrganizationID:         "123",
			OrganizationName:       "org",
			BucketID:               "789",
			BucketName:             "bucket2",
			DefaultRetentionPolicy: "baz",
			RetentionPolicies: []api.RetentionPolicyManifest{
				{Name: "qux"},
				{Name: "baz"},
			},
		},
	}

	testCases := []struct {
		name                string
		compression         FileCompression
		responseCompression FileCompression
	}{
		{
			name:                "no gzip",
			compression:         NoCompression,
			responseCompression: NoCompression,
		},
		{
			name:                "response gzip, no local gzip",
			compression:         NoCompression,
			responseCompression: GzipCompression,
		},
		{
			name:                "no response gzip, local gzip",
			compression:         GzipCompression,
			responseCompression: NoCompression,
		},
		{
			name:                "all gzip",
			compression:         GzipCompression,
			responseCompression: GzipCompression,
		},
	}

	for _, tc := range testCases {
		tc := tc // capture the loop variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ctrl := gomock.NewController(t)
			backupApi := mock.NewMockBackupApi(ctrl)
			backupApi.EXPECT().GetBackupMetadata(gomock.Any()).
				Return(api.ApiGetBackupMetadataRequest{ApiService: backupApi})
			backupApi.EXPECT().GetBackupMetadataExecute(gomock.Any()).
				DoAndReturn(func(request api.ApiGetBackupMetadataRequest) (*http.Response, error) {
					// Build a (possibly gzipped) multipart body containing the
					// KV bolt dump, the SQL dump, and the bucket manifest JSON.
					out := bytes.Buffer{}
					var outW io.Writer = &out
					if tc.responseCompression == GzipCompression {
						gzw := gzip.NewWriter(outW)
						defer gzw.Close()
						outW = gzw
					}
					parts := []struct {
						name        string
						contentType string
						writeFn     func(io.Writer) error
					}{
						{
							name:        "kv",
							contentType: "application/octet-stream",
							writeFn: func(w io.Writer) error {
								_, err := w.Write([]byte(fakeKV))
								return err
							},
						},
						{
							name:        "sql",
							contentType: "application/octet-stream",
							writeFn: func(w io.Writer) error {
								_, err := w.Write([]byte(fakeSQL))
								return err
							},
						},
						{
							name:        "buckets",
							contentType: "application/json",
							writeFn: func(w io.Writer) error {
								enc := json.NewEncoder(w)
								return enc.Encode(bucketMetadata)
							},
						},
					}
					writer := multipart.NewWriter(outW)
					for _, part := range parts {
						pw, err := writer.CreatePart(map[string][]string{
							"Content-Type":        {part.contentType},
							"Content-Disposition": {fmt.Sprintf("attachment; name=%s", part.name)},
						})
						require.NoError(t, err)
						require.NoError(t, part.writeFn(pw))
					}
					require.NoError(t, writer.Close())
					res := http.Response{Header: http.Header{}, Body: ioutil.NopCloser(&out)}
					res.Header.Add("Content-Type", fmt.Sprintf("multipart/mixed; boundary=%s", writer.Boundary()))
					if tc.responseCompression == GzipCompression {
						res.Header.Add("Content-Encoding", "gzip")
					}
					return &res, nil
				})

			stdio := mock.NewMockStdIO(ctrl)
			writtenBytes := bytes.Buffer{}
			stdio.EXPECT().Write(gomock.Any()).DoAndReturn(writtenBytes.Write).AnyTimes()
			// NOTE(review): log.SetOutput mutates process-global state while
			// subtests run in parallel — confirm output can't interleave
			// across cases in a way that breaks assertions.
			log.SetOutput(stdio)

			cli := Client{
				CLI:       clients.CLI{StdIO: stdio},
				BackupApi: backupApi,
				baseName:  "test",
			}
			out, err := ioutil.TempDir("", "")
			require.NoError(t, err)
			defer os.RemoveAll(out)
			params := Params{
				Path:        out,
				Compression: tc.compression,
			}
			require.NoError(t, cli.downloadMetadata(context.Background(), &params))
			require.Equal(t, bucketMetadata, cli.bucketMetadata)

			// Verify the KV dump round-trips through the local file,
			// ungzipping first when local compression was requested.
			localKv, err := os.Open(filepath.Join(out, cli.manifest.KV.FileName))
			require.NoError(t, err)
			defer localKv.Close()
			var kvReader io.Reader = localKv
			if tc.compression == GzipCompression {
				gzr, err := gzip.NewReader(kvReader)
				require.NoError(t, err)
				defer gzr.Close()
				kvReader = gzr
			}
			kvBytes, err := ioutil.ReadAll(kvReader)
			require.NoError(t, err)
			require.Equal(t, fakeKV, string(kvBytes))

			// Same round-trip check for the SQL dump.
			localSql, err := os.Open(filepath.Join(out, cli.manifest.SQL.FileName))
			require.NoError(t, err)
			defer localSql.Close()
			var sqlReader io.Reader = localSql
			if tc.compression == GzipCompression {
				gzr, err := gzip.NewReader(sqlReader)
				require.NoError(t, err)
				defer gzr.Close()
				sqlReader = gzr
			}
			sqlBytes, err := ioutil.ReadAll(sqlReader)
			require.NoError(t, err)
			require.Equal(t, fakeSQL, string(sqlBytes))
		})
	}
}
// TestBackup_DownloadShardData exercises Client.downloadShardData against a
// mocked backup API, covering all compression combinations plus the case
// where the server reports the shard as deleted mid-backup.
func TestBackup_DownloadShardData(t *testing.T) {
	t.Parallel()

	fakeTsm := strings.Repeat("Time series data!\n", 1024)

	testCases := []struct {
		name                string
		compression         FileCompression
		responseCompression FileCompression
	}{
		{
			name:                "no gzip",
			compression:         NoCompression,
			responseCompression: NoCompression,
		},
		{
			name:                "response gzip, no local gzip",
			compression:         NoCompression,
			responseCompression: GzipCompression,
		},
		{
			name:                "no response gzip, local gzip",
			compression:         GzipCompression,
			responseCompression: NoCompression,
		},
		{
			name:                "all gzip",
			compression:         GzipCompression,
			responseCompression: GzipCompression,
		},
	}

	for _, tc := range testCases {
		tc := tc // capture the loop variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ctrl := gomock.NewController(t)
			backupApi := mock.NewMockBackupApi(ctrl)
			req := api.ApiGetBackupShardIdRequest{ApiService: backupApi}.ShardID(1)
			backupApi.EXPECT().GetBackupShardId(gomock.Any(), gomock.Eq(req.GetShardID())).Return(req)
			backupApi.EXPECT().GetBackupShardIdExecute(gomock.Any()).
				DoAndReturn(func(api.ApiGetBackupShardIdRequest) (*http.Response, error) {
					// Serve the fake TSM dump, gzipped when the case calls for it.
					out := bytes.Buffer{}
					var outW io.Writer = &out
					if tc.responseCompression == GzipCompression {
						gzw := gzip.NewWriter(outW)
						defer gzw.Close()
						outW = gzw
					}
					_, err := outW.Write([]byte(fakeTsm))
					require.NoError(t, err)
					res := http.Response{Header: http.Header{}, Body: ioutil.NopCloser(&out)}
					res.Header.Add("Content-Type", "application/octet-stream")
					if tc.responseCompression == GzipCompression {
						res.Header.Add("Content-Encoding", "gzip")
					}
					return &res, nil
				})

			stdio := mock.NewMockStdIO(ctrl)
			writtenBytes := bytes.Buffer{}
			stdio.EXPECT().Write(gomock.Any()).DoAndReturn(writtenBytes.Write).AnyTimes()
			// NOTE(review): log.SetOutput mutates process-global state while
			// subtests run in parallel — confirm output can't interleave
			// across cases in a way that breaks assertions.
			log.SetOutput(stdio)

			cli := Client{
				CLI:       clients.CLI{StdIO: stdio},
				BackupApi: backupApi,
				baseName:  "test",
			}
			out, err := ioutil.TempDir("", "")
			require.NoError(t, err)
			defer os.RemoveAll(out)
			params := Params{
				Path:        out,
				Compression: tc.compression,
			}
			metadata, err := cli.downloadShardData(context.Background(), &params, req.GetShardID())
			require.NoError(t, err)
			require.NotNil(t, metadata)

			// Verify the snapshot round-trips through the local file,
			// ungzipping first when local compression was requested.
			localShard, err := os.Open(filepath.Join(out, metadata.FileName))
			require.NoError(t, err)
			defer localShard.Close()
			var shardReader io.Reader = localShard
			if tc.compression == GzipCompression {
				gzr, err := gzip.NewReader(shardReader)
				require.NoError(t, err)
				defer gzr.Close()
				shardReader = gzr
			}
			shardBytes, err := ioutil.ReadAll(shardReader)
			require.NoError(t, err)
			require.Equal(t, fakeTsm, string(shardBytes))
		})
	}

	t.Run("shard deleted", func(t *testing.T) {
		t.Parallel()
		ctrl := gomock.NewController(t)
		backupApi := mock.NewMockBackupApi(ctrl)
		req := api.ApiGetBackupShardIdRequest{ApiService: backupApi}.ShardID(1)
		backupApi.EXPECT().GetBackupShardId(gomock.Any(), gomock.Eq(req.GetShardID())).Return(req)
		// A not-found ApiError should be swallowed and surfaced as a nil
		// manifest entry plus a WARN log line.
		backupApi.EXPECT().GetBackupShardIdExecute(gomock.Any()).Return(nil, &notFoundErr{})

		stdio := mock.NewMockStdIO(ctrl)
		writtenBytes := bytes.Buffer{}
		stdio.EXPECT().Write(gomock.Any()).DoAndReturn(writtenBytes.Write).AnyTimes()
		log.SetOutput(stdio)

		cli := Client{
			CLI:       clients.CLI{StdIO: stdio},
			BackupApi: backupApi,
			baseName:  "test",
		}
		out, err := ioutil.TempDir("", "")
		require.NoError(t, err)
		defer os.RemoveAll(out)
		params := Params{
			Path: out,
		}
		metadata, err := cli.downloadShardData(context.Background(), &params, req.GetShardID())
		require.NoError(t, err)
		require.Nil(t, metadata)
		require.Contains(t, writtenBytes.String(), fmt.Sprintf("WARN: Shard %d removed during backup", req.GetShardID()))
	})
}
// notFoundErr is a minimal api.ApiError stub reporting ERRORCODE_NOT_FOUND,
// used to simulate a shard being deleted mid-backup.
type notFoundErr struct{}

// Error satisfies the standard error interface.
func (e *notFoundErr) Error() string {
	return "not found"
}

// ErrorCode satisfies api.ApiError.
func (e *notFoundErr) ErrorCode() api.ErrorCode {
	return api.ERRORCODE_NOT_FOUND
}

192
clients/backup/manifest.go Normal file
View File

@ -0,0 +1,192 @@
package backup
import (
"fmt"
"time"
"github.com/influxdata/influx-cli/v2/api"
)
// FileCompression enumerates how local backup files are compressed.
type FileCompression int

const (
	// NoCompression writes backup files as-is.
	NoCompression FileCompression = iota
	// GzipCompression gzips backup files as they are written.
	GzipCompression
)

// Set parses a CLI-provided string into the receiver, implementing the
// flag.Value-style setter contract. Only "none" and "gzip" are accepted.
func (c *FileCompression) Set(v string) error {
	parsed, ok := map[string]FileCompression{
		"none": NoCompression,
		"gzip": GzipCompression,
	}[v]
	if !ok {
		return fmt.Errorf("unsupported format: %q", v)
	}
	*c = parsed
	return nil
}

// String renders the compression mode as its CLI name; it panics on a value
// outside the declared constants, which would indicate a programmer bug.
func (c FileCompression) String() string {
	if c == NoCompression {
		return "none"
	}
	if c == GzipCompression {
		return "gzip"
	}
	panic("Impossible!")
}
// Manifest is the top-level description of a completed backup, written as
// <baseName>.manifest alongside the downloaded files.
type Manifest struct {
	KV      ManifestFileEntry     `json:"kv"`
	SQL     ManifestFileEntry     `json:"sql"`
	Buckets []ManifestBucketEntry `json:"buckets"`
}

// ManifestFileEntry records one file written during the backup: its name,
// on-disk size in bytes, and how it was compressed locally.
type ManifestFileEntry struct {
	FileName    string          `json:"fileName"`
	Size        int64           `json:"size"`
	Compression FileCompression `json:"compression"`
}
// ConvertBucketManifest maps an API bucket manifest into the local manifest
// form, invoking getShard for every shard encountered so its snapshot can be
// downloaded and recorded.
func ConvertBucketManifest(manifest api.BucketMetadataManifest, getShard func(shardId int64) (*ManifestFileEntry, error)) (ManifestBucketEntry, error) {
	policies := make([]ManifestRetentionPolicy, len(manifest.RetentionPolicies))
	for i, rp := range manifest.RetentionPolicies {
		converted, err := ConvertRetentionPolicy(rp, getShard)
		if err != nil {
			return ManifestBucketEntry{}, err
		}
		policies[i] = converted
	}
	return ManifestBucketEntry{
		OrganizationID:         manifest.OrganizationID,
		OrganizationName:       manifest.OrganizationName,
		BucketID:               manifest.BucketID,
		BucketName:             manifest.BucketName,
		DefaultRetentionPolicy: manifest.DefaultRetentionPolicy,
		RetentionPolicies:      policies,
	}, nil
}
// ConvertRetentionPolicy maps an API retention-policy manifest into the
// local manifest form, downloading shard snapshots via getShard as it walks
// the policy's shard groups. Subscriptions are copied verbatim.
func ConvertRetentionPolicy(manifest api.RetentionPolicyManifest, getShard func(shardId int64) (*ManifestFileEntry, error)) (ManifestRetentionPolicy, error) {
	m := ManifestRetentionPolicy{
		Name:               manifest.Name,
		ReplicaN:           manifest.ReplicaN,
		Duration:           manifest.Duration,
		ShardGroupDuration: manifest.ShardGroupDuration,
		ShardGroups:        make([]ManifestShardGroup, len(manifest.ShardGroups)),
		Subscriptions:      make([]ManifestSubscription, len(manifest.Subscriptions)),
	}
	for i, sg := range manifest.ShardGroups {
		var err error
		m.ShardGroups[i], err = ConvertShardGroup(sg, getShard)
		if err != nil {
			return ManifestRetentionPolicy{}, err
		}
	}
	for i, s := range manifest.Subscriptions {
		m.Subscriptions[i] = ManifestSubscription{
			Name:         s.Name,
			Mode:         s.Mode,
			Destinations: s.Destinations,
		}
	}
	return m, nil
}
// ConvertShardGroup maps an API shard-group manifest into the local manifest
// form, downloading each shard's snapshot via getShard. Shards reported as
// deleted mid-backup (nil from ConvertShard) are silently dropped from the
// resulting group.
func ConvertShardGroup(manifest api.ShardGroupManifest, getShard func(shardId int64) (*ManifestFileEntry, error)) (ManifestShardGroup, error) {
	m := ManifestShardGroup{
		ID:          manifest.Id,
		StartTime:   manifest.StartTime,
		EndTime:     manifest.EndTime,
		DeletedAt:   manifest.DeletedAt,
		TruncatedAt: manifest.TruncatedAt,
		// Capacity len(manifest.Shards), but length 0: deleted shards may shrink the list.
		Shards: make([]ManifestShardEntry, 0, len(manifest.Shards)),
	}
	for _, sh := range manifest.Shards {
		maybeShard, err := ConvertShard(sh, getShard)
		if err != nil {
			return ManifestShardGroup{}, err
		}
		// Shard deleted mid-backup.
		if maybeShard == nil {
			continue
		}
		m.Shards = append(m.Shards, *maybeShard)
	}
	return m, nil
}
// ConvertShard downloads a snapshot of the shard described by manifest via
// getShard and translates its metadata into the form recorded in local backup
// manifests. It returns (nil, nil) when getShard reports no file info,
// i.e. the shard was deleted server-side before it could be downloaded.
func ConvertShard(manifest api.ShardManifest, getShard func(shardId int64) (*ManifestFileEntry, error)) (*ManifestShardEntry, error) {
	fileInfo, err := getShard(manifest.Id)
	if err != nil {
		return nil, fmt.Errorf("failed to download snapshot of shard %d: %w", manifest.Id, err)
	}
	if fileInfo == nil {
		// Shard deleted mid-backup.
		return nil, nil
	}
	entry := &ManifestShardEntry{
		ID:                manifest.Id,
		ShardOwners:       make([]ShardOwner, len(manifest.ShardOwners)),
		ManifestFileEntry: *fileInfo,
	}
	for i, owner := range manifest.ShardOwners {
		entry.ShardOwners[i] = ShardOwner{NodeID: owner.NodeID}
	}
	return entry, nil
}
// ManifestBucketEntry is the JSON-tagged counterpart of
// api.BucketMetadataManifest, produced by ConvertBucketManifest for
// inclusion in a backup manifest.
type ManifestBucketEntry struct {
	OrganizationID         string                    `json:"organizationID"`
	OrganizationName       string                    `json:"organizationName"`
	BucketID               string                    `json:"bucketID"`
	BucketName             string                    `json:"bucketName"`
	DefaultRetentionPolicy string                    `json:"defaultRetentionPolicy"`
	RetentionPolicies      []ManifestRetentionPolicy `json:"retentionPolicies"`
}
// ManifestRetentionPolicy is the JSON-tagged counterpart of
// api.RetentionPolicyManifest, produced by ConvertRetentionPolicy.
// Duration and ShardGroupDuration carry the same int64 values received
// from the API (units not visible here — presumably nanoseconds; confirm
// against the server API).
type ManifestRetentionPolicy struct {
	Name               string                 `json:"name"`
	ReplicaN           int32                  `json:"replicaN"`
	Duration           int64                  `json:"duration"`
	ShardGroupDuration int64                  `json:"shardGroupDuration"`
	ShardGroups        []ManifestShardGroup   `json:"shardGroups"`
	Subscriptions      []ManifestSubscription `json:"subscriptions"`
}
// ManifestShardGroup is the JSON-tagged counterpart of
// api.ShardGroupManifest, produced by ConvertShardGroup. DeletedAt and
// TruncatedAt are optional and omitted from JSON when unset.
type ManifestShardGroup struct {
	ID          int64                `json:"id"`
	StartTime   time.Time            `json:"startTime"`
	EndTime     time.Time            `json:"endTime"`
	DeletedAt   *time.Time           `json:"deletedAt,omitempty"`
	TruncatedAt *time.Time           `json:"truncatedAt,omitempty"`
	Shards      []ManifestShardEntry `json:"shards"`
}
// ManifestShardEntry pairs a shard's metadata (from api.ShardManifest) with
// the local file info of its downloaded snapshot. The embedded
// ManifestFileEntry contributes the file name, size, and compression fields.
type ManifestShardEntry struct {
	ID          int64        `json:"id"`
	ShardOwners []ShardOwner `json:"shardOwners"`
	ManifestFileEntry
}
// ShardOwner identifies a node that owns a copy of a shard, mirroring
// api.ShardOwner.
type ShardOwner struct {
	NodeID int64 `json:"nodeID"`
}
// ManifestSubscription is the JSON-tagged counterpart of
// api.SubscriptionManifest, produced by ConvertRetentionPolicy.
type ManifestSubscription struct {
	Name         string   `json:"name"`
	Mode         string   `json:"mode"`
	Destinations []string `json:"destinations"`
}

View File

@ -0,0 +1,184 @@
package backup_test
import (
"fmt"
"testing"
"time"
"github.com/influxdata/influx-cli/v2/api"
"github.com/influxdata/influx-cli/v2/clients/backup"
"github.com/stretchr/testify/require"
)
// TestConvertBucketManifest verifies that ConvertBucketManifest translates an
// API bucket manifest into the local backup form, checking that:
//   - retention policies, shard groups, subscriptions, and shards are
//     converted recursively, field by field
//   - file info returned by the getShard callback is attached to each shard
//   - a shard for which getShard reports no file (deleted mid-backup, shard
//     ID 20 below) is dropped from the converted output
func TestConvertBucketManifest(t *testing.T) {
	t.Parallel()
	now := time.Now()
	// Input fixture: one bucket with two retention policies covering the
	// interesting shapes (truncated/deleted groups, multi-owner shards,
	// empty shard and subscription lists).
	manifest := api.BucketMetadataManifest{
		OrganizationID:         "123",
		OrganizationName:       "org",
		BucketID:               "456",
		BucketName:             "bucket",
		DefaultRetentionPolicy: "foo",
		RetentionPolicies: []api.RetentionPolicyManifest{
			{
				Name:               "foo",
				ReplicaN:           1,
				Duration:           100,
				ShardGroupDuration: 10,
				ShardGroups: []api.ShardGroupManifest{
					{
						Id:          1,
						StartTime:   now,
						EndTime:     now,
						TruncatedAt: &now,
						Shards: []api.ShardManifest{
							{
								Id:          10,
								ShardOwners: []api.ShardOwner{{NodeID: 1}},
							},
							{
								Id:          20,
								ShardOwners: []api.ShardOwner{{NodeID: 2}, {NodeID: 3}},
							},
						},
					},
					{
						Id:        2,
						StartTime: now,
						EndTime:   now,
						DeletedAt: &now,
						Shards: []api.ShardManifest{
							{
								Id: 30,
							},
						},
					},
				},
				Subscriptions: []api.SubscriptionManifest{},
			},
			{
				Name:               "bar",
				ReplicaN:           3,
				Duration:           9999,
				ShardGroupDuration: 1,
				ShardGroups: []api.ShardGroupManifest{
					{
						Id:        3,
						StartTime: now,
						EndTime:   now,
						Shards:    []api.ShardManifest{},
					},
				},
				Subscriptions: []api.SubscriptionManifest{
					{
						Name:         "test",
						Mode:         "on",
						Destinations: []string{"here", "there", "everywhere"},
					},
					{
						Name:         "test2",
						Mode:         "off",
						Destinations: []string{},
					},
				},
			},
		},
	}
	// Fake shard downloader: reports shard 20 as deleted mid-backup (nil file
	// info), and fabricates deterministic file metadata for every other shard.
	fakeGetShard := func(id int64) (*backup.ManifestFileEntry, error) {
		if id == 20 {
			return nil, nil
		}
		return &backup.ManifestFileEntry{
			FileName:    fmt.Sprintf("%d.gz", id),
			Size:        id * 100,
			Compression: backup.GzipCompression,
		}, nil
	}
	converted, err := backup.ConvertBucketManifest(manifest, fakeGetShard)
	require.NoError(t, err)
	// Expected output mirrors the input, minus shard 20, with the fabricated
	// file metadata merged into each surviving shard entry.
	expected := backup.ManifestBucketEntry{
		OrganizationID:         "123",
		OrganizationName:       "org",
		BucketID:               "456",
		BucketName:             "bucket",
		DefaultRetentionPolicy: "foo",
		RetentionPolicies: []backup.ManifestRetentionPolicy{
			{
				Name:               "foo",
				ReplicaN:           1,
				Duration:           100,
				ShardGroupDuration: 10,
				ShardGroups: []backup.ManifestShardGroup{
					{
						ID:          1,
						StartTime:   now,
						EndTime:     now,
						TruncatedAt: &now,
						Shards: []backup.ManifestShardEntry{
							{
								ID:          10,
								ShardOwners: []backup.ShardOwner{{NodeID: 1}},
								ManifestFileEntry: backup.ManifestFileEntry{
									FileName:    "10.gz",
									Size:        1000,
									Compression: backup.GzipCompression,
								},
							},
						},
					},
					{
						ID:        2,
						StartTime: now,
						EndTime:   now,
						DeletedAt: &now,
						Shards: []backup.ManifestShardEntry{
							{
								ID:          30,
								ShardOwners: []backup.ShardOwner{},
								ManifestFileEntry: backup.ManifestFileEntry{
									FileName:    "30.gz",
									Size:        3000,
									Compression: backup.GzipCompression,
								},
							},
						},
					},
				},
				Subscriptions: []backup.ManifestSubscription{},
			},
			{
				Name:               "bar",
				ReplicaN:           3,
				Duration:           9999,
				ShardGroupDuration: 1,
				ShardGroups: []backup.ManifestShardGroup{
					{
						ID:        3,
						StartTime: now,
						EndTime:   now,
						Shards:    []backup.ManifestShardEntry{},
					},
				},
				Subscriptions: []backup.ManifestSubscription{
					{
						Name:         "test",
						Mode:         "on",
						Destinations: []string{"here", "there", "everywhere"},
					},
					{
						Name:         "test2",
						Mode:         "off",
						Destinations: []string{},
					},
				},
			},
		},
	}
	require.Equal(t, expected, converted)
}

72
cmd/influx/backup.go Normal file
View File

@ -0,0 +1,72 @@
package main
import (
"errors"
"github.com/influxdata/influx-cli/v2/clients/backup"
"github.com/influxdata/influx-cli/v2/pkg/cli/middleware"
"github.com/urfave/cli/v2"
)
// newBackupCmd builds the `influx backup` CLI command, which writes a backup
// of an InfluxDB instance to a local directory given as the single positional
// argument.
func newBackupCmd() *cli.Command {
	var params backup.Params
	// Default to gzipping local files.
	params.Compression = backup.GzipCompression

	// Command-specific flags, appended to the common set shared by all
	// subcommands that don't print tabular output.
	flags := append(
		commonFlagsNoPrint(),
		&cli.StringFlag{
			Name:        "org-id",
			Usage:       "The ID of the organization",
			EnvVars:     []string{"INFLUX_ORG_ID"},
			Destination: &params.OrgID,
		},
		&cli.StringFlag{
			Name:        "org",
			Usage:       "The name of the organization",
			Aliases:     []string{"o"},
			EnvVars:     []string{"INFLUX_ORG"},
			Destination: &params.Org,
		},
		&cli.StringFlag{
			Name:        "bucket-id",
			Usage:       "The ID of the bucket to backup",
			Destination: &params.BucketID,
		},
		&cli.StringFlag{
			Name:        "bucket",
			Usage:       "The name of the bucket to backup",
			Aliases:     []string{"b"},
			Destination: &params.Bucket,
		},
		&cli.GenericFlag{
			Name:  "compression",
			Usage: "Compression to use for local backup files, either 'none' or 'gzip'",
			Value: &params.Compression,
		},
	)

	runBackup := func(ctx *cli.Context) error {
		if ctx.NArg() != 1 {
			return errors.New("backup path must be specified as a single positional argument")
		}
		params.Path = ctx.Args().Get(0)
		client := backup.Client{
			CLI:       getCLI(ctx),
			BackupApi: getAPI(ctx).BackupApi,
		}
		return client.Backup(ctx.Context, &params)
	}

	return &cli.Command{
		Name:  "backup",
		Usage: "Backup database",
		Description: `Backs up InfluxDB to a directory
Examples:
	# backup all data
	influx backup /path/to/backup
`,
		ArgsUsage: "path",
		Before:    middleware.WithBeforeFns(withCli(), withApi(true)),
		Flags:     flags,
		Action:    runBackup,
	}
}

View File

@ -44,6 +44,7 @@ var app = cli.App{
newDeleteCmd(),
newUserCmd(),
newTaskCommand(),
newBackupCmd(),
},
}

View File

@ -0,0 +1,95 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/influxdata/influx-cli/v2/api (interfaces: BackupApi)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
http "net/http"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
api "github.com/influxdata/influx-cli/v2/api"
)
// MockBackupApi is a mock of BackupApi interface.
type MockBackupApi struct {
ctrl *gomock.Controller
recorder *MockBackupApiMockRecorder
}
// MockBackupApiMockRecorder is the mock recorder for MockBackupApi.
type MockBackupApiMockRecorder struct {
mock *MockBackupApi
}
// NewMockBackupApi creates a new mock instance.
func NewMockBackupApi(ctrl *gomock.Controller) *MockBackupApi {
mock := &MockBackupApi{ctrl: ctrl}
mock.recorder = &MockBackupApiMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockBackupApi) EXPECT() *MockBackupApiMockRecorder {
return m.recorder
}
// GetBackupMetadata mocks base method.
func (m *MockBackupApi) GetBackupMetadata(arg0 context.Context) api.ApiGetBackupMetadataRequest {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBackupMetadata", arg0)
ret0, _ := ret[0].(api.ApiGetBackupMetadataRequest)
return ret0
}
// GetBackupMetadata indicates an expected call of GetBackupMetadata.
func (mr *MockBackupApiMockRecorder) GetBackupMetadata(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackupMetadata", reflect.TypeOf((*MockBackupApi)(nil).GetBackupMetadata), arg0)
}
// GetBackupMetadataExecute mocks base method.
func (m *MockBackupApi) GetBackupMetadataExecute(arg0 api.ApiGetBackupMetadataRequest) (*http.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBackupMetadataExecute", arg0)
ret0, _ := ret[0].(*http.Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetBackupMetadataExecute indicates an expected call of GetBackupMetadataExecute.
func (mr *MockBackupApiMockRecorder) GetBackupMetadataExecute(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackupMetadataExecute", reflect.TypeOf((*MockBackupApi)(nil).GetBackupMetadataExecute), arg0)
}
// GetBackupShardId mocks base method.
func (m *MockBackupApi) GetBackupShardId(arg0 context.Context, arg1 int64) api.ApiGetBackupShardIdRequest {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBackupShardId", arg0, arg1)
ret0, _ := ret[0].(api.ApiGetBackupShardIdRequest)
return ret0
}
// GetBackupShardId indicates an expected call of GetBackupShardId.
func (mr *MockBackupApiMockRecorder) GetBackupShardId(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackupShardId", reflect.TypeOf((*MockBackupApi)(nil).GetBackupShardId), arg0, arg1)
}
// GetBackupShardIdExecute mocks base method.
func (m *MockBackupApi) GetBackupShardIdExecute(arg0 api.ApiGetBackupShardIdRequest) (*http.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBackupShardIdExecute", arg0)
ret0, _ := ret[0].(*http.Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetBackupShardIdExecute indicates an expected call of GetBackupShardIdExecute.
func (mr *MockBackupApiMockRecorder) GetBackupShardIdExecute(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackupShardIdExecute", reflect.TypeOf((*MockBackupApi)(nil).GetBackupShardIdExecute), arg0)
}

View File

@ -10,6 +10,7 @@ package mock
//go:generate go run github.com/golang/mock/mockgen -package mock -destination api_query.gen.go github.com/influxdata/influx-cli/v2/api QueryApi
//go:generate go run github.com/golang/mock/mockgen -package mock -destination api_users.gen.go github.com/influxdata/influx-cli/v2/api UsersApi
//go:generate go run github.com/golang/mock/mockgen -package mock -destination api_delete.gen.go github.com/influxdata/influx-cli/v2/api DeleteApi
//go:generate go run github.com/golang/mock/mockgen -package mock -destination api_backup.gen.go github.com/influxdata/influx-cli/v2/api BackupApi
// Other mocks
//go:generate go run github.com/golang/mock/mockgen -package mock -destination config.gen.go -mock_names Service=MockConfigService github.com/influxdata/influx-cli/v2/internal/config Service