feat: add virtual column to DBRP printing (#435)

* feat: add virtual column to DBRP printing

* fix: update DBRP tests with new virtual column

* chore: update to latest openapi
Andrew Lee 2022-08-03 16:14:07 -06:00 committed by GitHub
parent fbbe9743f2
commit 7bdad28ee0
13 changed files with 144 additions and 65 deletions
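
In practice, the change adds a "Virtual" column to the table printed by the v1 dbrp commands, as the updated tests at the bottom of this diff show. A minimal sketch of the expected output of, e.g., `influx v1 dbrp list`, with values mirroring the test fixtures:

ID    Database    Bucket ID    Retention Policy    Default    Virtual    Organization ID
123   someDB      456          someRP              false      false      1234123412341234
234   someDB      456          someRP              true       false      1234123412341234

An autogenerated (virtual) mapping would show `true` in the new Virtual column.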

@@ -28,10 +28,14 @@ var (
type BackupApi interface {
/*
* GetBackupKV Download snapshot of metadata stored in the server's embedded KV store. Should not be used in versions greater than 2.1.x, as it doesn't include metadata stored in embedded SQL.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiGetBackupKVRequest
*/
* GetBackupKV Download snapshot of metadata stored in the server's embedded KV store. Don't use with InfluxDB versions greater than InfluxDB 2.1.x.
* Retrieves a snapshot of metadata stored in the server's embedded KV store.
InfluxDB versions greater than 2.1.x don't include metadata stored in embedded SQL;
avoid using this endpoint with versions greater than 2.1.x.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiGetBackupKVRequest
*/
GetBackupKV(ctx _context.Context) ApiGetBackupKVRequest
/*
@@ -118,10 +122,14 @@ func (r ApiGetBackupKVRequest) ExecuteWithHttpInfo() (*_nethttp.Response, *_neth
}
/*
* GetBackupKV Download snapshot of metadata stored in the server's embedded KV store. Should not be used in versions greater than 2.1.x, as it doesn't include metadata stored in embedded SQL.
* GetBackupKV Download snapshot of metadata stored in the server's embedded KV store. Don't use with InfluxDB versions greater than InfluxDB 2.1.x.
* Retrieves a snapshot of metadata stored in the server's embedded KV store.
InfluxDB versions greater than 2.1.x don't include metadata stored in embedded SQL;
avoid using this endpoint with versions greater than 2.1.x.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @return ApiGetBackupKVRequest
*/
*/
func (a *BackupApiService) GetBackupKV(ctx _context.Context) ApiGetBackupKVRequest {
return ApiGetBackupKVRequest{
ApiService: a,

@@ -112,10 +112,10 @@ type TasksApi interface {
/*
* GetTasksID Retrieve a task
* Retrieves a [task]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#task)
by task ID.
by ID.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID The task ID.
* @param taskID The ID of the task to retrieve.
* @return ApiGetTasksIDRequest
*/
GetTasksID(ctx _context.Context, taskID string) ApiGetTasksIDRequest
@@ -165,11 +165,16 @@ type TasksApi interface {
GetTasksIDLogsExecuteWithHttpInfo(r ApiGetTasksIDLogsRequest) (Logs, *_nethttp.Response, error)
/*
* GetTasksIDRuns List runs for a task
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID The ID of the task to get runs for.
* @return ApiGetTasksIDRunsRequest
*/
* GetTasksIDRuns List runs for a task
* Retrieves a list of runs for a [task]({{% INFLUXDB_DOCS_URL %}}/process-data/).
To limit which task runs are returned, pass query parameters in your request.
If no query parameters are passed, InfluxDB returns all task runs up to the default `limit`.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID The ID of the task to get runs for. Only returns runs for this task.
* @return ApiGetTasksIDRunsRequest
*/
GetTasksIDRuns(ctx _context.Context, taskID string) ApiGetTasksIDRunsRequest
/*
@@ -216,8 +221,8 @@ type TasksApi interface {
/*
* GetTasksIDRunsIDLogs Retrieve all logs for a run
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID ID of task to get logs for.
* @param runID ID of run to get logs for.
* @param taskID The ID of the task to get logs for.
* @param runID The ID of the run to get logs for.
* @return ApiGetTasksIDRunsIDLogsRequest
*/
GetTasksIDRunsIDLogs(ctx _context.Context, taskID string, runID string) ApiGetTasksIDRunsIDLogsRequest
@@ -241,7 +246,7 @@ type TasksApi interface {
* Updates a task and then cancels all scheduled runs of the task.
Use this endpoint to modify task properties (for example: `cron`, `name`, `flux`, `status`).
Once InfluxDB applies the update, it cancels all scheduled runs of the task.
Once InfluxDB applies the update, it cancels all previously scheduled runs of the task.
To update a task, pass an object that contains the updated key-value pairs.
To activate or inactivate a task, set the `status` property.
@@ -283,7 +288,8 @@ type TasksApi interface {
#### Related guides
- [Create a task]({{% INFLUXDB_DOCS_URL %}}/process-data/manage-tasks/create-task/).
- [Get started with tasks]({{% INFLUXDB_DOCS_URL %}}/process-data/get-started/)
- [Create a task]({{% INFLUXDB_DOCS_URL %}}/process-data/manage-tasks/create-task/)
- [Common tasks]({{% INFLUXDB_DOCS_URL %}}/process-data/common-tasks/)
- [Task configuration options]({{% INFLUXDB_DOCS_URL %}}/process-data/task-options/)
@@ -307,11 +313,15 @@ type TasksApi interface {
PostTasksExecuteWithHttpInfo(r ApiPostTasksRequest) (Task, *_nethttp.Response, error)
/*
* PostTasksIDRuns Manually start a task run, overriding the current schedule
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID
* @return ApiPostTasksIDRunsRequest
*/
* PostTasksIDRuns Start a task run, overriding the schedule
* Schedules a task run to start immediately, ignoring scheduled runs.
Use this endpoint to manually start a task run.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID
* @return ApiPostTasksIDRunsRequest
*/
PostTasksIDRuns(ctx _context.Context, taskID string) ApiPostTasksIDRunsRequest
/*
@@ -1010,10 +1020,10 @@ func (r ApiGetTasksIDRequest) ExecuteWithHttpInfo() (Task, *_nethttp.Response, e
/*
* GetTasksID Retrieve a task
* Retrieves a [task]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#task)
by task ID.
by ID.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID The task ID.
* @param taskID The ID of the task to retrieve.
* @return ApiGetTasksIDRequest
*/
func (a *TasksApiService) GetTasksID(ctx _context.Context, taskID string) ApiGetTasksIDRequest {
@@ -1395,10 +1405,15 @@ func (r ApiGetTasksIDRunsRequest) ExecuteWithHttpInfo() (Runs, *_nethttp.Respons
/*
* GetTasksIDRuns List runs for a task
* Retrieves a list of runs for a [task]({{% INFLUXDB_DOCS_URL %}}/process-data/).
To limit which task runs are returned, pass query parameters in your request.
If no query parameters are passed, InfluxDB returns all task runs up to the default `limit`.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID The ID of the task to get runs for.
* @param taskID The ID of the task to get runs for. Only returns runs for this task.
* @return ApiGetTasksIDRunsRequest
*/
*/
func (a *TasksApiService) GetTasksIDRuns(ctx _context.Context, taskID string) ApiGetTasksIDRunsRequest {
return ApiGetTasksIDRunsRequest{
ApiService: a,
@@ -1761,8 +1776,8 @@ func (r ApiGetTasksIDRunsIDLogsRequest) ExecuteWithHttpInfo() (Logs, *_nethttp.R
/*
* GetTasksIDRunsIDLogs Retrieve all logs for a run
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID ID of task to get logs for.
* @param runID ID of run to get logs for.
* @param taskID The ID of the task to get logs for.
* @param runID The ID of the run to get logs for.
* @return ApiGetTasksIDRunsIDLogsRequest
*/
func (a *TasksApiService) GetTasksIDRunsIDLogs(ctx _context.Context, taskID string, runID string) ApiGetTasksIDRunsIDLogsRequest {
@@ -1939,7 +1954,7 @@ func (r ApiPatchTasksIDRequest) ExecuteWithHttpInfo() (Task, *_nethttp.Response,
* Updates a task and then cancels all scheduled runs of the task.
Use this endpoint to modify task properties (for example: `cron`, `name`, `flux`, `status`).
Once InfluxDB applies the update, it cancels all scheduled runs of the task.
Once InfluxDB applies the update, it cancels all previously scheduled runs of the task.
To update a task, pass an object that contains the updated key-value pairs.
To activate or inactivate a task, set the `status` property.
@@ -2128,7 +2143,8 @@ In your task, provide one of the following:
#### Related guides
- [Create a task]({{% INFLUXDB_DOCS_URL %}}/process-data/manage-tasks/create-task/).
- [Get started with tasks]({{% INFLUXDB_DOCS_URL %}}/process-data/get-started/)
- [Create a task]({{% INFLUXDB_DOCS_URL %}}/process-data/manage-tasks/create-task/)
- [Common tasks]({{% INFLUXDB_DOCS_URL %}}/process-data/common-tasks/)
- [Task configuration options]({{% INFLUXDB_DOCS_URL %}}/process-data/task-options/)
@@ -2232,6 +2248,17 @@ func (a *TasksApiService) PostTasksExecuteWithHttpInfo(r ApiPostTasksRequest) (T
}
newErr.body = localVarBody
newErr.error = localVarHTTPResponse.Status
if localVarHTTPResponse.StatusCode == 400 {
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
newErr.error = _fmt.Sprintf("%s: %s", newErr.Error(), err.Error())
return localVarReturnValue, localVarHTTPResponse, newErr
}
v.SetMessage(_fmt.Sprintf("%s: %s", newErr.Error(), v.GetMessage()))
newErr.model = &v
return localVarReturnValue, localVarHTTPResponse, newErr
}
var v Error
err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
if err != nil {
@@ -2306,11 +2333,15 @@ func (r ApiPostTasksIDRunsRequest) ExecuteWithHttpInfo() (Run, *_nethttp.Respons
}
/*
* PostTasksIDRuns Manually start a task run, overriding the current schedule
* PostTasksIDRuns Start a task run, overriding the schedule
* Schedules a task run to start immediately, ignoring scheduled runs.
Use this endpoint to manually start a task run.
* @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param taskID
* @return ApiPostTasksIDRunsRequest
*/
*/
func (a *TasksApiService) PostTasksIDRuns(ctx _context.Context, taskID string) ApiPostTasksIDRunsRequest {
return ApiPostTasksIDRunsRequest{
ApiService: a,

@@ -1 +1 @@
Subproject commit 3fdd5849c90ef9b7b062d8449616f1770b1a7b6b
Subproject commit 0df6358a78262bcf33edd35bb242dce2a9744ea1

@@ -42,10 +42,11 @@ const (
RESOURCEENUMOSS_ANNOTATIONS ResourceEnumOSS = "annotations"
RESOURCEENUMOSS_REMOTES ResourceEnumOSS = "remotes"
RESOURCEENUMOSS_REPLICATIONS ResourceEnumOSS = "replications"
RESOURCEENUMOSS_INSTANCE ResourceEnumOSS = "instance"
)
func ResourceEnumOSSValues() []ResourceEnumOSS {
return []ResourceEnumOSS{"authorizations", "buckets", "dashboards", "orgs", "sources", "tasks", "telegrafs", "users", "variables", "scrapers", "secrets", "labels", "views", "documents", "notificationRules", "notificationEndpoints", "checks", "dbrp", "notebooks", "annotations", "remotes", "replications"}
return []ResourceEnumOSS{"authorizations", "buckets", "dashboards", "orgs", "sources", "tasks", "telegrafs", "users", "variables", "scrapers", "secrets", "labels", "views", "documents", "notificationRules", "notificationEndpoints", "checks", "dbrp", "notebooks", "annotations", "remotes", "replications", "instance"}
}
func (v *ResourceEnumOSS) UnmarshalJSON(src []byte) error {
@@ -55,7 +56,7 @@ func (v *ResourceEnumOSS) UnmarshalJSON(src []byte) error {
return err
}
enumTypeValue := ResourceEnumOSS(value)
for _, existing := range []ResourceEnumOSS{"authorizations", "buckets", "dashboards", "orgs", "sources", "tasks", "telegrafs", "users", "variables", "scrapers", "secrets", "labels", "views", "documents", "notificationRules", "notificationEndpoints", "checks", "dbrp", "notebooks", "annotations", "remotes", "replications"} {
for _, existing := range []ResourceEnumOSS{"authorizations", "buckets", "dashboards", "orgs", "sources", "tasks", "telegrafs", "users", "variables", "scrapers", "secrets", "labels", "views", "documents", "notificationRules", "notificationEndpoints", "checks", "dbrp", "notebooks", "annotations", "remotes", "replications", "instance"} {
if existing == enumTypeValue {
*v = enumTypeValue
return nil

@@ -27,7 +27,9 @@ type DBRP struct {
// InfluxDB v1 retention policy
RetentionPolicy string `json:"retention_policy" yaml:"retention_policy"`
// Mapping represents the default retention policy for the database specified.
Default bool `json:"default" yaml:"default"`
Default bool `json:"default" yaml:"default"`
// Indicates an autogenerated, virtual mapping based on the bucket name. Currently only available in OSS.
Virtual *bool `json:"virtual,omitempty" yaml:"virtual,omitempty"`
Links *Links `json:"links,omitempty" yaml:"links,omitempty"`
}
@@ -198,6 +200,38 @@ func (o *DBRP) SetDefault(v bool) {
o.Default = v
}
// GetVirtual returns the Virtual field value if set, zero value otherwise.
func (o *DBRP) GetVirtual() bool {
if o == nil || o.Virtual == nil {
var ret bool
return ret
}
return *o.Virtual
}
// GetVirtualOk returns a tuple with the Virtual field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *DBRP) GetVirtualOk() (*bool, bool) {
if o == nil || o.Virtual == nil {
return nil, false
}
return o.Virtual, true
}
// HasVirtual returns a boolean if a field has been set.
func (o *DBRP) HasVirtual() bool {
if o != nil && o.Virtual != nil {
return true
}
return false
}
// SetVirtual gets a reference to the given bool and assigns it to the Virtual field.
func (o *DBRP) SetVirtual(v bool) {
o.Virtual = &v
}
// GetLinks returns the Links field value if set, zero value otherwise.
func (o *DBRP) GetLinks() Links {
if o == nil || o.Links == nil {
@@ -250,6 +284,9 @@ func (o DBRP) MarshalJSON() ([]byte, error) {
if true {
toSerialize["default"] = o.Default
}
if o.Virtual != nil {
toSerialize["virtual"] = o.Virtual
}
if o.Links != nil {
toSerialize["links"] = o.Links
}
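
The accessors above follow the generated client's usual optional-field pattern: Virtual is a *bool, GetVirtual falls back to the zero value (false) when the field was never set, and MarshalJSON only emits the "virtual" key when the pointer is non-nil. A minimal, self-contained sketch of that behavior; the import path for the generated api package is assumed here:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/influxdata/influx-cli/v2/api" // assumed import path for the generated client
)

func main() {
	d := api.DBRP{Database: "someDB", RetentionPolicy: "someRP", Default: false}

	// Field unset: HasVirtual is false, GetVirtual returns the zero value,
	// and the marshaled JSON contains no "virtual" key.
	fmt.Println(d.HasVirtual(), d.GetVirtual()) // false false
	out, _ := json.Marshal(d)
	fmt.Println(string(out))

	// Field set: the pointer is populated and "virtual":true is serialized.
	d.SetVirtual(true)
	fmt.Println(d.HasVirtual(), d.GetVirtual()) // true true
	out, _ = json.Marshal(d)
	fmt.Println(string(out))
}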

@@ -17,9 +17,9 @@ import (
// DeletePredicateRequest The delete predicate request.
type DeletePredicateRequest struct {
// A timestamp ([RFC3339 date/time format](https://docs.influxdata.com/flux/v0.x/data-types/basic/time/#time-syntax)).
// A timestamp ([RFC3339 date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp)). The earliest time to delete from.
Start time.Time `json:"start" yaml:"start"`
// A timestamp ([RFC3339 date/time format](https://docs.influxdata.com/flux/v0.x/data-types/basic/time/#time-syntax)).
// A timestamp ([RFC3339 date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp)). The latest time to delete from.
Stop time.Time `json:"stop" yaml:"stop"`
// An expression in [delete predicate syntax]({{% INFLUXDB_DOCS_URL %}}/reference/syntax/delete-predicate/).
Predicate *string `json:"predicate,omitempty" yaml:"predicate,omitempty"`

@@ -14,17 +14,17 @@ import (
"encoding/json"
)
// Dialect Dialect are options to change the default CSV output format; https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions
// Dialect Options for tabular data output. Default output is [annotated CSV]({{% INFLUXDB_DOCS_URL %}}/reference/syntax/annotated-csv/#csv-response-format) with headers. For more information about tabular data **dialect**, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions).
type Dialect struct {
// If true, the results will contain a header row
// If true, the results contain a header row.
Header *bool `json:"header,omitempty" yaml:"header,omitempty"`
// Separator between cells; the default is ,
// The separator used between cells. Default is a comma (`,`).
Delimiter *string `json:"delimiter,omitempty" yaml:"delimiter,omitempty"`
// https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns
// Annotation rows to include in the results. An _annotation_ is metadata associated with an object (column) in the data model. #### Related guides - See [Annotated CSV annotations]({{% INFLUXDB_DOCS_URL %}}/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns).
Annotations *[]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
// Character prefixed to comment strings
// The character prefixed to comment strings. Default is a number sign (`#`).
CommentPrefix *string `json:"commentPrefix,omitempty" yaml:"commentPrefix,omitempty"`
// Format of timestamps
// The format for timestamps in results. Default is [`RFC3339` date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. #### Example formatted date/time values | Format | Value | |:------------|:----------------------------| | `RFC3339` | `\"2006-01-02T15:04:05Z07:00\"` | | `RFC3339Nano` | `\"2006-01-02T15:04:05.999999999Z07:00\"` |
DateTimeFormat *string `json:"dateTimeFormat,omitempty" yaml:"dateTimeFormat,omitempty"`
}

@@ -17,11 +17,11 @@ import (
// LogEvent struct for LogEvent
type LogEvent struct {
// Time event occurred, RFC3339Nano.
// The time ([RFC3339Nano date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339nano-timestamp)) that the event occurred.
Time *time.Time `json:"time,omitempty" yaml:"time,omitempty"`
// A description of the event that occurred.
Message *string `json:"message,omitempty" yaml:"message,omitempty"`
// the ID of the task that logged
// The ID of the task run that generated the event.
RunID *string `json:"runID,omitempty" yaml:"runID,omitempty"`
}

@@ -20,17 +20,17 @@ type Run struct {
Id *string `json:"id,omitempty" yaml:"id,omitempty"`
TaskID *string `json:"taskID,omitempty" yaml:"taskID,omitempty"`
Status *string `json:"status,omitempty" yaml:"status,omitempty"`
// Time used for run's \"now\" option, RFC3339.
// The time [RFC3339 date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp) used for the run's `now` option.
ScheduledFor *time.Time `json:"scheduledFor,omitempty" yaml:"scheduledFor,omitempty"`
// An array of logs associated with the run.
Log *[]LogEvent `json:"log,omitempty" yaml:"log,omitempty"`
// Flux used for the task
Flux *string `json:"flux,omitempty" yaml:"flux,omitempty"`
// Time run started executing, RFC3339Nano.
// The time ([RFC3339Nano date/time format](https://go.dev/src/time/format.go)) the run started executing.
StartedAt *time.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
// Time run finished executing, RFC3339Nano.
// The time ([RFC3339Nano date/time format](https://go.dev/src/time/format.go)) the run finished executing.
FinishedAt *time.Time `json:"finishedAt,omitempty" yaml:"finishedAt,omitempty"`
// Time run was manually requested, RFC3339Nano.
// The time ([RFC3339Nano date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339nano-timestamp)) the run was manually requested.
RequestedAt *time.Time `json:"requestedAt,omitempty" yaml:"requestedAt,omitempty"`
Links *RunLinks `json:"links,omitempty" yaml:"links,omitempty"`
}

@@ -17,7 +17,7 @@ import (
// RunManually struct for RunManually
type RunManually struct {
// Time used for run's \"now\" option, RFC3339. Default is the server's now time.
// The time [RFC3339 date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp) used for the run's `now` option. Default is the server _now_ time.
ScheduledFor NullableTime `json:"scheduledFor,omitempty" yaml:"scheduledFor,omitempty"`
}

@@ -34,13 +34,13 @@ type Task struct {
AuthorizationID *string `json:"authorizationID,omitempty" yaml:"authorizationID,omitempty"`
// The Flux script that the task runs. #### Limitations - If you use the `flux` property, you can't use the `scriptID` and `scriptParameters` properties.
Flux *string `json:"flux,omitempty" yaml:"flux,omitempty"`
// The interval ([duration literal](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals))) at which the task runs. `every` also determines when the task first runs, depending on the specified time.
// The interval ([duration literal]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp)) at which the task runs. `every` also determines when the task first runs, depending on the specified time.
Every *string `json:"every,omitempty" yaml:"every,omitempty"`
// A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that defines the schedule on which the task runs. InfluxDB bases cron runs on the system time.
// A [Cron expression](https://en.wikipedia.org/wiki/Cron#Overview) that defines the schedule on which the task runs. InfluxDB uses the system time when evaluating Cron expressions.
Cron *string `json:"cron,omitempty" yaml:"cron,omitempty"`
// A [duration](https://docs.influxdata.com/flux/v0.x/spec/lexical-elements/#duration-literals) to delay execution of the task after the scheduled time has elapsed. `0` removes the offset.
Offset *string `json:"offset,omitempty" yaml:"offset,omitempty"`
// A timestamp ([RFC3339 date/time format](https://docs.influxdata.com/flux/v0.x/data-types/basic/time/#time-syntax)) of the latest scheduled and completed run.
// A timestamp ([RFC3339 date/time format]({{% INFLUXDB_DOCS_URL %}}/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run.
LatestCompleted *time.Time `json:"latestCompleted,omitempty" yaml:"latestCompleted,omitempty"`
LastRunStatus *string `json:"lastRunStatus,omitempty" yaml:"lastRunStatus,omitempty"`
LastRunError *string `json:"lastRunError,omitempty" yaml:"lastRunError,omitempty"`

@@ -207,6 +207,7 @@ func (c Client) printDBRPs(opts dbrpPrintOpts) error {
"Bucket ID",
"Retention Policy",
"Default",
"Virtual",
"Organization ID",
}
@@ -221,6 +222,7 @@ func (c Client) printDBRPs(opts dbrpPrintOpts) error {
"Database": t.Database,
"Retention Policy": t.RetentionPolicy,
"Default": t.Default,
"Virtual": t.GetVirtual(),
"Organization ID": t.OrgID,
"Bucket ID": t.BucketID,
}
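
One detail worth noting before the test changes below: the row value comes from t.GetVirtual(), so mappings for which the server omits the field (including every fixture in the existing tests) render as false rather than an empty cell, which is why each expected line gains a second false between the Default and Organization ID columns. A hypothetical, runnable sketch of a row for a virtual mapping, mirroring the shape of the map built in printDBRPs above:

package main

import "fmt"

func main() {
	// Hypothetical row for a virtual mapping; keys mirror the headers in printDBRPs.
	row := map[string]interface{}{
		"ID":               "123",
		"Database":         "someDB",
		"Bucket ID":        "456",
		"Retention Policy": "someRP",
		"Default":          false,
		"Virtual":          true, // from t.GetVirtual(); false whenever the response omits the field
		"Organization ID":  "1234123412341234",
	}
	fmt.Println(row)
}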

@@ -80,8 +80,8 @@ func TestClient_List(t *testing.T) {
}, nil)
},
outLines: []string{
`123\s+someDB\s+456\s+someRP\s+false\s+1234123412341234`,
`234\s+someDB\s+456\s+someRP\s+true\s+1234123412341234`,
`123\s+someDB\s+456\s+someRP\s+false\s+false\s+1234123412341234`,
`234\s+someDB\s+456\s+someRP\s+true\s+false\s+1234123412341234`,
},
},
{
@@ -120,7 +120,7 @@ func TestClient_List(t *testing.T) {
require.Equal(t, tc.expectedError, err)
if tc.expectedError == nil {
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Virtual\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
}
})
}
@@ -160,7 +160,7 @@ func TestClient_Create(t *testing.T) {
}, nil)
},
outLines: []string{
`123\s+someDB\s+456\s+someRP\s+false\s+1234123412341234`,
`123\s+someDB\s+456\s+someRP\s+false\s+false\s+1234123412341234`,
},
},
{
@@ -200,7 +200,7 @@ func TestClient_Create(t *testing.T) {
require.Equal(t, tc.expectedError, err)
if tc.expectedError == nil {
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Virtual\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
}
})
}
@@ -243,7 +243,7 @@ func TestClient_Update(t *testing.T) {
}, nil)
},
outLines: []string{
`123\s+someDB\s+456\s+someRP\s+false\s+1234123412341234`,
`123\s+someDB\s+456\s+someRP\s+false\s+false\s+1234123412341234`,
},
},
{
@@ -268,7 +268,7 @@ func TestClient_Update(t *testing.T) {
}, nil)
},
outLines: []string{
`123\s+someDB\s+456\s+someRP\s+false\s+1234123412341234`,
`123\s+someDB\s+456\s+someRP\s+false\s+false\s+1234123412341234`,
},
},
{
@@ -308,7 +308,7 @@ func TestClient_Update(t *testing.T) {
require.Equal(t, tc.expectedError, err)
if tc.expectedError == nil {
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Virtual\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
}
})
}
@@ -355,7 +355,7 @@ func TestClient_Delete(t *testing.T) {
DBRPsApi.EXPECT().DeleteDBRPIDExecute(gomock.Any()).Return(nil)
},
outLines: []string{
`123\s+someDB\s+456\s+someRP\s+false\s+1234123412341234`,
`123\s+someDB\s+456\s+someRP\s+false\s+false\s+1234123412341234`,
},
},
{
@@ -384,7 +384,7 @@ func TestClient_Delete(t *testing.T) {
DBRPsApi.EXPECT().DeleteDBRPIDExecute(gomock.Any()).Return(nil)
},
outLines: []string{
`123\s+someDB\s+456\s+someRP\s+false\s+1234123412341234`,
`123\s+someDB\s+456\s+someRP\s+false\s+false\s+1234123412341234`,
},
},
{
@@ -453,7 +453,7 @@ func TestClient_Delete(t *testing.T) {
require.Equal(t, tc.expectedError, err)
if tc.expectedError == nil {
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
testutils.MatchLines(t, append([]string{`ID\s+Database\s+Bucket\s+ID\s+Retention Policy\s+Default\s+Virtual\s+Organization ID`}, tc.outLines...), strings.Split(stdout.String(), "\n"))
}
})
}