Merge 4fadba9bcb2f9c03f82f98141ad7ea7fae131a98 into 431386085f193a321724e3a0eabb0584f70e6c88

albertony authored 2025-02-27 01:18:01 +05:30, committed by GitHub
commit ad9a45bdb3
23 changed files with 76 additions and 75 deletions
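The changes below all follow one pattern: duration-valued config fields and flag variables move from the standard library's time.Duration to rclone's fs.Duration, and an explicit conversion is added wherever the value crosses back into a stdlib API. A minimal sketch of that pattern (the option struct and field names here are hypothetical, not taken from the diff):

package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
)

// exampleOptions is a hypothetical options struct mirroring the kind of
// duration fields changed in this commit.
type exampleOptions struct {
	Timeout fs.Duration // was time.Duration before the change
}

func main() {
	// Literal durations now need an explicit wrap into fs.Duration ...
	opt := exampleOptions{Timeout: fs.Duration(10 * time.Second)}

	// ... and an explicit unwrap when handed to a stdlib API that still
	// expects time.Duration.
	timer := time.NewTimer(time.Duration(opt.Timeout))
	defer timer.Stop()

	fmt.Println("timeout:", opt.Timeout)
}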

View File

@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
ci.Timeout = saveTimeout
}()
ci.LowLevelRetries = 1
-ci.Timeout = idleTimeout
+ci.Timeout = fs.Duration(idleTimeout)
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{

View File

@@ -871,7 +871,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
User: opt.User,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
-Timeout: f.ci.ConnectTimeout,
+Timeout: time.Duration(f.ci.ConnectTimeout),
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
}

View File

@@ -491,8 +491,8 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
-ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
-Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
+ConnectTimeout: time.Duration(10 * ci.ConnectTimeout), // Use the timeouts in the transport
+Timeout: time.Duration(10 * ci.Timeout), // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
FetchUntilEmptyPage: opt.FetchUntilEmptyPage,
PartialPageFetchThreshold: opt.PartialPageFetchThreshold,

View File

@@ -278,7 +278,7 @@ func testBisync(t *testing.T, path1, path2 string) {
}
bisync.Colors = true
time.Local = bisync.TZ
-ci.FsCacheExpireDuration = 5 * time.Hour
+ci.FsCacheExpireDuration = fs.Duration(5 * time.Hour)
baseDir, err := os.Getwd()
require.NoError(t, err, "get current directory")

View File

@@ -55,7 +55,7 @@ type Options struct {
Compare CompareOpt
CompareFlag string
DebugName string
-MaxLock time.Duration
+MaxLock fs.Duration
ConflictResolve Prefer
ConflictLoser ConflictLoserAction
ConflictSuffixFlag string
@@ -146,7 +146,7 @@ func init() {
flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)", "")
-flags.DurationVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", Opt.MaxLock, "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
+flags.FVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
flags.FVarP(cmdFlags, &Opt.ConflictResolve, "conflict-resolve", "", "Automatically resolve conflicts by preferring the version that is: "+ConflictResolveList+" (default: none)", "")
flags.FVarP(cmdFlags, &Opt.ConflictLoser, "conflict-loser", "", "Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): "+ConflictLoserList+" (default: num)", "")
flags.StringVarP(cmdFlags, &Opt.ConflictSuffixFlag, "conflict-suffix", "", Opt.ConflictSuffixFlag, "Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')", "")
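With MaxLock now an fs.Duration, it can no longer be registered through flags.DurationVarP (which wants a *time.Duration); it goes through flags.FVarP instead, which takes the flag value itself and uses the variable's current value as the default. A rough sketch of the new registration style, assuming the signatures as they appear in this diff and the rclone flags package import path (the maxLock variable is illustrative only):

package example

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/pflag"
)

// maxLock is an illustrative flag-backed variable, not the real bisync option.
var maxLock fs.Duration

func addFlags(cmdFlags *pflag.FlagSet) {
	// Before (time.Duration field), the default was passed explicitly:
	//   flags.DurationVarP(cmdFlags, &maxLock, "max-lock", "", maxLock, "help text", "")
	// After (fs.Duration field), FVarP takes the flag value itself and reads
	// the default from maxLock's current value.
	flags.FVarP(cmdFlags, &maxLock, "max-lock", "", "Consider lock files older than this to be expired", "")
}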

View File

@@ -14,7 +14,7 @@ import (
"github.com/rclone/rclone/lib/terminal"
)
-const basicallyforever = 200 * 365 * 24 * time.Hour
+const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)
var stopRenewal func()
@@ -66,9 +66,9 @@ func (b *bisyncRun) removeLockFile() {
}
func (b *bisyncRun) setLockFileExpiration() {
-if b.opt.MaxLock > 0 && b.opt.MaxLock < 2*time.Minute {
+if b.opt.MaxLock > 0 && b.opt.MaxLock < fs.Duration(2*time.Minute) {
fs.Logf(nil, Color(terminal.YellowFg, "--max-lock cannot be shorter than 2 minutes (unless 0.) Changing --max-lock from %v to %v"), b.opt.MaxLock, 2*time.Minute)
-b.opt.MaxLock = 2 * time.Minute
+b.opt.MaxLock = fs.Duration(2 * time.Minute)
} else if b.opt.MaxLock <= 0 {
b.opt.MaxLock = basicallyforever
}
@@ -80,7 +80,7 @@ func (b *bisyncRun) renewLockFile() {
data.Session = b.basePath
data.PID = strconv.Itoa(os.Getpid())
data.TimeRenewed = time.Now()
-data.TimeExpires = time.Now().Add(b.opt.MaxLock)
+data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
// save data file
df, err := os.Create(b.lockFile)
@@ -131,7 +131,7 @@ func (b *bisyncRun) startLockRenewal() func() {
wg.Add(1)
go func() {
defer wg.Done()
-ticker := time.NewTicker(b.opt.MaxLock - time.Minute)
+ticker := time.NewTicker(time.Duration(b.opt.MaxLock) - time.Minute)
for {
select {
case <-ticker.C:
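Because fs.Duration is a distinct type, comparisons and arithmetic in the lock-file code have to stay within one type: time.Duration-typed constants such as 2*time.Minute are wrapped in fs.Duration(...), while the field is converted back to time.Duration before being handed to the stdlib ticker. A simplified sketch of the same rules (names are illustrative; the real code also treats 0 as "never expire"):

package example

import (
	"time"

	"github.com/rclone/rclone/fs"
)

// lockTicker mirrors the rules applied above: comparisons stay within
// fs.Duration, and the value is unwrapped for the stdlib ticker.
func lockTicker(maxLock fs.Duration) *time.Ticker {
	minLock := fs.Duration(2 * time.Minute) // typed time constants need wrapping
	if maxLock < minLock {
		maxLock = minLock
	}
	// time.NewTicker expects time.Duration, so unwrap before the arithmetic.
	return time.NewTicker(time.Duration(maxLock) - time.Minute)
}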

View File

@@ -138,7 +138,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
if b.SyncCI != nil {
fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
b.SyncCI.MaxTransfer = 1
-b.SyncCI.MaxDuration = 1 * time.Second
+b.SyncCI.MaxDuration = fs.Duration(1 * time.Second)
b.SyncCI.CutoffMode = fs.CutoffModeSoft
gracePeriod := 30 * time.Second // TODO: flag to customize this?
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {

View File

@@ -376,8 +376,8 @@ func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
return files.Save(queueFile)
}
-func naptime(totalWait time.Duration) {
-expireTime := time.Now().Add(totalWait)
+func naptime(totalWait fs.Duration) {
+expireTime := time.Now().Add(time.Duration(totalWait))
fs.Logf(nil, "will retry in %v at %v", totalWait, expireTime.Format("2006-01-02 15:04:05 MST"))
for i := 0; time.Until(expireTime) > 0; i++ {
if i > 0 && i%10 == 0 {

View File

@@ -288,7 +288,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
accounting.GlobalStats().ResetErrors()
}
if ci.RetriesInterval > 0 {
-time.Sleep(ci.RetriesInterval)
+time.Sleep(time.Duration(ci.RetriesInterval))
}
}
stopStats()

View File

@@ -14,13 +14,13 @@ import (
)
var (
-pollInterval = 10 * time.Second
+pollInterval = fs.Duration(10 * time.Second)
)
func init() {
test.Command.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
-flags.DurationVarP(cmdFlags, &pollInterval, "poll-interval", "", pollInterval, "Time to wait between polling for changes", "")
+flags.FVarP(cmdFlags, &pollInterval, "poll-interval", "", "Time to wait between polling for changes", "")
}
var commandDefinition = &cobra.Command{
@@ -39,7 +39,7 @@ var commandDefinition = &cobra.Command{
if do := features.ChangeNotify; do != nil {
pollChan := make(chan time.Duration)
do(ctx, changeNotify, pollChan)
-pollChan <- pollInterval
+pollChan <- time.Duration(pollInterval)
fs.Logf(nil, "Waiting for changes, polling every %v", pollInterval)
} else {
return errors.New("poll-interval is not supported by this remote")

View File

@@ -40,7 +40,7 @@ var (
checkStreaming bool
checkBase32768 bool
all bool
-uploadWait time.Duration
+uploadWait fs.Duration
positionLeftRe = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
positionRightRe = regexp.MustCompile(`(?s)^position-right-([[:xdigit:]]+)-(.*)$`)
@@ -52,7 +52,7 @@ func init() {
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file", "")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization", "")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters", "")
-flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file", "")
+flags.FVarP(cmdFlags, &uploadWait, "upload-wait", "", "Wait after writing a file", "")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length", "")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size", "")
flags.BoolVarP(cmdFlags, &checkBase32768, "check-base32768", "", false, "Check can store all possible base32768 characters", "")
@@ -204,7 +204,7 @@ func (r *results) writeFile(path string) (fs.Object, error) {
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
obj, err := r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
if uploadWait > 0 {
-time.Sleep(uploadWait)
+time.Sleep(time.Duration(uploadWait))
}
return obj, err
}
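For flags like --upload-wait and --poll-interval the user-visible effect is in the parsing: the value now goes through fs.Duration's Set method rather than Go's plain duration parser. A small sketch, assuming fs.Duration's Set accepts the same suffixed syntax rclone uses for its other duration options:

package example

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

// parseWait goes through the same Set entry point the flag machinery uses.
func parseWait(s string) (fs.Duration, error) {
	var d fs.Duration
	if err := d.Set(s); err != nil {
		return 0, fmt.Errorf("invalid duration %q: %w", s, err)
	}
	return d, nil
}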

fs/cache/cache.go
View File

@@ -6,6 +6,7 @@ import (
"runtime"
"strings"
"sync"
+"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
@@ -25,8 +26,8 @@ func createOnFirstUse() {
once.Do(func() {
ci := fs.GetConfig(context.Background())
c = cache.New()
-c.SetExpireDuration(ci.FsCacheExpireDuration)
-c.SetExpireInterval(ci.FsCacheExpireInterval)
+c.SetExpireDuration(time.Duration(ci.FsCacheExpireDuration))
+c.SetExpireInterval(time.Duration(ci.FsCacheExpireInterval))
c.SetFinalizer(func(value interface{}) {
if s, ok := value.(fs.Shutdowner); ok {
_ = fs.CountError(context.Background(), s.Shutdown(context.Background()))

View File

@@ -549,12 +549,12 @@ type ConfigInfo struct {
IgnoreTimes bool `config:"ignore_times"`
IgnoreExisting bool `config:"ignore_existing"`
IgnoreErrors bool `config:"ignore_errors"`
-ModifyWindow time.Duration `config:"modify_window"`
+ModifyWindow Duration `config:"modify_window"`
Checkers int `config:"checkers"`
Transfers int `config:"transfers"`
-ConnectTimeout time.Duration `config:"contimeout"` // Connect timeout
-Timeout time.Duration `config:"timeout"` // Data channel timeout
-ExpectContinueTimeout time.Duration `config:"expect_continue_timeout"`
+ConnectTimeout Duration `config:"contimeout"` // Connect timeout
+Timeout Duration `config:"timeout"` // Data channel timeout
+ExpectContinueTimeout Duration `config:"expect_continue_timeout"`
Dump DumpFlags `config:"dump"`
InsecureSkipVerify bool `config:"no_check_certificate"` // Skip server certificate verification
DeleteMode DeleteMode `config:"delete_mode"`
@@ -563,7 +563,7 @@ type ConfigInfo struct {
TrackRenames bool `config:"track_renames"` // Track file renames.
TrackRenamesStrategy string `config:"track_renames_strategy"` // Comma separated list of strategies used to track renames
Retries int `config:"retries"` // High-level retries
-RetriesInterval time.Duration `config:"retries_sleep"`
+RetriesInterval Duration `config:"retries_sleep"`
LowLevelRetries int `config:"low_level_retries"`
UpdateOlder bool `config:"update"` // Skip files that are newer on the destination
NoGzip bool `config:"no_gzip_encoding"` // Disable compression
@@ -601,7 +601,7 @@ type ConfigInfo struct {
PasswordCommand SpaceSepList `config:"password_command"`
UseServerModTime bool `config:"use_server_modtime"`
MaxTransfer SizeSuffix `config:"max_transfer"`
-MaxDuration time.Duration `config:"max_duration"`
+MaxDuration Duration `config:"max_duration"`
CutoffMode CutoffMode `config:"cutoff_mode"`
MaxBacklog int `config:"max_backlog"`
MaxStatsGroups int `config:"max_stats_groups"`
@@ -629,11 +629,11 @@ type ConfigInfo struct {
RefreshTimes bool `config:"refresh_times"`
NoConsole bool `config:"no_console"`
TrafficClass uint8 `config:"traffic_class"`
-FsCacheExpireDuration time.Duration `config:"fs_cache_expire_duration"`
-FsCacheExpireInterval time.Duration `config:"fs_cache_expire_interval"`
+FsCacheExpireDuration Duration `config:"fs_cache_expire_duration"`
+FsCacheExpireInterval Duration `config:"fs_cache_expire_interval"`
DisableHTTP2 bool `config:"disable_http2"`
HumanReadable bool `config:"human_readable"`
-KvLockTime time.Duration `config:"kv_lock_time"` // maximum time to keep key-value database locked by process
+KvLockTime Duration `config:"kv_lock_time"` // maximum time to keep key-value database locked by process
DisableHTTPKeepAlives bool `config:"disable_http_keep_alives"`
Metadata bool `config:"metadata"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
@@ -743,7 +743,7 @@ func initialLogLevel() LogLevel {
// TimeoutOrInfinite returns ci.Timeout if > 0 or infinite otherwise
func (ci *ConfigInfo) TimeoutOrInfinite() time.Duration {
if ci.Timeout > 0 {
-return ci.Timeout
+return time.Duration(ci.Timeout)
}
return ModTimeNotSupported
}
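Note that only the Go types of the ConfigInfo fields change (written as plain Duration inside package fs, fs.Duration elsewhere); the config struct tags, and therefore the option and flag names, stay the same, and accessors such as TimeoutOrInfinite convert at the point where a time.Duration is actually returned. A hedged sketch of that accessor pattern applied to a hypothetical field:

package example

import (
	"time"

	"github.com/rclone/rclone/fs"
)

// exampleConfig stands in for a slice of ConfigInfo: the config tag, and hence
// the option name, is unchanged by the type switch.
type exampleConfig struct {
	ConnectTimeout fs.Duration `config:"contimeout"`
}

// ConnectTimeoutOrDefault mirrors the TimeoutOrInfinite pattern above:
// convert to time.Duration only at the point where callers need it.
func (c *exampleConfig) ConnectTimeoutOrDefault() time.Duration {
	if c.ConnectTimeout > 0 {
		return time.Duration(c.ConnectTimeout)
	}
	return 30 * time.Second // illustrative fallback, not rclone's default
}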

View File

@@ -77,7 +77,7 @@ func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
// GetModifyWindow calculates the maximum modify window between the given Fses
// and the Config.ModifyWindow parameter.
func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration {
-window := GetConfig(ctx).ModifyWindow
+window := time.Duration(GetConfig(ctx).ModifyWindow)
for _, f := range fss {
if f != nil {
precision := f.Precision()

View File

@@ -27,10 +27,10 @@ func NewDialer(ctx context.Context) *Dialer {
ci := fs.GetConfig(ctx)
dialer := &Dialer{
Dialer: net.Dialer{
-Timeout: ci.ConnectTimeout,
+Timeout: time.Duration(ci.ConnectTimeout),
KeepAlive: 30 * time.Second,
},
-timeout: ci.Timeout,
+timeout: time.Duration(ci.Timeout),
tclass: int(ci.TrafficClass),
}
if ci.BindAddr != nil {

View File

@@ -58,8 +58,8 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht
t.Proxy = http.ProxyFromEnvironment
t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
-t.TLSHandshakeTimeout = ci.ConnectTimeout
-t.ResponseHeaderTimeout = ci.Timeout
+t.TLSHandshakeTimeout = time.Duration(ci.ConnectTimeout)
+t.ResponseHeaderTimeout = time.Duration(ci.Timeout)
t.DisableKeepAlives = ci.DisableHTTPKeepAlives
// TLS Config
@@ -109,7 +109,7 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht
return NewDialer(ctx).DialContext(reqCtx, network, addr)
}
t.IdleConnTimeout = 60 * time.Second
-t.ExpectContinueTimeout = ci.ExpectContinueTimeout
+t.ExpectContinueTimeout = time.Duration(ci.ExpectContinueTimeout)
if ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
fs.Debugf(nil, "You have specified to dump information. Please be noted that the "+

View File

@@ -147,7 +147,7 @@ func (jobs *Jobs) kickExpire() {
jobs.mu.Lock()
defer jobs.mu.Unlock()
if !jobs.expireRunning {
-time.AfterFunc(jobs.opt.JobExpireInterval, jobs.Expire)
+time.AfterFunc(time.Duration(jobs.opt.JobExpireInterval), jobs.Expire)
jobs.expireRunning = true
}
}
@@ -159,13 +159,13 @@ func (jobs *Jobs) Expire() {
now := time.Now()
for ID, job := range jobs.jobs {
job.mu.Lock()
-if job.Finished && now.Sub(job.EndTime) > jobs.opt.JobExpireDuration {
+if job.Finished && now.Sub(job.EndTime) > time.Duration(jobs.opt.JobExpireDuration) {
delete(jobs.jobs, ID)
}
job.mu.Unlock()
}
if len(jobs.jobs) != 0 {
-time.AfterFunc(jobs.opt.JobExpireInterval, jobs.Expire)
+time.AfterFunc(time.Duration(jobs.opt.JobExpireInterval), jobs.Expire)
jobs.expireRunning = true
} else {
jobs.expireRunning = false

View File

@@ -24,7 +24,7 @@ func TestNewJobs(t *testing.T) {
func TestJobsKickExpire(t *testing.T) {
testy.SkipUnreliable(t)
jobs := newJobs()
-jobs.opt.JobExpireInterval = time.Millisecond
+jobs.opt.JobExpireInterval = fs.Duration(time.Millisecond)
assert.Equal(t, false, jobs.expireRunning)
jobs.kickExpire()
jobs.mu.Lock()
@@ -41,7 +41,7 @@ func TestJobsExpire(t *testing.T) {
ctx := context.Background()
wait := make(chan struct{})
jobs := newJobs()
-jobs.opt.JobExpireInterval = time.Millisecond
+jobs.opt.JobExpireInterval = fs.Duration(time.Millisecond)
assert.Equal(t, false, jobs.expireRunning)
var gotJobID int64
var gotJob *Job
@@ -64,7 +64,7 @@ func TestJobsExpire(t *testing.T) {
assert.Equal(t, 1, len(jobs.jobs))
jobs.mu.Lock()
job.mu.Lock()
-job.EndTime = time.Now().Add(-rc.Opt.JobExpireDuration - 60*time.Second)
+job.EndTime = time.Now().Add(-time.Duration(rc.Opt.JobExpireDuration) - 60*time.Second)
assert.Equal(t, true, jobs.expireRunning)
job.mu.Unlock()
jobs.mu.Unlock()

View File

@@ -75,12 +75,12 @@ var OptionsInfo = fs.Options{{
Groups: "RC,Metrics",
}, {
Name: "rc_job_expire_duration",
-Default: 60 * time.Second,
+Default: fs.Duration(60 * time.Second),
Help: "Expire finished async jobs older than this value",
Groups: "RC",
}, {
Name: "rc_job_expire_interval",
-Default: 10 * time.Second,
+Default: fs.Duration(10 * time.Second),
Help: "Interval to check for expired async jobs",
Groups: "RC",
}, {
@@ -120,8 +120,8 @@ type Options struct {
MetricsHTTP libhttp.Config `config:"metrics"`
MetricsAuth libhttp.AuthConfig `config:"metrics"`
MetricsTemplate libhttp.TemplateConfig `config:"metrics"`
-JobExpireDuration time.Duration `config:"rc_job_expire_duration"`
-JobExpireInterval time.Duration `config:"rc_job_expire_interval"`
+JobExpireDuration fs.Duration `config:"rc_job_expire_duration"`
+JobExpireInterval fs.Duration `config:"rc_job_expire_interval"`
}
// Opt is the default values used for Options
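The rc option definitions change in lockstep with the Options struct: since rclone's options machinery populates the field registered under the same config name, the Default in each fs.Options entry is now an fs.Duration as well, so the two sides keep matching types. A sketch of such a matching pair, with hypothetical names and help text:

package example

import (
	"time"

	"github.com/rclone/rclone/fs"
)

// exampleOptionsInfo pairs an option definition with a matching field; the
// option name and help text are illustrative, not taken from rclone.
var exampleOptionsInfo = fs.Options{{
	Name:    "example_expire_interval",
	Default: fs.Duration(10 * time.Second), // default type now matches the field
	Help:    "Interval to check for expired items",
	Groups:  "RC",
}}

type exampleOptions struct {
	ExpireInterval fs.Duration `config:"example_expire_interval"`
}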

View File

@@ -190,7 +190,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
return nil, err
}
if ci.MaxDuration > 0 {
-s.maxDurationEndTime = time.Now().Add(ci.MaxDuration)
+s.maxDurationEndTime = time.Now().Add(time.Duration(ci.MaxDuration))
fs.Infof(s.fdst, "Transfer session %v deadline: %s", ci.CutoffMode, s.maxDurationEndTime.Format("2006/01/02 15:04:05"))
}
// If a max session duration has been defined add a deadline

View File

@@ -1437,7 +1437,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
r.CheckRemoteItems(t, oneO, twoO, threeO, fourO)
ci.UpdateOlder = true
-ci.ModifyWindow = fs.ModTimeNotSupported
+ci.ModifyWindow = fs.Duration(fs.ModTimeNotSupported)
ctx = predictDstFromLogger(ctx)
err := Sync(ctx, r.Fremote, r.Flocal, false)
@@ -1467,7 +1467,7 @@ func testSyncWithMaxDuration(t *testing.T, cutoffMode fs.CutoffMode) {
}
r := fstest.NewRun(t)
-maxDuration := 250 * time.Millisecond
+maxDuration := fs.Duration(250 * time.Millisecond)
ci.MaxDuration = maxDuration
ci.CutoffMode = cutoffMode
ci.CheckFirst = true
@@ -1509,7 +1509,7 @@ func testSyncWithMaxDuration(t *testing.T, cutoffMode fs.CutoffMode) {
const maxTransferTime = 20 * time.Second
what := fmt.Sprintf("expecting elapsed time %v between %v and %v", elapsed, maxDuration, maxTransferTime)
-assert.True(t, elapsed >= maxDuration, what)
+assert.True(t, elapsed >= time.Duration(maxDuration), what)
assert.True(t, elapsed < maxTransferTime, what)
}

View File

@@ -120,11 +120,11 @@ var ConfigInfo = fs.Options{{
Help: "IPaddress:Port or :Port to bind server to",
}, {
Name: "server_read_timeout",
-Default: 1 * time.Hour,
+Default: fs.Duration(1 * time.Hour),
Help: "Timeout for server reading data",
}, {
Name: "server_write_timeout",
-Default: 1 * time.Hour,
+Default: fs.Duration(1 * time.Hour),
Help: "Timeout for server writing data",
}, {
Name: "max_header_bytes",
@@ -158,25 +158,25 @@ var ConfigInfo = fs.Options{{
// Config contains options for the http Server
type Config struct {
-ListenAddr []string `config:"addr"` // Port to listen on
-BaseURL string `config:"baseurl"` // prefix to strip from URLs
-ServerReadTimeout time.Duration `config:"server_read_timeout"` // Timeout for server reading data
-ServerWriteTimeout time.Duration `config:"server_write_timeout"` // Timeout for server writing data
-MaxHeaderBytes int `config:"max_header_bytes"` // Maximum size of request header
-TLSCert string `config:"cert"` // Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
-TLSKey string `config:"key"` // Path to TLS PEM private key file
-TLSCertBody []byte `config:"-"` // TLS PEM public key certificate body (can also include intermediate/CA certificates), ignores TLSCert
-TLSKeyBody []byte `config:"-"` // TLS PEM private key body, ignores TLSKey
-ClientCA string `config:"client_ca"` // Path to TLS PEM CA file with certificate authorities to verify clients with
-MinTLSVersion string `config:"min_tls_version"` // MinTLSVersion contains the minimum TLS version that is acceptable.
-AllowOrigin string `config:"allow_origin"` // AllowOrigin sets the Access-Control-Allow-Origin header
+ListenAddr []string `config:"addr"` // Port to listen on
+BaseURL string `config:"baseurl"` // prefix to strip from URLs
+ServerReadTimeout fs.Duration `config:"server_read_timeout"` // Timeout for server reading data
+ServerWriteTimeout fs.Duration `config:"server_write_timeout"` // Timeout for server writing data
+MaxHeaderBytes int `config:"max_header_bytes"` // Maximum size of request header
+TLSCert string `config:"cert"` // Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
+TLSKey string `config:"key"` // Path to TLS PEM private key file
+TLSCertBody []byte `config:"-"` // TLS PEM public key certificate body (can also include intermediate/CA certificates), ignores TLSCert
+TLSKeyBody []byte `config:"-"` // TLS PEM private key body, ignores TLSKey
+ClientCA string `config:"client_ca"` // Path to TLS PEM CA file with certificate authorities to verify clients with
+MinTLSVersion string `config:"min_tls_version"` // MinTLSVersion contains the minimum TLS version that is acceptable.
+AllowOrigin string `config:"allow_origin"` // AllowOrigin sets the Access-Control-Allow-Origin header
}
// AddFlagsPrefix adds flags for the httplib
func (cfg *Config) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
flags.StringArrayVarP(flagSet, &cfg.ListenAddr, prefix+"addr", "", cfg.ListenAddr, "IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to", prefix)
-flags.DurationVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", cfg.ServerReadTimeout, "Timeout for server reading data", prefix)
-flags.DurationVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", cfg.ServerWriteTimeout, "Timeout for server writing data", prefix)
+flags.FVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", "Timeout for server reading data", prefix)
+flags.FVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", "Timeout for server writing data", prefix)
flags.IntVarP(flagSet, &cfg.MaxHeaderBytes, prefix+"max-header-bytes", "", cfg.MaxHeaderBytes, "Maximum size of request header", prefix)
flags.StringVarP(flagSet, &cfg.TLSCert, prefix+"cert", "", cfg.TLSCert, "Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)", prefix)
flags.StringVarP(flagSet, &cfg.TLSKey, prefix+"key", "", cfg.TLSKey, "Path to TLS PEM private key file", prefix)
@@ -198,8 +198,8 @@ func AddHTTPFlagsPrefix(flagSet *pflag.FlagSet, prefix string, cfg *Config) {
func DefaultCfg() Config {
return Config{
ListenAddr: []string{"127.0.0.1:8080"},
-ServerReadTimeout: 1 * time.Hour,
-ServerWriteTimeout: 1 * time.Hour,
+ServerReadTimeout: fs.Duration(1 * time.Hour),
+ServerWriteTimeout: fs.Duration(1 * time.Hour),
MaxHeaderBytes: 4096,
MinTLSVersion: "tls1.0",
}
@@ -272,8 +272,8 @@ func newInstance(ctx context.Context, s *Server, listener net.Listener, tlsCfg *
listener: listener,
httpServer: &http.Server{
Handler: s.mux,
-ReadTimeout: s.cfg.ServerReadTimeout,
-WriteTimeout: s.cfg.ServerWriteTimeout,
+ReadTimeout: time.Duration(s.cfg.ServerReadTimeout),
+WriteTimeout: time.Duration(s.cfg.ServerWriteTimeout),
MaxHeaderBytes: s.cfg.MaxHeaderBytes,
ReadHeaderTimeout: 10 * time.Second, // time to send the headers
IdleTimeout: 60 * time.Second, // time to keep idle connections open
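At the bottom of the stack the consumer is still net/http, whose http.Server wants time.Duration, so the conversion happens where the server is built. A compact sketch of that hand-off (the Config here is a stripped-down stand-in, not the full libhttp type):

package example

import (
	"net/http"
	"time"

	"github.com/rclone/rclone/fs"
)

// miniConfig is a stripped-down stand-in for the libhttp Config above.
type miniConfig struct {
	ServerReadTimeout  fs.Duration `config:"server_read_timeout"`
	ServerWriteTimeout fs.Duration `config:"server_write_timeout"`
}

// newServer shows the boundary: net/http still speaks time.Duration, so the
// fields are unwrapped exactly where the server is constructed.
func newServer(cfg miniConfig, mux http.Handler) *http.Server {
	return &http.Server{
		Handler:      mux,
		ReadTimeout:  time.Duration(cfg.ServerReadTimeout),
		WriteTimeout: time.Duration(cfg.ServerWriteTimeout),
	}
}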

View File

@@ -79,7 +79,7 @@ func Start(ctx context.Context, facility string, f fs.Fs) (*DB, error) {
}
name := makeName(facility, f)
-lockTime := fs.GetConfig(ctx).KvLockTime
+lockTime := time.Duration(fs.GetConfig(ctx).KvLockTime)
db := &DB{
name: name,