Merge branch 'master' into 7935-sftp-read-ssh-config-file

kivi 2025-03-02 00:13:43 +01:00 committed by GitHub
commit 8348bdd802
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
225 changed files with 1393 additions and 1765 deletions

View File

@ -26,7 +26,7 @@ jobs:
  strategy:
    fail-fast: false
    matrix:
-     job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']
+     job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
    include:
      - job_name: linux
@ -80,12 +80,6 @@ jobs:
        compile_all: true
        deploy: true
-     - job_name: go1.22
-       os: ubuntu-latest
-       go: '1.22'
-       quicktest: true
-       racequicktest: true
      - job_name: go1.23
        os: ubuntu-latest
        go: '1.23'

View File

@ -19,6 +19,7 @@ import (
"net/url"
"os"
"path"
"slices"
"sort"
"strconv"
"strings"
@ -656,19 +657,33 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// FIXME interpret special errors - more to do here
- if storageErr, ok := err.(*azcore.ResponseError); ok {
+ var storageErr *azcore.ResponseError
+ if errors.As(err, &storageErr) {
// General errors from:
// https://learn.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
// Blob specific errors from:
// https://learn.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
switch storageErr.ErrorCode {
case "InvalidBlobOrBlock":
// These errors happen sometimes in multipart uploads
// because of block concurrency issues
return true, err
case "InternalError":
// The server encountered an internal error. Please retry the request.
return true, err
case "OperationTimedOut":
// The operation could not be completed within the permitted time. The
// operation may or may not have succeeded on the server side. Please query
// the server state before retrying the operation.
return true, err
case "ServerBusy":
// The server is currently unable to receive requests. Please retry your
// request.
return true, err
}
statusCode := storageErr.StatusCode
- for _, e := range retryErrorCodes {
- if statusCode == e {
- return true, err
- }
+ if slices.Contains(retryErrorCodes, statusCode) {
+ return true, err
+ }
}
return fserrors.ShouldRetry(err), err
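The rewrite above also changes behaviour, not just style: errors.As matches a *azcore.ResponseError anywhere in a wrapped error chain, where the old type assertion only matched the outermost error, and slices.Contains (Go 1.21) absorbs the hand-written membership loop. A minimal sketch of the combined pattern, using an illustrative stand-in error type and status list rather than rclone's:

package main

import (
	"errors"
	"fmt"
	"slices"
)

// httpError stands in for a typed SDK error such as *azcore.ResponseError.
type httpError struct{ StatusCode int }

func (e *httpError) Error() string { return fmt.Sprintf("http status %d", e.StatusCode) }

// retryErrorCodes uses illustrative values, not rclone's actual list.
var retryErrorCodes = []int{429, 500, 502, 503}

func shouldRetry(err error) bool {
	var he *httpError
	// errors.As walks the wrap chain built with fmt.Errorf("...: %w", err).
	if errors.As(err, &he) {
		return slices.Contains(retryErrorCodes, he.StatusCode)
	}
	return false
}

func main() {
	wrapped := fmt.Errorf("copy failed: %w", &httpError{StatusCode: 503})
	fmt.Println(shouldRetry(wrapped)) // true: the wrapping is seen through
}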
@ -1872,7 +1887,11 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
pollTime := 100 * time.Millisecond
for copyStatus != nil && string(*copyStatus) == string(container.CopyStatusTypePending) {
time.Sleep(pollTime)
- getMetadata, err := dstBlobSVC.GetProperties(ctx, &getOptions)
+ var getMetadata blob.GetPropertiesResponse
+ err = f.pacer.Call(func() (bool, error) {
+ getMetadata, err = dstBlobSVC.GetProperties(ctx, &getOptions)
+ return f.shouldRetry(ctx, err)
+ })
if err != nil {
return nil, err
}
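Wrapping the polling GetProperties call in f.pacer.Call means transient errors during the copy wait loop now get retried instead of failing the whole copy. A simplified stand-in for the pacer's retry contract (the helper below is hypothetical; rclone's real pacer adds adaptive backoff and rate limiting on top of the same func() (bool, error) shape):

package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn while it reports its error as retryable, sleeping a fixed
// interval between attempts.
func call(fn func() (retry bool, err error), maxTries int, sleep time.Duration) error {
	var err error
	for range maxTries {
		var retry bool
		retry, err = fn()
		if err == nil || !retry {
			return err
		}
		time.Sleep(sleep)
	}
	return fmt.Errorf("still failing after %d tries: %w", maxTries, err)
}

func main() {
	tries := 0
	err := call(func() (bool, error) {
		tries++
		if tries < 3 {
			return true, errors.New("transient error") // ask for another attempt
		}
		return false, nil // success, stop retrying
	}, 5, 10*time.Millisecond)
	fmt.Println(err, "after", tries, "tries") // <nil> after 3 tries
}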

View File

@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
func randomString(charCount int) string {
strBldr := strings.Builder{}
- for i := 0; i < charCount; i++ {
+ for range charCount {
randPos := rand.Int63n(52)
strBldr.WriteByte(chars[randPos])
}
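This is the first of many hunks that adopt Go 1.22's range-over-integer loops. A short sketch of the two forms that replace the counting loop:

package main

import "fmt"

func main() {
	// Where the index is unused, `for range n` replaces `for i := 0; i < n; i++`.
	for range 3 {
		fmt.Println("tick")
	}
	// With a variable, the loop still yields 0 through n-1.
	for i := range 3 {
		fmt.Println("index", i)
	}
}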

View File

@ -130,10 +130,10 @@ type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
- BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
- BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
- Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
- NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
+ BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
+ BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
+ Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
+ NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
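The interface{} to any renames that run through this commit are cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so both spellings name the identical type and no conversion is involved. A tiny demonstration:

package main

import "fmt"

func describe(v any) string { return fmt.Sprintf("%T: %v", v, v) }

func main() {
	var before interface{} = 42
	var after any = before // legal with no conversion: alias, not a new type
	fmt.Println(describe(before) == describe(after)) // true
}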

View File

@ -16,6 +16,7 @@ import (
"io"
"net/http"
"path"
"slices"
"strconv"
"strings"
"sync"
@ -589,12 +590,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
- for _, capability := range f.info.Allowed.Capabilities {
- if capability == permission {
- return true
- }
- }
- return false
+ return slices.Contains(f.info.Allowed.Capabilities, permission)
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@ -1275,7 +1271,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
toBeDeleted := make(chan *api.File, f.ci.Transfers)
var wg sync.WaitGroup
wg.Add(f.ci.Transfers)
- for i := 0; i < f.ci.Transfers; i++ {
+ for range f.ci.Transfers {
go func() {
defer wg.Done()
for object := range toBeDeleted {
@ -1939,7 +1935,7 @@ func init() {
// urlEncode encodes in with % encoding
func urlEncode(in string) string {
var out bytes.Buffer
- for i := 0; i < len(in); i++ {
+ for i := range len(in) {
c := in[i]
if noNeedToEncode[c] {
_ = out.WriteByte(c)
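Note the replacement is for i := range len(in), not for i := range in: ranging over the length visits every byte index, which matches the old C-style loop, while ranging over a string itself steps by UTF-8 rune boundaries. A sketch of the difference:

package main

import "fmt"

func main() {
	s := "héllo" // 'é' occupies bytes 1 and 2
	for i := range len(s) {
		fmt.Printf("%d ", i) // 0 1 2 3 4 5: every byte index
	}
	fmt.Println()
	for i := range s {
		fmt.Printf("%d ", i) // 0 1 3 4 5: rune start positions only
	}
	fmt.Println()
}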
@ -2260,7 +2256,7 @@ See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
},
}
- func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
@ -2349,7 +2345,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
},
}
- func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
maxAge := defaultMaxAge
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
@ -2372,7 +2368,7 @@ it would do.
`,
}
- func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
return nil, f.cleanUp(ctx, true, false, 0)
}
@ -2391,7 +2387,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)

View File

@ -478,17 +478,14 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
remaining = up.size
)
g.SetLimit(up.f.opt.UploadConcurrency)
- for part := 0; part < up.parts; part++ {
+ for part := range up.parts {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
- reqSize := remaining
- if reqSize >= up.chunkSize {
- reqSize = up.chunkSize
- }
+ reqSize := min(remaining, up.chunkSize)
part := part // for the closure
g.Go(func() (err error) {
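The clamp blocks removed in this and later hunks all reduce to the min and max builtins introduced in Go 1.21, which work for any ordered type without size-specific helpers:

package main

import "fmt"

func main() {
	remaining, chunkSize := int64(1500), int64(1024)
	reqSize := min(remaining, chunkSize) // replaces: if reqSize >= chunkSize { reqSize = chunkSize }
	fmt.Println(reqSize)                 // 1024

	free := max(int64(-5), 0) // clamp a quota difference at zero
	fmt.Println(free)         // 0
}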

View File

@ -237,8 +237,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
return claims, nil
}
- func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
- signingHeaders := map[string]interface{}{
+ func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
+ signingHeaders := map[string]any{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
@ -1343,12 +1343,8 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
nextStreamPosition = streamPosition
for {
- limit := f.opt.ListChunk
// box only allows a max of 500 events
- if limit > 500 {
- limit = 500
- }
+ limit := min(f.opt.ListChunk, 500)
opts := rest.Opts{
Method: "GET",

View File

@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
const defaultDelay = 10
var tries int
outer:
- for tries = 0; tries < maxTries; tries++ {
+ for tries = range maxTries {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
- for part := 0; part < session.TotalParts; part++ {
+ for part := range session.TotalParts {
// Check any errors
select {
case err = <-errs:
@ -211,10 +211,7 @@ outer:
default:
}
- reqSize := remaining
- if reqSize >= chunkSize {
- reqSize = chunkSize
- }
+ reqSize := min(remaining, chunkSize)
// Make a block of memory
buf := make([]byte, reqSize)

View File

@ -1092,7 +1092,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
return err
}
- for i := 0; i < len(entries); i++ {
+ for i := range entries {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(ctx, innerDir.Remote(), list)
@ -1428,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}()
// wait until both are done
- for c := 0; c < 2; c++ {
+ for range 2 {
<-done
}
}
@ -1753,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
// Stats returns stats about the cache storage
- func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
+ func (f *Fs) Stats() (map[string]map[string]any, error) {
return f.cache.Stats()
}
@ -1933,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
switch name {
case "stats":
return f.Stats()

View File

@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
- for i := 0; i < len(checkSample); i++ {
+ for i := range checkSample {
require.Equal(t, testData[i], checkSample[i])
}
}
@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
- for i := 0; i < len(readData); i++ {
+ for i := range readData {
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
}
}
@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
co, ok := o.(*cache.Object)
require.True(t, ok)
- for i := 0; i < 4; i++ { // read first 4
+ for i := range 4 { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
- for i := 0; i < int(cnt); i++ {
+ for range int(cnt) {
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
return err
}
- func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
+ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
var err error
- var l []interface{}
+ var l []any
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
var err error
var state cache.BackgroundUploadState
- for i := 0; i < 2; i++ {
+ for range 2 {
select {
case state = <-buCh:
// continue
@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error
- for i := 0; i < maxRetries; i++ {
+ for range maxRetries {
err = block()
if err == nil {
return nil

View File

@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
lastFile := ""
- for i := 0; i < totalFiles; i++ {
+ for i := range totalFiles {
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"

View File

@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
}
}
- for i := 0; i < r.workers; i++ {
+ for i := range r.workers {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pick up the chunk
// and retry a couple of times
- for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
+ for i := range r.cacheFs().opt.ReadRetries * 8 {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true

View File

@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
if err != nil {
return err
}
- var data map[string]interface{}
+ var data map[string]any
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %w", err)
@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
- func get(m interface{}, path ...interface{}) (interface{}, bool) {
+ func get(m any, path ...any) (any, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
- if mm, ok := m.(map[string]interface{}); ok {
+ if mm, ok := m.(map[string]any); ok {
if val, found := mm[idx]; found {
m = val
continue
@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) {
}
return nil, false
case int:
- if mm, ok := m.([]interface{}); ok {
+ if mm, ok := m.([]any); ok {
if len(mm) > idx {
m = mm[idx]
continue

View File

@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
)
// Constants
@ -597,7 +598,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
})
if err != nil {
- if err == bolt.ErrDatabaseNotOpen {
+ if err == errors.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return
}
@ -606,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
}
// Stats returns a go map with the stats key values
- func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
- r := make(map[string]map[string]interface{})
- r["data"] = make(map[string]interface{})
+ func (b *Persistent) Stats() (map[string]map[string]any, error) {
+ r := make(map[string]map[string]any)
+ r["data"] = make(map[string]any)
r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0)
r["files"] = make(map[string]interface{})
r["files"] = make(map[string]any)
r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now()

View File

@ -632,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
- func (f *Fs) forbidChunk(o interface{}, filePath string) error {
+ func (f *Fs) forbidChunk(o any, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
@ -680,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
- for tries := 0; tries < maxTransactionProbes; tries++ {
+ for range maxTransactionProbes {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
@ -1189,10 +1189,7 @@ func (f *Fs) put(
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
- size := c.sizeLeft
- if size > c.chunkSize {
- size = c.chunkSize
- }
+ size := min(c.sizeLeft, c.chunkSize)
savedReadCount := c.readCount
// If a single chunk is expected, avoid the extra rename operation
@ -1477,10 +1474,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
const bufLen = 1048576 // 1 MiB
buf := make([]byte, bufLen)
for size > 0 {
- n := size
- if n > bufLen {
- n = bufLen
- }
+ n := min(size, bufLen)
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
return err
}

View File

@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
})
}
- type settings map[string]interface{}
+ type settings map[string]any
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash

View File

@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
}
- c.buffers.New = func() interface{} {
+ c.buffers.New = func() any {
return new([blockSize]byte)
}
err := c.Key(password, salt)
@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
_, _ = result.WriteString(strconv.Itoa(dir) + ".")
// but we'll augment it with the nameKey for real calculation
- for i := 0; i < len(c.nameKey); i++ {
+ for i := range len(c.nameKey) {
dir += int(c.nameKey[i])
}
@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// add the nameKey to get the real rotate distance
- for i := 0; i < len(c.nameKey); i++ {
+ for i := range len(c.nameKey) {
dir += int(c.nameKey[i])
}
@ -664,7 +664,7 @@ func (n *nonce) increment() {
// add a uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
- for i := 0; i < 8; i++ {
+ for i := range 8 {
digit := (*n)[i]
xDigit := byte(x)
x >>= 8

View File

@ -1307,10 +1307,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext)
if underlyingLimit >= 0 {
- end = int(underlyingOffset + underlyingLimit)
- if end > len(ciphertext) {
- end = len(ciphertext)
- }
+ end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
}
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
@ -1490,7 +1487,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err)
// Test truncating the file at each possible point
- for i := 0; i < len(file16)-1; i++ {
+ for i := range len(file16) - 1 {
what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
fh, err := c.newDecrypter(cd)

View File

@ -924,7 +924,7 @@ Usage Example:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))

View File

@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
}
length := len(buf)
padding := n - (length % n)
- for i := 0; i < padding; i++ {
+ for range padding {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
if padding == 0 {
return nil, ErrorPaddingTooShort
}
- for i := 0; i < padding; i++ {
+ for i := range padding {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}

View File

@ -18,6 +18,7 @@ import (
"net/http"
"os"
"path"
"slices"
"sort"
"strconv"
"strings"
@ -199,12 +200,7 @@ func driveScopes(scopesString string) (scopes []string) {
// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
- for _, scope := range scopes {
- if scope == scopePrefix+"drive.appfolder" {
- return true
- }
- }
- return false
+ return slices.Contains(scopes, scopePrefix+"drive.appfolder")
}
func driveOAuthOptions() []fs.Option {
@ -958,12 +954,7 @@ func parseDrivePath(path string) (root string, err error) {
type listFn func(*drive.File) bool
func containsString(slice []string, s string) bool {
- for _, e := range slice {
- if e == s {
- return true
- }
- }
- return false
+ return slices.Contains(slice, s)
}
// getFile returns drive.File for the ID passed and fields passed in
@ -1152,13 +1143,7 @@ OUTER:
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
- found := false
- for _, stem := range stems {
- if stem == item.Name {
- found = true
- break
- }
- }
+ found := slices.Contains(stems, item.Name)
if !found {
continue
}
@ -1561,13 +1546,10 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
- for _, space := range info.Spaces {
- if space == "photos" {
- info.Md5Checksum = ""
- info.Sha1Checksum = ""
- info.Sha256Checksum = ""
- break
- }
+ if slices.Contains(info.Spaces, "photos") {
+ info.Md5Checksum = ""
+ info.Sha1Checksum = ""
+ info.Sha256Checksum = ""
}
}
o := &Object{
@ -2245,7 +2227,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg.Add(1)
in <- listREntry{directoryID, dir}
- for i := 0; i < f.ci.Checkers; i++ {
+ for range f.ci.Checkers {
go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
}
go func() {
@ -2254,11 +2236,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// if the input channel overflowed add the collected entries to the channel now
for len(overflow) > 0 {
mu.Lock()
- l := len(overflow)
// only fill half of the channel to prevent entries being put into overflow again
- if l > listRInputBuffer/2 {
- l = listRInputBuffer / 2
- }
+ l := min(len(overflow), listRInputBuffer/2)
wg.Add(l)
for _, d := range overflow[:l] {
in <- d
@ -2278,7 +2257,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Unlock()
}()
// wait until all the workers finish
- for i := 0; i < f.ci.Checkers; i++ {
+ for range f.ci.Checkers {
e := <-out
mu.Lock()
// if one worker returns an error early, close the input so all other workers exit
@ -3914,7 +3893,7 @@ Third delete all orphaned files to the trash
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "get":
out := make(map[string]string)

View File

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"strconv"
"strings"
"sync"
@ -324,9 +325,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
metadata := make(fs.Metadata, 16)
// Dump user metadata first as it overrides system metadata
- for k, v := range info.Properties {
- metadata[k] = v
- }
+ maps.Copy(metadata, info.Properties)
// System metadata
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)

View File

@ -177,10 +177,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
if start >= rx.ContentLength {
break
}
- reqSize = rx.ContentLength - start
- if reqSize >= int64(rx.f.opt.ChunkSize) {
- reqSize = int64(rx.f.opt.ChunkSize)
- }
+ reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer

View File

@ -55,10 +55,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
- toWrite := bytesPerBlock - d.n
- if toWrite > len(p) {
- toWrite = len(p)
- }
+ toWrite := min(bytesPerBlock-d.n, len(p))
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)

View File

@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
- for i := 0; i < chunk; i++ {
+ for i := range chunk {
data[i] = 'A'
}
for _, test := range []struct {

View File

@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
- func fields(opt interface{}) (pipeTags string, err error) {
+ func fields(opt any) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
- for i := 0; i < def.NumField(); i++ {
+ for i := range def.NumField() {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
@ -239,7 +239,7 @@ func fields(opt interface{}) (pipeTags string, err error) {
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
- func mustFields(opt interface{}) string {
+ func mustFields(opt any) string {
tags, err := fields(opt)
if err != nil {
panic(err)
@ -351,12 +351,12 @@ type SpaceInfo struct {
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
- Deleted []string `json:"deleted"`
- Errors []interface{} `json:"errors"`
- ID string `json:"fi_id"`
- BackgroundTask int `json:"backgroundtask"`
- UsSize string `json:"us_size"`
- PaSize string `json:"pa_size"`
+ Deleted []string `json:"deleted"`
+ Errors []any `json:"errors"`
+ ID string `json:"fi_id"`
+ BackgroundTask int `json:"backgroundtask"`
+ UsSize string `json:"us_size"`
+ PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}

View File

@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
}
// params for rpc
- type params map[string]interface{}
+ type params map[string]any
// rpc calls the rpc.php method of the SME file fabric
//

View File

@ -10,6 +10,7 @@ import (
"net/http"
"net/url"
"path"
"slices"
"strings"
"time"
@ -169,11 +170,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
if apiErr, ok := err.(files_sdk.ResponseError); ok {
- for _, e := range retryErrorCodes {
- if apiErr.HttpCode == e {
- fs.Debugf(nil, "Retrying API error %v", err)
- return true, err
- }
+ if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
+ fs.Debugf(nil, "Retrying API error %v", err)
+ return true, err
+ }
}

View File

@ -17,7 +17,7 @@ import (
"github.com/stretchr/testify/require"
)
- type settings map[string]interface{}
+ type settings map[string]any
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
fsName := strings.Split(f.Name(), "{")[0] // strip off hash

View File

@ -4,6 +4,7 @@ package googlephotos
import (
"path"
"slices"
"strings"
"sync"
@ -119,7 +120,7 @@ func (as *albums) _del(album *api.Album) {
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
- dirs = append(dirs[:i], dirs[i+1:]...)
+ dirs = slices.Delete(dirs, i, i+1)
break
}
}
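slices.Delete (Go 1.21) removes the elements s[i:j] and reads more clearly than the append(s[:i], s[i+1:]...) idiom; since Go 1.22 it also zeroes the discarded tail elements, which helps the garbage collector:

package main

import (
	"fmt"
	"slices"
)

func main() {
	dirs := []string{"a", "b", "c", "d"}
	dirs = slices.Delete(dirs, 1, 2) // delete the single element at index 1
	fmt.Println(dirs)                // [a c d]
}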

View File

@ -388,7 +388,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
Method: "GET",
RootURL: "https://accounts.google.com/.well-known/openid-configuration",
}
- var openIDconfig map[string]interface{}
+ var openIDconfig map[string]any
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(ctx, resp, err)
@ -448,7 +448,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
"token_type_hint": []string{"access_token"},
},
}
- var res interface{}
+ var res any
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(ctx, resp, err)

View File

@ -24,7 +24,7 @@ import (
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "drop":
return nil, f.db.Stop(true)

View File

@ -6,6 +6,7 @@ import (
"encoding/gob"
"errors"
"fmt"
"maps"
"strings"
"time"
@ -195,9 +196,7 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
r.Fp = op.fp
}
- for hashType, hashVal := range op.hashes {
- r.Hashes[hashType] = hashVal
- }
+ maps.Copy(r.Hashes, op.hashes)
if data, err = r.encode(op.key); err != nil {
return fmt.Errorf("marshal failed: %w", err)
}

View File

@ -52,10 +52,7 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui
total := len(p)
nullBytes := make([]byte, blockSize)
for len(p) > 0 {
- toWrite := int(blockSize - *bytesInBlock)
- if toWrite > len(p) {
- toWrite = len(p)
- }
+ toWrite := min(int(blockSize-*bytesInBlock), len(p))
c, err := writer.Write(p[:toWrite])
*bytesInBlock += uint32(c)
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
@ -276,7 +273,7 @@ func (h *hidriveHash) Sum(b []byte) []byte {
}
checksum := zeroSum
- for i := 0; i < len(h.levels); i++ {
+ for i := range h.levels {
level := h.levels[i]
if i < len(h.levels)-1 {
// Aggregate non-empty non-final levels.

View File

@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) {
func TestLevelIsFull(t *testing.T) {
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel()
- for i := 0; i < 256; i++ {
+ for range 256 {
assert.False(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.Equal(t, len(content), written)

View File

@ -505,7 +505,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
entries = append(entries, entry)
entriesMu.Unlock()
}
- for i := 0; i < checkers; i++ {
+ for range checkers {
wg.Add(1)
go func() {
defer wg.Done()
@ -740,7 +740,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "set":
newOpt := f.opt

View File

@ -76,7 +76,7 @@ func (c *Client) DriveService() (*DriveService, error) {
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized), it will try to
// reauthenticate and retry the request.
- func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+ func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
resp, err = c.Session.Request(ctx, opts, request, response)
if err != nil && resp != nil {
// try to reauth
@ -100,7 +100,7 @@ func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
- func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+ func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
// Make the request without re-authenticating
resp, err = c.Session.Request(ctx, opts, request, response)
return resp, err
@ -161,6 +161,6 @@ func newRequestError(Status string, Text string) *RequestError {
}
// newErrorf makes a new error from sprintf parameters.
- func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
+ func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError {
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}

View File

@ -476,7 +476,7 @@ func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID st
// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
- // putting name in info doesnt work. extension does work so assume this is a bug in the endpoint
+ // putting name in info doesn't work. extension does work so assume this is a bug in the endpoint
values := map[string]any{
"info_to_update": map[string]any{},
}
@ -733,8 +733,8 @@ type DocumentUpdateResponse struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
- OperationID interface{} `json:"operation_id"`
- Document *Document `json:"document"`
+ OperationID any `json:"operation_id"`
+ Document *Document `json:"document"`
} `json:"results"`
}
@ -765,9 +765,9 @@ type Document struct {
IsWritable bool `json:"is_writable"`
IsHidden bool `json:"is_hidden"`
} `json:"file_flags"`
- LastOpenedTime int64 `json:"lastOpenedTime"`
- RestorePath interface{} `json:"restorePath"`
- HasChainedParent bool `json:"hasChainedParent"`
+ LastOpenedTime int64 `json:"lastOpenedTime"`
+ RestorePath any `json:"restorePath"`
+ HasChainedParent bool `json:"hasChainedParent"`
}
// DriveID returns the drive ID of the Document.

View File

@ -3,13 +3,13 @@ package api
import (
"context"
"fmt"
"maps"
"net/http"
"net/url"
"slices"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
@ -35,7 +35,7 @@ type Session struct {
// }
// Request makes a request
- func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
+ func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) {
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
if err != nil {
@ -129,7 +129,7 @@ func (s *Session) AuthWithToken(ctx context.Context) error {
// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
- values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
+ values := map[string]any{"securityCode": map[string]string{"code": code}}
body, err := IntoReader(values)
if err != nil {
return err
@ -220,9 +220,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
"Referer": fmt.Sprintf("%s/", homeEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
- for k, v := range overwrite {
- headers[k] = v
- }
+ maps.Copy(headers, overwrite)
return headers
}
@ -230,9 +228,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
headers := GetCommonHeaders(map[string]string{})
headers["Cookie"] = s.GetCookieString()
- for k, v := range overwrite {
- headers[k] = v
- }
+ maps.Copy(headers, overwrite)
return headers
}
@ -254,9 +250,7 @@ func GetCommonHeaders(overwrite map[string]string) map[string]string {
"Referer": fmt.Sprintf("%s/", baseEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
- for k, v := range overwrite {
- headers[k] = v
- }
+ maps.Copy(headers, overwrite)
return headers
}
@ -338,33 +332,33 @@ type AccountInfo struct {
// ValidateDataDsInfo represents an validation info
type ValidateDataDsInfo struct {
- HsaVersion int `json:"hsaVersion"`
- LastName string `json:"lastName"`
- ICDPEnabled bool `json:"iCDPEnabled"`
- TantorMigrated bool `json:"tantorMigrated"`
- Dsid string `json:"dsid"`
- HsaEnabled bool `json:"hsaEnabled"`
- IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
- IroncadeMigrated bool `json:"ironcadeMigrated"`
- Locale string `json:"locale"`
- BrZoneConsolidated bool `json:"brZoneConsolidated"`
- ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
- IsManagedAppleID bool `json:"isManagedAppleID"`
- IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
- IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
- ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
- Gilligvited bool `json:"gilligvited"`
- AppleIDAliases []interface{} `json:"appleIdAliases"`
- UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
- IsPaidDeveloper bool `json:"isPaidDeveloper"`
- CountryCode string `json:"countryCode"`
- NotificationID string `json:"notificationId"`
- PrimaryEmailVerified bool `json:"primaryEmailVerified"`
- ADsID string `json:"aDsID"`
- Locked bool `json:"locked"`
- ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
- HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
- PrimaryEmail string `json:"primaryEmail"`
+ HsaVersion int `json:"hsaVersion"`
+ LastName string `json:"lastName"`
+ ICDPEnabled bool `json:"iCDPEnabled"`
+ TantorMigrated bool `json:"tantorMigrated"`
+ Dsid string `json:"dsid"`
+ HsaEnabled bool `json:"hsaEnabled"`
+ IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
+ IroncadeMigrated bool `json:"ironcadeMigrated"`
+ Locale string `json:"locale"`
+ BrZoneConsolidated bool `json:"brZoneConsolidated"`
+ ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
+ IsManagedAppleID bool `json:"isManagedAppleID"`
+ IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
+ IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
+ ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
+ Gilligvited bool `json:"gilligvited"`
+ AppleIDAliases []any `json:"appleIdAliases"`
+ UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
+ IsPaidDeveloper bool `json:"isPaidDeveloper"`
+ CountryCode string `json:"countryCode"`
+ NotificationID string `json:"notificationId"`
+ PrimaryEmailVerified bool `json:"primaryEmailVerified"`
+ ADsID string `json:"aDsID"`
+ Locked bool `json:"locked"`
+ ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
+ HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
+ PrimaryEmail string `json:"primaryEmail"`
AppleIDEntries []struct {
IsPrimary bool `json:"isPrimary"`
Type string `json:"type"`

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"slices"
"strconv"
"time"
@ -142,12 +143,7 @@ func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
- for _, e := range retryErrorCodes {
- if resp.StatusCode == e {
- return true
- }
- }
- return false
+ return slices.Contains(retryErrorCodes, resp.StatusCode)
}
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {

View File

@ -13,6 +13,7 @@ import (
"net/url"
"path"
"regexp"
"slices"
"strconv"
"strings"
"time"
@ -200,7 +201,7 @@ Only enable if you need to be guaranteed to be reflected after write operations.
const iaItemMaxSize int64 = 1099511627776
// metadata keys that are not writeable
- var roMetadataKey = map[string]interface{}{
+ var roMetadataKey = map[string]any{
// do not add mtime here, it's a documented exception
"name": nil, "source": nil, "size": nil, "md5": nil,
"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
@ -991,10 +992,8 @@ func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
- for _, e := range retryErrorCodes {
- if resp.StatusCode == e {
- return true, err
- }
+ if slices.Contains(retryErrorCodes, resp.StatusCode) {
+ return true, err
+ }
}
// Ok, not an awserr, check for generic failure conditions
@ -1147,13 +1146,7 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
}
fileTrackers, _ := listOrString(iaFile.UpdateTrack)
- trackerMatch := false
- for _, v := range fileTrackers {
- if v == tracker {
- trackerMatch = true
- break
- }
- }
+ trackerMatch := slices.Contains(fileTrackers, tracker)
if !trackerMatch {
continue
}

View File

@ -70,7 +70,7 @@ func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
- return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
+ return fmt.Appendf(nil, "\"%s\"", t.String()), nil
}
// LoginToken is struct representing the login token generated in the WebUI
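fmt.Appendf (Go 1.19) formats directly into a byte slice, avoiding the intermediate string allocated by []byte(fmt.Sprintf(...)):

package main

import "fmt"

func main() {
	t := "2025-03-02T00:13:43+01:00"
	b := fmt.Appendf(nil, "\"%s\"", t) // appending to nil allocates just the result
	fmt.Println(string(b))             // "2025-03-02T00:13:43+01:00"
}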
@ -165,25 +165,25 @@ type DeviceRegistrationResponse struct {
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
- Username string `json:"username"`
- Email string `json:"email"`
- Name string `json:"name"`
- CountryCode string `json:"country_code"`
- LanguageCode string `json:"language_code"`
- CustomerGroupCode string `json:"customer_group_code"`
- BrandCode string `json:"brand_code"`
- AccountType string `json:"account_type"`
- SubscriptionType string `json:"subscription_type"`
- Usage int64 `json:"usage"`
- Quota int64 `json:"quota"`
- BusinessUsage int64 `json:"business_usage"`
- BusinessQuota int64 `json:"business_quota"`
- WriteLocked bool `json:"write_locked"`
- ReadLocked bool `json:"read_locked"`
- LockedCause interface{} `json:"locked_cause"`
- WebHash string `json:"web_hash"`
- AndroidHash string `json:"android_hash"`
- IOSHash string `json:"ios_hash"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ Name string `json:"name"`
+ CountryCode string `json:"country_code"`
+ LanguageCode string `json:"language_code"`
+ CustomerGroupCode string `json:"customer_group_code"`
+ BrandCode string `json:"brand_code"`
+ AccountType string `json:"account_type"`
+ SubscriptionType string `json:"subscription_type"`
+ Usage int64 `json:"usage"`
+ Quota int64 `json:"quota"`
+ BusinessUsage int64 `json:"business_usage"`
+ BusinessQuota int64 `json:"business_quota"`
+ WriteLocked bool `json:"write_locked"`
+ ReadLocked bool `json:"read_locked"`
+ LockedCause any `json:"locked_cause"`
+ WebHash string `json:"web_hash"`
+ AndroidHash string `json:"android_hash"`
+ IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash

View File

@ -193,7 +193,7 @@ func (o *Object) set(e *entity) {
// Call linkbox with the query in opts and return result
//
// This will be checked for error and an error will be returned if Status != 1
- func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
+ func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error {
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)

View File

@ -1046,7 +1046,7 @@ you can try to change the output.`,
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
@ -1056,7 +1056,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
- out := map[string]interface{}{}
+ out := map[string]any{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt

View File

@ -86,7 +86,7 @@ func TestVerifyCopy(t *testing.T) {
require.NoError(t, err)
src.(*Object).fs.opt.NoCheckUpdated = true
- for i := 0; i < 100; i++ {
+ for i := range 100 {
go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
}
_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)

View File

@ -63,8 +63,8 @@ type UserInfoResponse struct {
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
- Subscription []interface{} `json:"subscription"`
- Version string `json:"version"`
+ Subscription []any `json:"subscription"`
+ Version string `json:"version"`
} `json:"billing"`
Bonuses struct {
CameraUpload bool `json:"camera_upload"`

View File

@ -901,7 +901,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
return nil, nil
case api.ListParseUnknown15:
skip := int(r.ReadPu32())
- for i := 0; i < skip; i++ {
+ for range skip {
r.ReadPu32()
r.ReadPu32()
}
@ -1768,7 +1768,7 @@ func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOpt
func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
f.speedupGlobs = nil
f.speedupAny = false
- uniqueValidPatterns := make(map[string]interface{})
+ uniqueValidPatterns := make(map[string]any)
for _, pattern := range strings.Split(patternString, ",") {
pattern = strings.ToLower(strings.TrimSpace(pattern))
@ -2131,10 +2131,7 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in
if limit < 0 {
limit = size - offset
}
- end = offset + limit
- if end > size {
- end = size
- }
+ end = min(offset+limit, size)
partial = !(offset == 0 && end == size)
return offset, end, partial
}

View File

@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
- for i := 0; i < chunk; i++ {
+ for i := range chunk {
data[i] = 'A'
}
for _, test := range []struct {

View File

@ -21,6 +21,7 @@ import (
"fmt"
"io"
"path"
"slices"
"strings"
"sync"
"time"
@ -218,11 +219,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
- srv.SetLogger(func(format string, v ...interface{}) {
+ srv.SetLogger(func(format string, v ...any) {
fs.Infof("*go-mega*", format, v...)
})
if opt.Debug {
- srv.SetDebugger(func(format string, v ...interface{}) {
+ srv.SetDebugger(func(format string, v ...any) {
fs.Debugf("*go-mega*", format, v...)
})
}
@ -498,11 +499,8 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
if err != nil {
return false, fmt.Errorf("list failed: %w", err)
}
- for _, item := range nodes {
- if fn(item) {
- found = true
- break
- }
+ if slices.ContainsFunc(nodes, fn) {
+ found = true
}
return
}
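slices.ContainsFunc stops at the first element for which the predicate returns true, so the early break of the removed loop is preserved:

package main

import (
	"fmt"
	"slices"
)

type node struct{ name string }

func main() {
	nodes := []node{{"a"}, {"b"}, {"c"}}
	found := slices.ContainsFunc(nodes, func(n node) bool {
		return n.name == "b" // checked in order; stops on first match
	})
	fmt.Println(found) // true
}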
@ -1156,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the chunks
// FIXME do this in parallel
- for id := 0; id < u.Chunks(); id++ {
+ for id := range u.Chunks() {
_, chunkSize, err := u.ChunkLocation(id)
if err != nil {
return fmt.Errorf("upload failed to read chunk location: %w", err)

View File

@ -29,7 +29,7 @@ func testPurgeListDeadlock(t *testing.T) {
r.Fremote.Features().Disable("Purge") // force fallback-purge
// make a lot of files to prevent it from finishing too quickly
- for i := 0; i < 100; i++ {
+ for i := range 100 {
dst := "file" + fmt.Sprint(i) + ".txt"
r.WriteObject(ctx, dst, "hello", t1)
}

View File

@ -274,7 +274,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Command the backend to run a named commands: du and symlink
- func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "du":
// No arg parsing needed, the path is passed in the fs
@ -858,7 +858,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// callBackend calls NetStorage API using either rest.Call or rest.CallXML function,
// depending on whether the response is required
- func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response interface{}, options []fs.OpenOption) (io.ReadCloser, error) {
+ func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) {
opts := rest.Opts{
Method: method,
RootURL: URL,
@ -1080,7 +1080,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
}
// netStorageDuRequest performs a NetStorage du request
- func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
+ func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
URL := f.url("")
const actionHeader = "version=1&action=du&format=xml&encoding=utf-8"
duResp := &Du{}
@ -1100,7 +1100,7 @@ func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
}
// netStorageDuRequest performs a NetStorage symlink request
- func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (interface{}, error) {
+ func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (any, error) {
target := url.QueryEscape(strings.TrimSuffix(dst, "/"))
actionHeader := "version=1&action=symlink&target=" + target
if modTime != nil {

View File

@ -2532,10 +2532,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
remaining := size
position := int64(0)
for remaining > 0 {
- n := int64(o.fs.opt.ChunkSize)
- if remaining < n {
- n = remaining
- }
+ n := min(remaining, int64(o.fs.opt.ChunkSize))
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)

View File

@ -86,7 +86,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
- for i := 0; i < dataSize; i++ {
+ for i := range dataSize {
shift := (i * 11) % 160
shiftBytes := shift / 8
shiftBits := shift % 8

View File

@ -130,10 +130,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
require.NoError(t, err, what)
h := New()
for i := 0; i < len(in); i += blockSize {
- end := i + blockSize
- if end > len(in) {
- end = len(in)
- }
+ end := min(i+blockSize, len(in))
n, err := h.Write(in[i:end])
require.Equal(t, end-i, n, what)
require.NoError(t, err, what)

View File

@ -491,7 +491,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
- var request interface{} = moveCopyFileData
+ var request any = moveCopyFileData
// use /file/rename.json if moving within the same directory
_, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
@ -564,7 +564,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Method: "POST",
Path: "/folder/move_copy.json",
}
- var request interface{} = moveFolderData
+ var request any = moveFolderData
// use /folder/rename.json if moving within the same parent directory
if srcDirectoryID == dstDirectoryID {
@ -1042,10 +1042,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
chunkCounter := 0
for remainingBytes > 0 {
- currentChunkSize := int64(o.fs.opt.ChunkSize)
- if currentChunkSize > remainingBytes {
- currentChunkSize = remainingBytes
- }
+ currentChunkSize := min(int64(o.fs.opt.ChunkSize), remainingBytes)
remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)

View File

@ -131,7 +131,7 @@ If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
- opt map[string]string) (result interface{}, err error) {
+ opt map[string]string) (result any, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
@ -159,7 +159,7 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
}
}
- func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
+ func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
@ -332,7 +332,7 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
return uploadedParts, nil
}
- func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
+ func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
req := objectstorage.RestoreObjectsRequest{
NamespaceName: common.String(f.opt.Namespace),
RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},

View File

@ -112,7 +112,7 @@ func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType s
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
- Refresh: func() (interface{}, string, error) {
+ Refresh: func() (any, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)

View File

@ -131,7 +131,7 @@ func (o *Object) setMetaData(
contentMd5 *string,
contentType *string,
lastModified *common.SDKTime,
- storageTier interface{},
+ storageTier any,
meta map[string]string) error {
if contentLength != nil {

View File

@ -5,6 +5,7 @@ package oracleobjectstorage
import (
"context"
"fmt"
"slices"
"strings"
"time"
@ -23,7 +24,7 @@ var refreshGracePeriod = 30 * time.Second
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
- type StateRefreshFunc func() (result interface{}, state string, err error)
+ type StateRefreshFunc func() (result any, state string, err error)
// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
@ -56,7 +57,7 @@ type StateChangeConf struct {
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
- func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
+ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
notfoundTick := 0
@ -72,7 +73,7 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
}
type Result struct {
- Result interface{}
+ Result any
State string
Error error
Done bool
@ -165,12 +166,9 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
}
}
- for _, allowed := range conf.Pending {
- if currentState == allowed {
- found = true
- targetOccurrence = 0
- break
- }
+ if slices.Contains(conf.Pending, currentState) {
+ found = true
+ targetOccurrence = 0
}
if !found && len(conf.Pending) > 0 {
@ -278,8 +276,8 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
// NotFoundError resource not found error
type NotFoundError struct {
LastError error
- LastRequest interface{}
- LastResponse interface{}
+ LastRequest any
+ LastResponse any
Message string
Retries int
}

View File

@ -990,10 +990,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
if err != nil {
return nil, err
}
free := q.Quota - q.UsedQuota
if free < 0 {
free = 0
}
free := max(q.Quota-q.UsedQuota, 0)
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
@ -1324,7 +1321,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists, trying a few times
for i := 0; i < 5; i++ {
for range 5 {
delObj, delErr := o.fs.NewObject(ctx, o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove(ctx)
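Two language updates land together in this file: the max builtin (Go 1.21) replaces the clamp-with-if idiom in the free-space calculation, and ranging over an integer (Go 1.22) replaces counting loops whose index is unused. A small self-contained sketch of both, with made-up numbers:

package main

import "fmt"

func main() {
	quota, used := int64(100), int64(130)
	free := max(quota-used, 0) // never report negative free space
	fmt.Println(free)          // 0

	for range 5 { // body runs 5 times; no index variable needed
		fmt.Println("retry")
	}
}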

View File

@ -37,7 +37,7 @@ func (c *writerAt) Close() error {
}
sizeOk := false
sizeLastSeen := int64(0)
for retry := 0; retry < 5; retry++ {
for retry := range 5 {
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
obj, err := c.fs.NewObject(c.ctx, c.remote)
if err != nil {

View File

@ -71,14 +71,14 @@ type Error struct {
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []any `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
@ -168,44 +168,44 @@ type FileList struct {
// for a single file, i.e. support for higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
ReferenceEvents []interface{} `json:"reference_events"`
ReferenceResource interface{} `json:"reference_resource"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
Tags []interface{} `json:"tags"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
ReferenceEvents []any `json:"reference_events"`
ReferenceResource any `json:"reference_resource"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []any `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
Tags []any `json:"tags"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
// FileLinks includes links to file at backend
@ -235,18 +235,18 @@ type Media struct {
VideoType string `json:"video_type,omitempty"` // "mpegts"
HdrType string `json:"hdr_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"` // "category_origin"
Audio interface{} `json:"audio"` // TODO: undiscovered yet
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []any `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"` // "category_origin"
Audio any `json:"audio"` // TODO: undiscovered yet
}
// FileParams includes parameters for instant open
@ -263,20 +263,20 @@ type FileParams struct {
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct{} `json:"params,omitempty"` // TODO
CategoryIDs []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct{} `json:"links,omitempty"` // TODO
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // "decompress" for rar files
Access []any `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []any `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct{} `json:"params,omitempty"` // TODO
CategoryIDs []any `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct{} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
@ -290,27 +290,27 @@ type TaskList struct {
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []any `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource any `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters informing status of Task

View File

@ -638,7 +638,7 @@ func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper
return c
}
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request any, response any) (resp *http.Response, err error) {
if c.captcha != nil {
token, err := c.captcha.Token(opts)
if err != nil || token == "" {

View File

@ -1232,7 +1232,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
params := url.Values{}
iVal := reflect.ValueOf(&form.MultiParts).Elem()
iTyp := iVal.Type()
for i := 0; i < iVal.NumField(); i++ {
for i := range iVal.NumField() {
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
}
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
@ -1520,7 +1520,7 @@ Result:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "addurl":
if len(arg) != 1 {
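The uploadByForm hunk now ranges directly over iVal.NumField(). The underlying technique is reflecting a struct's fields into url.Values keyed by their json tags; a sketch with a hypothetical formParts stand-in for the backend's MultiParts struct:

package main

import (
	"fmt"
	"net/url"
	"reflect"
)

// formParts is a hypothetical stand-in for the backend's MultiParts struct.
type formParts struct {
	Policy    string `json:"policy"`
	Signature string `json:"signature"`
}

func main() {
	form := formParts{Policy: "abc", Signature: "xyz"}
	params := url.Values{}
	v := reflect.ValueOf(form)
	t := v.Type()
	for i := range v.NumField() { // Go 1.22 integer range
		params.Set(t.Field(i).Tag.Get("json"), v.Field(i).String())
	}
	fmt.Println(params.Encode()) // policy=abc&signature=xyz
}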

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"slices"
"strconv"
"time"
@ -13,10 +14,8 @@ import (
)
func checkStatusCode(resp *http.Response, expected ...int) error {
for _, code := range expected {
if resp.StatusCode == code {
return nil
}
if slices.Contains(expected, resp.StatusCode) {
return nil
}
return &statusCodeError{response: resp}
}

View File

@ -332,10 +332,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
var offsetMismatch bool
buf := make([]byte, defaultChunkSize)
for clientOffset < size {
chunkSize := size - clientOffset
if chunkSize >= int64(defaultChunkSize) {
chunkSize = int64(defaultChunkSize)
}
chunkSize := min(size-clientOffset, int64(defaultChunkSize))
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunkStart := clientOffset
reqSize := chunkSize
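The sendUpload change is the same min-builtin clamp applied to chunked uploads: every chunk is defaultChunkSize except the last, which shrinks to whatever remains. A sketch of the loop shape with hypothetical sizes:

package main

import "fmt"

func main() {
	const defaultChunkSize = int64(256)
	size, offset := int64(1000), int64(0)
	for offset < size {
		// min picks the smaller of "bytes left" and the default chunk
		// size, so the final chunk shrinks automatically.
		chunkSize := min(size-offset, defaultChunkSize)
		fmt.Printf("chunk at %d, %d bytes\n", offset, chunkSize)
		offset += chunkSize
	}
}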

View File

@ -358,7 +358,7 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
})()
ch := make(chan chunk, mu.cfg.concurrency)
for i := 0; i < mu.cfg.concurrency; i++ {
for range mu.cfg.concurrency {
mu.wg.Add(1)
go mu.readChunk(ch)
}
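The multiPartUpload loop above is the classic fixed-size worker pool: spawn exactly concurrency goroutines draining one shared channel, tracked by a WaitGroup. A minimal runnable sketch (the chunk type here is hypothetical):

package main

import (
	"fmt"
	"sync"
)

type chunk struct{ num int }

func main() {
	const concurrency = 4
	ch := make(chan chunk, concurrency)
	var wg sync.WaitGroup
	for range concurrency { // Go 1.22: spawn exactly `concurrency` workers
		wg.Add(1)
		go func() {
			defer wg.Done()
			for c := range ch { // each worker drains the shared channel
				fmt.Println("processing chunk", c.num)
			}
		}()
	}
	for i := range 10 {
		ch <- chunk{num: i}
	}
	close(ch) // workers exit once the channel is drained
	wg.Wait()
}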

View File

@ -15,6 +15,7 @@ import (
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@ -643,10 +644,8 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
return err
}
for _, removedID := range result.IDs {
if removedID == id {
return nil
}
if slices.Contains(result.IDs, id) {
return nil
}
return fmt.Errorf("file %s was not deleted successfully", id)

View File

@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory

View File

@ -40,7 +40,7 @@ func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Creden
return nil
}
// NoOpCredentialsProvider is needed since S3 SDK requires having credentials, eventhough authentication is happening via IBM IAM.
// NoOpCredentialsProvider is needed since S3 SDK requires having credentials, even though authentication is happening via IBM IAM.
type NoOpCredentialsProvider struct{}
// Retrieve returns mock credentials for the NoOpCredentialsProvider.

View File

@ -19,6 +19,7 @@ import (
"net/url"
"path"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@ -3097,10 +3098,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
return true, err
}
}
for _, e := range retryErrorCodes {
if httpStatusCode == e {
return true, err
}
if slices.Contains(retryErrorCodes, httpStatusCode) {
return true, err
}
}
// Ok, not an awserr, check for generic failure conditions
@ -3230,7 +3229,7 @@ func fixupRequest(o *s3.Options, opt *Options) {
type s3logger struct{}
// Logf is expected to support the standard fmt package "verbs".
func (s3logger) Logf(classification logging.Classification, format string, v ...interface{}) {
func (s3logger) Logf(classification logging.Classification, format string, v ...any) {
switch classification {
default:
case logging.Debug:
@ -5253,7 +5252,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "restore":
req := s3.RestoreObjectInput{

View File

@ -9,9 +9,9 @@ import (
// Renew allows tokens to be renewed on expiry.
type Renew struct {
ts *time.Ticker // timer indicating when it's time to renew the token
run func() error // the callback to do the renewal
done chan interface{} // channel to end the go routine
ts *time.Ticker // timer indicating when it's time to renew the token
run func() error // the callback to do the renewal
done chan any // channel to end the go routine
shutdown *sync.Once
}
@ -22,7 +22,7 @@ func NewRenew(every time.Duration, run func() error) *Renew {
r := &Renew{
ts: time.NewTicker(every),
run: run,
done: make(chan interface{}),
done: make(chan any),
shutdown: &sync.Once{},
}
go r.renewOnExpiry()
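Renew pairs a time.Ticker with a done channel so the renewal goroutine can be stopped exactly once via sync.Once. The renewOnExpiry body is not part of this hunk, but its select loop plausibly looks like the following sketch (an assumption, not the real implementation):

package main

import (
	"fmt"
	"sync"
	"time"
)

type renewer struct {
	ts       *time.Ticker
	run      func() error
	done     chan any
	shutdown *sync.Once
}

func (r *renewer) loop() {
	for {
		select {
		case <-r.ts.C:
			if err := r.run(); err != nil {
				fmt.Println("renew failed:", err)
			}
		case <-r.done:
			return // Shutdown was called
		}
	}
}

// Shutdown stops the ticker and ends the goroutine; sync.Once makes it
// safe to call more than once.
func (r *renewer) Shutdown() {
	r.shutdown.Do(func() {
		r.ts.Stop()
		close(r.done)
	})
}

func main() {
	r := &renewer{
		ts:       time.NewTicker(10 * time.Millisecond),
		run:      func() error { fmt.Println("token renewed"); return nil },
		done:     make(chan any),
		shutdown: &sync.Once{},
	}
	go r.loop()
	time.Sleep(35 * time.Millisecond)
	r.Shutdown()
}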

View File

@ -1313,7 +1313,7 @@ func (f *Fs) getCachedLibraries(ctx context.Context) ([]api.Library, error) {
f.librariesMutex.Lock()
defer f.librariesMutex.Unlock()
libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value interface{}, ok bool, error error) {
libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value any, ok bool, error error) {
// Load the libraries if not present in the cache
libraries, err := f.getLibraries(ctx)
if err != nil {

View File

@ -8,6 +8,7 @@ import (
"fmt"
"io"
"os/exec"
"slices"
"strings"
"time"
@ -89,7 +90,7 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
// Connect to a remote host and request the sftp subsystem via
// the 'ssh' command. This assumes that passwordless login is
// correctly configured.
ssh := append([]string(nil), s.f.opt.SSH...)
ssh := slices.Clone(s.f.opt.SSH)
s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
// Allow the command a short time only to shut down
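slices.Clone (Go 1.21) is the readable spelling of the append([]string(nil), s...) copy idiom; both allocate an independent backing array, so mutating the copy cannot corrupt the stored SSH option:

package main

import (
	"fmt"
	"slices"
)

func main() {
	opt := []string{"ssh", "-o", "BatchMode=yes", "example.com"}
	args := slices.Clone(opt) // independent copy, same as append([]string(nil), opt...)
	args[0] = "autossh"
	fmt.Println(opt[0], args[0]) // ssh autossh: the original is untouched
}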

View File

@ -20,13 +20,13 @@ func TestStringLock(t *testing.T) {
inner = 100
total = outer * inner
)
for k := 0; k < outer; k++ {
for range outer {
for j := range counter {
wg.Add(1)
go func(j int) {
defer wg.Done()
ID := fmt.Sprintf("%d", j)
for i := 0; i < inner; i++ {
for range inner {
lock.Lock(ID)
n := counter[j]
time.Sleep(1 * time.Millisecond)

View File

@ -537,7 +537,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
for range f.ci.Transfers {
f.bufferTokens <- nil
}
}
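fillBufferTokens uses a buffered channel as a token pool: it is pre-filled with ci.Transfers nil slots, and a worker must receive a token before using a buffer (returning it afterwards), which bounds concurrent buffer allocation. A small sketch of the pattern:

package main

import "fmt"

func main() {
	const transfers = 3
	tokens := make(chan []byte, transfers)
	for range transfers {
		tokens <- nil // nil token: the buffer is allocated lazily on first use
	}

	buf := <-tokens // acquire a token before buffering
	if buf == nil {
		buf = make([]byte, 16) // lazily allocate on first use
	}
	fmt.Println("got buffer of", len(buf), "bytes")
	tokens <- buf // return it (now allocated) for reuse
}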

View File

@ -57,10 +57,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
}
threads := f.ci.Transfers
if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads
}
threads := min(f.ci.Transfers, info.MaxNumberOfThreads)
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering

View File

@ -337,7 +337,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
// Cleanup stray files left after failed upload
for i := 0; i < 5; i++ {
for range 5 {
cleanObj, cleanErr := f.NewObject(ctx, src.Remote())
if cleanErr == nil {
cleanErr = cleanObj.Remove(ctx)

View File

@ -574,7 +574,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
RootURL: pathID,
NoResponse: true,
}
var mkdir interface{}
var mkdir any
if pathID == f.opt.RootID {
// folders at the root are syncFolders
mkdir = &api.CreateSyncFolder{

View File

@ -8,8 +8,10 @@ import (
"errors"
"fmt"
"io"
"maps"
"path"
"regexp"
"slices"
"strconv"
"strings"
"sync"
@ -417,10 +419,8 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
// If this is a swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
if swiftError.StatusCode == e {
return true, err
}
if slices.Contains(retryErrorCodes, swiftError.StatusCode) {
return true, err
}
}
// Check for generic failure conditions
@ -701,7 +701,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
if !recurse {
opts.Delimiter = '/'
}
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (any, error) {
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
@ -1378,9 +1378,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
newHeaders := meta.ObjectHeaders()
for k, v := range newHeaders {
o.headers[k] = v
}
maps.Copy(o.headers, newHeaders)
// Include any other metadata from request
for k, v := range o.headers {
if strings.HasPrefix(k, "X-Object-") {
@ -1450,7 +1448,7 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, container string
// encoded but we need '&' encoded.
func urlEncode(str string) string {
var buf bytes.Buffer
for i := 0; i < len(str); i++ {
for i := range len(str) {
c := str[i]
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
_ = buf.WriteByte(c)
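Earlier in this file, maps.Copy (Go 1.21) replaces the manual key-by-key merge of object headers; keys present in both maps take the value from the source map, exactly like the loop it replaces:

package main

import (
	"fmt"
	"maps"
)

func main() {
	headers := map[string]string{"X-Object-Meta-A": "old", "Content-Type": "text/plain"}
	newHeaders := map[string]string{"X-Object-Meta-A": "new", "X-Object-Meta-Mtime": "123"}
	maps.Copy(headers, newHeaders) // same as: for k, v := range newHeaders { headers[k] = v }
	fmt.Println(headers["X-Object-Meta-A"], headers["X-Object-Meta-Mtime"]) // new 123
}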

View File

@ -82,8 +82,8 @@ type File struct {
ContentType string `json:"content_type"`
Format struct {
} `json:"format"`
DownloadTypes []interface{} `json:"download_types"`
ThumbnailInfo []interface{} `json:"thumbnail_info"`
DownloadTypes []any `json:"download_types"`
ThumbnailInfo []any `json:"thumbnail_info"`
PreviewInfo struct {
} `json:"preview_info"`
Privacy string `json:"privacy"`

View File

@ -729,7 +729,7 @@ func (o *Object) Storable() bool {
return true
}
func (o *Object) updateFileProperties(ctx context.Context, req interface{}) (err error) {
func (o *Object) updateFileProperties(ctx context.Context, req any) (err error) {
var resp *api.File
opts := rest.Opts{
@ -887,7 +887,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove implements the mandatory method fs.Object.Remove
func (o *Object) Remove(ctx context.Context) error {
for i := 0; i < 2; i++ {
for range 2 {
// First call moves the item to recycle bin, second deletes it for good
var err error
opts := rest.Opts{

View File

@ -902,7 +902,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
for i := 0; i < len(opt.Remotes)-1; i++ {
for i := range len(opt.Remotes) - 1 {
opt.Remotes[i] += ":ro"
}
opt.Upstreams = opt.Remotes
@ -1045,7 +1045,7 @@ func parentDir(absPath string) string {
func multithread(num int, fn func(int)) {
var wg sync.WaitGroup
for i := 0; i < num; i++ {
for i := range num {
wg.Add(1)
i := i
go func() {

View File

@ -246,7 +246,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
return f, nil
}
func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
func (f *Fs) decodeError(resp *http.Response, response any) (err error) {
defer fs.CheckClose(resp.Body, &err)
body, err := io.ReadAll(resp.Body)

View File

@ -112,12 +112,8 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa
return err
}
contentLength := chunkSize
// Last chunk may be smaller
if size-offset < contentLength {
contentLength = size - offset
}
contentLength := min(size-offset, chunkSize)
endOffset := offset + contentLength - 1

View File

@ -185,7 +185,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
if err != nil {
return nil, err
}
reqData := map[string]interface{}{
reqData := map[string]any{
"Username": ca.user,
"Password": ca.pass,
"Address": ca.endpoint,

View File

@ -23,20 +23,20 @@ type ResourceInfoRequestOptions struct {
// ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Created string `json:"created"`
CustomProperties map[string]interface{} `json:"custom_properties"`
Preview string `json:"preview"`
PublicURL string `json:"public_url"`
OriginPath string `json:"origin_path"`
Modified string `json:"modified"`
Path string `json:"path"`
Md5 string `json:"md5"`
ResourceType string `json:"type"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
Embedded *ResourceListResponse `json:"_embedded"`
PublicKey string `json:"public_key"`
Name string `json:"name"`
Created string `json:"created"`
CustomProperties map[string]any `json:"custom_properties"`
Preview string `json:"preview"`
PublicURL string `json:"public_url"`
OriginPath string `json:"origin_path"`
Modified string `json:"modified"`
Path string `json:"path"`
Md5 string `json:"md5"`
ResourceType string `json:"type"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
Embedded *ResourceListResponse `json:"_embedded"`
}
// ResourceListResponse struct
@ -64,7 +64,7 @@ type AsyncStatus struct {
// CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
CustomProperties map[string]interface{} `json:"custom_properties"`
CustomProperties map[string]any `json:"custom_properties"`
}
// SortMode struct - sort mode

View File

@ -1024,7 +1024,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
}
opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
rcm := map[string]interface{}{
rcm := map[string]any{
property: value,
}
cpr := api.CustomPropertyResponse{CustomProperties: rcm}

View File

@ -82,7 +82,7 @@ Note to run these commands on a running backend then see
return err
}
// Run the command
var out interface{}
var out any
switch name {
case "help":
return showHelp(fsInfo)

View File

@ -10,7 +10,7 @@ import (
)
// Names comprises a set of file names
type Names map[string]interface{}
type Names map[string]any
// ToNames converts string slice to a set of names
func ToNames(list []string) Names {

View File

@ -627,7 +627,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
testFunc := func() {
src := filepath.Join(b.dataDir, "file7.txt")
for i := 0; i < 50; i++ {
for i := range 50 {
dst := "file" + fmt.Sprint(i) + ".txt"
err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
if err != nil {
@ -1606,7 +1606,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
s = pathReplacer.Replace(strings.TrimSpace(s))
// Apply regular expression replacements
for i := 0; i < len(repFrom); i++ {
for i := range repFrom {
s = repFrom[i].ReplaceAllString(s, repTo[i])
}
s = strings.TrimSpace(s)
@ -1621,7 +1621,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
// Sort consecutive groups of naturally unordered lines.
// Any such group must end before the log ends or it might be lost.
absorbed := false
for i := 0; i < len(dampers); i++ {
for i := range dampers {
match := false
if s != "" && !absorbed {
match = hoppers[i].MatchString(s)
@ -1869,7 +1869,7 @@ func fileType(fileName string) string {
}
// logPrintf prints a message to stdout and to the test log
func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
func (b *bisyncTest) logPrintf(text string, args ...any) {
line := fmt.Sprintf(text, args...)
fs.Log(nil, line)
if b.logFile != nil {
@ -1936,7 +1936,7 @@ func ctxNoDsStore(ctx context.Context, t *testing.T) (context.Context, *filter.F
return ctxNoDsStore, fi
}
func checkError(t *testing.T, err error, msgAndArgs ...interface{}) {
func checkError(t *testing.T, err error, msgAndArgs ...any) {
if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
t.Skipf("Skip test because remote cannot upload empty files")
}

View File

@ -12,7 +12,7 @@ import (
"github.com/rclone/rclone/lib/terminal"
)
func (b *bisyncRun) indentf(tag, file, format string, args ...interface{}) {
func (b *bisyncRun) indentf(tag, file, format string, args ...any) {
b.indent(tag, file, fmt.Sprintf(format, args...))
}

View File

@ -524,7 +524,7 @@ func (b *bisyncRun) testFn() {
}
}
func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
func (b *bisyncRun) handleErr(o any, msg string, err error, critical, retryable bool) {
if err != nil {
if retryable {
b.retryable = true
@ -624,7 +624,7 @@ func (b *bisyncRun) debugFn(nametocheck string, fn func()) {
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
const individualWait = 1 * time.Second
for i := 0; i < int(totalWait/individualWait); i++ {
for i := range int(totalWait / individualWait) {
ok = fn()
if ok {
return ok

View File

@ -73,7 +73,7 @@ rclone.org website.`,
// Write the flags page
var buf bytes.Buffer
cmd.Root.SetOutput(&buf)
cmd.Root.SetOut(&buf)
cmd.Root.SetArgs([]string{"help", "flags"})
cmd.GeneratingDocs = true
err = cmd.Root.Execute()

View File

@ -15,6 +15,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/buildinfo"
)
@ -31,9 +32,7 @@ func checkRcloneBinaryVersion(t *testing.T) error {
cmd := exec.Command("rclone", "rc", "--loopback", "core/version")
stdout, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to get rclone version: %w", err)
}
require.NoError(t, err)
var parsed versionInfo
if err := json.Unmarshal(stdout, &parsed); err != nil {
@ -185,6 +184,11 @@ func skipE2eTestIfNecessary(t *testing.T) {
t.Skip("Skipping due to short mode.")
}
// TODO(#7984): Port e2e tests to `fstest` framework.
if *fstest.RemoteName != "" {
t.Skip("Skipping because fstest remote was specified.")
}
// TODO: Support e2e tests on Windows. Need to evaluate the semantics of the
// HOME and PATH environment variables.
switch runtime.GOOS {

View File

@ -7,7 +7,7 @@
// (Tracked in [issue #7625].)
//
// 1. ✅ Minimal support for the [external special remote protocol]. Tested on
// "local" and "drive" backends.
// "local", "drive", and "dropbox" backends.
// 2. Add support for the ASYNC protocol extension. This may improve performance.
// 3. Support the [simple export interface]. This will enable `git-annex
// export` functionality.
@ -28,11 +28,13 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@ -267,21 +269,30 @@ func (s *server) handleInitRemote() error {
return fmt.Errorf("failed to get configs: %w", err)
}
remoteRootFs, err := cache.Get(context.TODO(), fmt.Sprintf("%s:", s.configRcloneRemoteName))
if err != nil {
s.sendMsg("INITREMOTE-FAILURE failed to open root directory of rclone remote")
return fmt.Errorf("failed to open root directory of rclone remote: %w", err)
// Explicitly check that a remote with the given name exists. If we just
// relied on `cache.Get()` to return `fs.ErrorNotFoundInConfigFile`, this
// function would incorrectly succeed when the given remote name is actually
// a file path.
//
// The :local: backend does not correspond to a remote named by the rclone
// config, but is permitted to enable testing. Technically, a user might hit
// this code path, but it would be a strange choice because git-annex
// natively supports a "directory" special remote.
trimmedName := strings.TrimSuffix(s.configRcloneRemoteName, ":")
if s.configRcloneRemoteName != ":local" {
if !slices.Contains(config.FileSections(), trimmedName) {
s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
return fmt.Errorf("remote does not exist: %s", s.configRcloneRemoteName)
}
}
if !remoteRootFs.Features().CanHaveEmptyDirectories {
s.sendMsg("INITREMOTE-FAILURE this rclone remote does not support empty directories")
return fmt.Errorf("rclone remote does not support empty directories")
}
if err := operations.Mkdir(context.TODO(), remoteRootFs, s.configPrefix); err != nil {
s.sendMsg("INITREMOTE-FAILURE failed to mkdir")
return fmt.Errorf("failed to mkdir: %w", err)
}
s.configRcloneRemoteName = trimmedName + ":"
s.sendMsg("INITREMOTE-SUCCESS")
return nil
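Distilled, the new INITREMOTE validation accepts a name only if it is a section of the loaded config file or the special ":local" escape hatch. A simplified sketch of just that check (config.FileSections returns the remote names defined in the config; this is not the full handleInitRemote):

package main

import (
	"fmt"
	"slices"
	"strings"

	"github.com/rclone/rclone/fs/config"
)

// validRemote reports whether name refers to a configured remote or the
// ":local" escape hatch used for testing.
func validRemote(name string) bool {
	if name == ":local" {
		return true
	}
	trimmed := strings.TrimSuffix(name, ":")
	return slices.Contains(config.FileSections(), trimmed)
}

func main() {
	fmt.Println(validRemote(":local")) // true, without consulting the config
}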

View File

@ -2,25 +2,24 @@ package gitannex
import (
"bufio"
"crypto/sha256"
"context"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"testing"
"time"
// Without this import, the local filesystem backend would be unavailable.
// It looks unused, but the act of importing it runs its `init()` function.
_ "github.com/rclone/rclone/backend/local"
// Without this import, the various backends would be unavailable. It looks
// unused, but the act of importing runs the package's `init()` function.
_ "github.com/rclone/rclone/backend/all"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -256,9 +255,9 @@ type testState struct {
mockStdinW *io.PipeWriter
mockStdoutReader *bufio.Reader
localFsDir string
configPath string
remoteName string
fstestRun *fstest.Run
remoteName string
remotePrefix string
}
func makeTestState(t *testing.T) testState {
@ -276,6 +275,10 @@ func makeTestState(t *testing.T) testState {
}
}
func (h *testState) requireRemoteIsEmpty() {
h.fstestRun.CheckRemoteItems(h.t)
}
func (h *testState) requireReadLineExact(line string) {
receivedLine, err := h.mockStdoutReader.ReadString('\n')
require.NoError(h.t, err)
@ -296,21 +299,106 @@ func (h *testState) requireWriteLine(line string) {
// Preconfigure the handle. This enables the calling test to skip the PREPARE
// handshake.
func (h *testState) preconfigureServer() {
h.server.configPrefix = h.localFsDir
h.server.configRcloneRemoteName = h.remoteName
h.server.configPrefix = h.remotePrefix
h.server.configRcloneLayout = string(layoutModeNodir)
h.server.configsDone = true
}
// getUniqueRemoteName returns a valid remote name derived from the given test's
// name. This is necessary because when a test registers a second remote with
// the same name, the original remote appears to take precedence. This function
// is injective, so each test gets a unique remote name. Returned strings
// contain no spaces.
func getUniqueRemoteName(t *testing.T) string {
// Using sha256 as a hack to ensure injectivity without adding a global
// variable.
return fmt.Sprintf("remote-%x", sha256.Sum256([]byte(t.Name())))
// Drop-in replacement for `filepath.Rel()` that works around a Windows-specific
// quirk when one of the paths begins with `\\?\` or `//?/`. It seems that
// fstest gives us paths with this prefix on Windows, which throws a wrench in
// the gitannex tests that need to construct relative paths from absolute paths.
// For a demonstration, see `TestWindowsFilepathRelQuirk` below.
//
// The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
// filesystem without additional parsing [1]. Our workaround is roughly to add
// the prefix to whichever parameter doesn't have it (when the OS is Windows).
// I'm not sure this generalizes, but it works for the kinds of inputs we're
// throwing at it.
//
// [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
func relativeFilepathWorkaround(basepath, targpath string) (string, error) {
if runtime.GOOS != "windows" {
return filepath.Rel(basepath, targpath)
}
// Canonicalize paths to use backslashes.
basepath = filepath.Clean(basepath)
targpath = filepath.Clean(targpath)
const winFilePrefixDisableStringParsing = `\\?\`
baseHasPrefix := strings.HasPrefix(basepath, winFilePrefixDisableStringParsing)
targHasPrefix := strings.HasPrefix(targpath, winFilePrefixDisableStringParsing)
if baseHasPrefix && !targHasPrefix {
targpath = winFilePrefixDisableStringParsing + targpath
}
if !baseHasPrefix && targHasPrefix {
basepath = winFilePrefixDisableStringParsing + basepath
}
return filepath.Rel(basepath, targpath)
}
func TestWindowsFilepathRelQuirk(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip()
}
t.Run("filepathRelQuirk", func(t *testing.T) {
var err error
_, err = filepath.Rel(`C:\foo`, `\\?\C:\foo\bar`)
require.Error(t, err)
_, err = filepath.Rel(`C:/foo`, `//?/C:/foo/bar`)
require.Error(t, err)
_, err = filepath.Rel(`\\?\C:\foo`, `C:\foo\bar`)
require.Error(t, err)
_, err = filepath.Rel(`//?/C:/foo`, `C:/foo/bar`)
require.Error(t, err)
path, err := filepath.Rel(`\\?\C:\foo`, `\\?\C:\foo\bar`)
require.NoError(t, err)
require.Equal(t, path, `bar`)
path, err = filepath.Rel(`//?/C:/foo`, `//?/C:/foo/bar`)
require.NoError(t, err)
require.Equal(t, path, `bar`)
})
t.Run("fstestAndTempDirHaveDifferentPrefixes", func(t *testing.T) {
r := fstest.NewRun(t)
p := r.Flocal.Root()
require.True(t, strings.HasPrefix(p, `//?/`))
tempDir := t.TempDir()
require.False(t, strings.HasPrefix(tempDir, `//?/`))
require.False(t, strings.HasPrefix(tempDir, `\\?\`))
})
t.Run("workaroundWorks", func(t *testing.T) {
path, err := relativeFilepathWorkaround(`C:\foo`, `\\?\C:\foo\bar`)
require.NoError(t, err)
require.Equal(t, path, "bar")
path, err = relativeFilepathWorkaround(`C:/foo`, `//?/C:/foo/bar`)
require.NoError(t, err)
require.Equal(t, path, "bar")
path, err = relativeFilepathWorkaround(`\\?\C:\foo`, `C:\foo\bar`)
require.NoError(t, err)
require.Equal(t, path, `bar`)
path, err = relativeFilepathWorkaround(`//?/C:/foo`, `C:/foo/bar`)
require.NoError(t, err)
require.Equal(t, path, `bar`)
path, err = relativeFilepathWorkaround(`\\?\C:\foo`, `\\?\C:\foo\bar`)
require.NoError(t, err)
require.Equal(t, path, `bar`)
})
}
type testCase struct {
@ -319,8 +407,8 @@ type testCase struct {
expectedError string
}
// These test cases run against the "local" backend.
var localBackendTestCases = []testCase{
// These test cases run against a backend selected by the `-remote` flag.
var fstestTestCases = []testCase{
{
label: "HandlesInit",
testProtocolFunc: func(t *testing.T, h *testState) {
@ -368,27 +456,86 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
if !h.server.extensionInfo {
t.Errorf("expected INFO extension to be enabled")
return
}
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE " + h.remoteName)
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.localFsDir)
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
require.Equal(t, h.server.configPrefix, h.localFsDir)
require.Equal(t, h.server.configPrefix, h.remotePrefix)
require.True(t, h.server.configsDone)
require.NoError(t, h.mockStdinW.Close())
},
},
{
label: "HandlesPrepareWithNonexistentRemote",
testProtocolFunc: func(t *testing.T, h *testState) {
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE thisRemoteDoesNotExist")
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, "thisRemoteDoesNotExist")
require.Equal(t, h.server.configPrefix, h.remotePrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist: thisRemoteDoesNotExist")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist: thisRemoteDoesNotExist",
},
{
label: "HandlesPrepareWithPathAsRemote",
testProtocolFunc: func(t *testing.T, h *testState) {
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remotePrefix)
require.Equal(t, h.server.configPrefix, "/foo")
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist: "),
h.requireReadLine(),
)
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist:",
},
{
label: "HandlesPrepareWithSynonyms",
testProtocolFunc: func(t *testing.T, h *testState) {
@ -396,10 +543,7 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
if !h.server.extensionInfo {
t.Errorf("expected INFO extension to be enabled")
return
}
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
@ -409,13 +553,13 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("VALUE " + h.remoteName)
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.localFsDir)
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
require.Equal(t, h.server.configPrefix, h.localFsDir)
require.Equal(t, h.server.configPrefix, h.remotePrefix)
require.True(t, h.server.configsDone)
require.NoError(t, h.mockStdinW.Close())
@ -428,21 +572,18 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
if !h.server.extensionInfo {
t.Errorf("expected INFO extension to be enabled")
return
}
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
remoteNameWithSpaces := fmt.Sprintf(" %s ", h.remoteName)
localFsDirWithSpaces := fmt.Sprintf(" %s\t", h.localFsDir)
prefixWithWhitespace := fmt.Sprintf(" %s\t", h.remotePrefix)
h.requireWriteLine(fmt.Sprintf("VALUE %s", remoteNameWithSpaces))
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine(fmt.Sprintf("VALUE %s", localFsDirWithSpaces))
h.requireWriteLine(fmt.Sprintf("VALUE %s", prefixWithWhitespace))
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE")
@ -452,7 +593,7 @@ var localBackendTestCases = []testCase{
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, remoteNameWithSpaces)
require.Equal(t, h.server.configPrefix, localFsDirWithSpaces)
require.Equal(t, h.server.configPrefix, prefixWithWhitespace)
require.True(t, h.server.configsDone)
require.NoError(t, h.mockStdinW.Close())
@ -639,20 +780,25 @@ var localBackendTestCases = []testCase{
h.requireReadLineExact("INITREMOTE-SUCCESS")
// Create temp file for transfer with an absolute path.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
require.FileExists(t, fileToTransfer)
require.True(t, filepath.IsAbs(fileToTransfer))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
require.True(t, filepath.IsAbs(absPath))
// Specify an absolute path to transfer.
h.requireWriteLine("TRANSFER STORE KeyAbsolute " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE KeyAbsolute " + absPath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyAbsolute")
require.FileExists(t, filepath.Join(h.localFsDir, "KeyAbsolute"))
// Check that the file was transferred.
remoteItem := fstest.NewItem("KeyAbsolute", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
// Transfer the same absolute path a second time, but with a different key.
h.requireWriteLine("TRANSFER STORE KeyAbsolute2 " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE KeyAbsolute2 " + absPath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyAbsolute2")
require.FileExists(t, filepath.Join(h.localFsDir, "KeyAbsolute2"))
// Check that the same file was transferred to a new name.
remoteItem2 := fstest.NewItem("KeyAbsolute2", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem, remoteItem2)
h.requireWriteLine("CHECKPRESENT KeyAbsolute2")
h.requireReadLineExact("CHECKPRESENT-SUCCESS KeyAbsolute2")
@ -668,30 +814,36 @@ var localBackendTestCases = []testCase{
{
label: "TransferStoreRelative",
testProtocolFunc: func(t *testing.T, h *testState) {
h.preconfigureServer()
// Save the current working directory so we can restore it when this
// test ends.
cwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(t.TempDir()))
tempDir := t.TempDir()
require.NoError(t, os.Chdir(tempDir))
t.Cleanup(func() { require.NoError(t, os.Chdir(cwd)) })
h.preconfigureServer()
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-SUCCESS")
// Create temp file for transfer with a relative path.
fileToTransfer := "file.txt"
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
require.FileExists(t, fileToTransfer)
require.False(t, filepath.IsAbs(fileToTransfer))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
relativePath, err := relativeFilepathWorkaround(tempDir, absPath)
require.NoError(t, err)
require.False(t, filepath.IsAbs(relativePath))
require.FileExists(t, relativePath)
// Specify a relative path to transfer.
h.requireWriteLine("TRANSFER STORE KeyRelative " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE KeyRelative " + relativePath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyRelative")
require.FileExists(t, filepath.Join(h.localFsDir, "KeyRelative"))
remoteItem := fstest.NewItem("KeyRelative", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("CHECKPRESENT KeyRelative")
h.requireReadLineExact("CHECKPRESENT-SUCCESS KeyRelative")
@ -710,7 +862,8 @@ var localBackendTestCases = []testCase{
cwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(t.TempDir()))
tempDir := t.TempDir()
require.NoError(t, os.Chdir(tempDir))
t.Cleanup(func() { require.NoError(t, os.Chdir(cwd)) })
h.preconfigureServer()
@ -720,15 +873,19 @@ var localBackendTestCases = []testCase{
h.requireReadLineExact("INITREMOTE-SUCCESS")
// Create temp file for transfer.
fileToTransfer := "filename with spaces.txt"
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
require.FileExists(t, fileToTransfer)
require.False(t, filepath.IsAbs(fileToTransfer))
item := h.fstestRun.WriteFile("filename with spaces.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
relativePath, err := relativeFilepathWorkaround(tempDir, absPath)
require.NoError(t, err)
require.False(t, filepath.IsAbs(relativePath))
require.FileExists(t, relativePath)
// Specify a relative path to transfer.
h.requireWriteLine("TRANSFER STORE KeyRelative " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE KeyRelative " + relativePath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyRelative")
require.FileExists(t, filepath.Join(h.localFsDir, "KeyRelative"))
remoteItem := fstest.NewItem("KeyRelative", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("CHECKPRESENT KeyRelative")
h.requireReadLineExact("CHECKPRESENT-SUCCESS KeyRelative")
@ -745,8 +902,9 @@ var localBackendTestCases = []testCase{
h.preconfigureServer()
// Create temp file for transfer.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
require.True(t, filepath.IsAbs(absPath))
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
@ -756,10 +914,11 @@ var localBackendTestCases = []testCase{
h.requireReadLineExact("CHECKPRESENT-FAILURE KeyThatDoesNotExist")
// Specify an absolute path to transfer.
require.True(t, filepath.IsAbs(fileToTransfer))
h.requireWriteLine("TRANSFER STORE KeyAbsolute " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE KeyAbsolute " + absPath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE KeyAbsolute")
require.FileExists(t, filepath.Join(h.localFsDir, "KeyAbsolute"))
remoteItem := fstest.NewItem("KeyAbsolute", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
require.NoError(t, h.mockStdinW.Close())
},
@ -781,8 +940,9 @@ var localBackendTestCases = []testCase{
h.preconfigureServer()
// Create temp file for transfer.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
require.True(t, filepath.IsAbs(absPath))
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
@ -791,9 +951,11 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("CHECKPRESENT foo")
h.requireReadLineExact("CHECKPRESENT-FAILURE foo")
h.requireWriteLine("TRANSFER STORE foo " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE foo " + absPath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE foo")
require.FileExists(t, filepath.Join(h.localFsDir, "foo"))
remoteItem := fstest.NewItem("foo", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("CHECKPRESENT foo")
h.requireReadLineExact("CHECKPRESENT-SUCCESS foo")
@ -807,8 +969,9 @@ var localBackendTestCases = []testCase{
h.preconfigureServer()
// Create temp file for transfer.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
require.True(t, filepath.IsAbs(absPath))
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
@ -817,10 +980,11 @@ var localBackendTestCases = []testCase{
realisticKey := "SHA256E-s1048576--7ba87e06b9b7903cfbaf4a38736766c161e3e7b42f06fe57f040aa410a8f0701.this-is-a-test-key"
// Specify an absolute path to transfer.
require.True(t, filepath.IsAbs(fileToTransfer))
h.requireWriteLine(fmt.Sprintf("TRANSFER STORE %s %s", realisticKey, fileToTransfer))
h.requireWriteLine(fmt.Sprintf("TRANSFER STORE %s %s", realisticKey, absPath))
h.requireReadLineExact("TRANSFER-SUCCESS STORE " + realisticKey)
require.FileExists(t, filepath.Join(h.localFsDir, realisticKey))
remoteItem := fstest.NewItem(realisticKey, "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("CHECKPRESENT " + realisticKey)
h.requireReadLineExact("CHECKPRESENT-SUCCESS " + realisticKey)
@ -849,27 +1013,36 @@ var localBackendTestCases = []testCase{
h.preconfigureServer()
// Create temp file for transfer.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
require.True(t, filepath.IsAbs(absPath))
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-SUCCESS")
// Specify an absolute path to transfer.
require.True(t, filepath.IsAbs(fileToTransfer))
h.requireWriteLine("TRANSFER STORE SomeKey " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE SomeKey " + absPath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE SomeKey")
require.FileExists(t, filepath.Join(h.localFsDir, "SomeKey"))
remoteItem := fstest.NewItem("SomeKey", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-SUCCESS SomeKey")
retrievedFilePath := fileToTransfer + ".retrieved"
require.NoFileExists(t, retrievedFilePath)
h.fstestRun.CheckLocalItems(t,
fstest.NewItem("file.txt", "HELLO", item.ModTime),
)
retrievedFilePath := absPath + ".retrieved"
h.requireWriteLine("TRANSFER RETRIEVE SomeKey " + retrievedFilePath)
h.requireReadLineExact("TRANSFER-SUCCESS RETRIEVE SomeKey")
require.FileExists(t, retrievedFilePath)
h.fstestRun.CheckLocalItems(t,
fstest.NewItem("file.txt", "HELLO", item.ModTime),
fstest.NewItem("file.txt.retrieved", "HELLO", item.ModTime),
)
require.NoError(t, h.mockStdinW.Close())
},
@ -879,11 +1052,13 @@ var localBackendTestCases = []testCase{
testProtocolFunc: func(t *testing.T, h *testState) {
h.preconfigureServer()
ctx := context.WithoutCancel(context.Background())
// Write a file into the remote without using the git-annex
// protocol.
remoteFilePath := filepath.Join(h.localFsDir, "SomeKey")
require.NoError(t, os.WriteFile(remoteFilePath, []byte("HELLO"), 0600))
require.FileExists(t, remoteFilePath)
remoteItem := h.fstestRun.WriteObject(ctx, "SomeKey", "HELLO", time.Now())
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
@ -891,15 +1066,18 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-SUCCESS SomeKey")
require.FileExists(t, remoteFilePath)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("REMOVE SomeKey")
h.requireReadLineExact("REMOVE-SUCCESS SomeKey")
require.NoFileExists(t, remoteFilePath)
h.requireRemoteIsEmpty()
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-FAILURE SomeKey")
require.NoFileExists(t, remoteFilePath)
h.requireRemoteIsEmpty()
require.NoError(t, h.mockStdinW.Close())
},
@ -910,8 +1088,9 @@ var localBackendTestCases = []testCase{
h.preconfigureServer()
// Create temp file for transfer.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
item := h.fstestRun.WriteFile("file.txt", "HELLO", time.Now())
absPath := filepath.Join(h.fstestRun.Flocal.Root(), item.Path)
require.True(t, filepath.IsAbs(absPath))
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
@ -921,17 +1100,19 @@ var localBackendTestCases = []testCase{
h.requireReadLineExact("CHECKPRESENT-FAILURE SomeKey")
// Specify an absolute path to transfer.
require.True(t, filepath.IsAbs(fileToTransfer))
h.requireWriteLine("TRANSFER STORE SomeKey " + fileToTransfer)
h.requireWriteLine("TRANSFER STORE SomeKey " + absPath)
h.requireReadLineExact("TRANSFER-SUCCESS STORE SomeKey")
require.FileExists(t, filepath.Join(h.localFsDir, "SomeKey"))
remoteItem := fstest.NewItem("SomeKey", "HELLO", item.ModTime)
h.fstestRun.CheckRemoteItems(t, remoteItem)
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-SUCCESS SomeKey")
h.requireWriteLine("REMOVE SomeKey")
h.requireReadLineExact("REMOVE-SUCCESS SomeKey")
require.NoFileExists(t, filepath.Join(h.localFsDir, "SomeKey"))
h.requireRemoteIsEmpty()
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-FAILURE SomeKey")
@ -944,10 +1125,6 @@ var localBackendTestCases = []testCase{
testProtocolFunc: func(t *testing.T, h *testState) {
h.preconfigureServer()
// Create temp file for transfer.
fileToTransfer := filepath.Join(t.TempDir(), "file.txt")
require.NoError(t, os.WriteFile(fileToTransfer, []byte("HELLO"), 0600))
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-SUCCESS")
@ -955,10 +1132,12 @@ var localBackendTestCases = []testCase{
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-FAILURE SomeKey")
require.NoFileExists(t, filepath.Join(h.localFsDir, "SomeKey"))
h.requireRemoteIsEmpty()
h.requireWriteLine("REMOVE SomeKey")
h.requireReadLineExact("REMOVE-SUCCESS SomeKey")
require.NoFileExists(t, filepath.Join(h.localFsDir, "SomeKey"))
h.requireRemoteIsEmpty()
h.requireWriteLine("CHECKPRESENT SomeKey")
h.requireReadLineExact("CHECKPRESENT-FAILURE SomeKey")
@ -983,44 +1162,40 @@ var localBackendTestCases = []testCase{
},
}
func TestGitAnnexLocalBackendCases(t *testing.T) {
for _, testCase := range localBackendTestCases {
// Clear global state left behind by tests that chdir to a temp directory.
cache.Clear()
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}
// Run fstest-compatible test cases with backend selected by `-remote`.
func TestGitAnnexFstestBackendCases(t *testing.T) {
for _, testCase := range fstestTestCases {
// TODO: Remove this when rclone requires a Go version >= 1.22. Future
// versions of Go fix the semantics of capturing a range variable.
// https://go.dev/blog/loopvar-preview
testCase := testCase
t.Run(testCase.label, func(t *testing.T) {
tempDir := t.TempDir()
r := fstest.NewRun(t)
t.Cleanup(func() { r.Finalise() })
// Create temp dir for an rclone remote pointing at local filesystem.
localFsDir := filepath.Join(tempDir, "remoteTarget")
require.NoError(t, os.Mkdir(localFsDir, 0700))
// Parse the fstest-provided remote string. It might have a path!
remoteName, remotePath, err := fspath.SplitFs(r.FremoteName)
require.NoError(t, err)
// Create temp config
remoteName := getUniqueRemoteName(t)
configLines := []string{
fmt.Sprintf("[%s]", remoteName),
"type = local",
fmt.Sprintf("remote = %s", localFsDir),
// The gitannex command requires that `rcloneremotename` be the name
// of a remote or exactly ":local", so the empty string will not
// suffice.
if remoteName == "" {
require.True(t, r.Fremote.Features().IsLocal)
remoteName = ":local"
}
configContents := strings.Join(configLines, "\n")
configPath := filepath.Join(tempDir, "rclone.conf")
require.NoError(t, os.WriteFile(configPath, []byte(configContents), 0600))
require.NoError(t, config.SetConfigPath(configPath))
// The custom config file will be ignored unless we install the
// global config file handler.
configfile.Install()
handle := makeTestState(t)
handle.localFsDir = localFsDir
handle.configPath = configPath
handle.fstestRun = r
handle.remoteName = remoteName
handle.remotePrefix = remotePath
var wg sync.WaitGroup
wg.Add(1)
@ -1042,54 +1217,3 @@ func TestGitAnnexLocalBackendCases(t *testing.T) {
})
}
}
// Configure the git-annex client with a mockfs backend and send it the
// "INITREMOTE" command over mocked stdin. This should fail because mockfs does
// not support empty directories.
func TestGitAnnexHandleInitRemoteBackendDoesNotSupportEmptyDirectories(t *testing.T) {
tempDir := t.TempDir()
// Temporarily override the filesystem registry.
oldRegistry := fs.Registry
mockfs.Register()
defer func() { fs.Registry = oldRegistry }()
// Create temp dir for an rclone remote pointing at local filesystem.
localFsDir := filepath.Join(tempDir, "remoteTarget")
require.NoError(t, os.Mkdir(localFsDir, 0700))
// Create temp config
remoteName := getUniqueRemoteName(t)
configLines := []string{
fmt.Sprintf("[%s]", remoteName),
"type = mockfs",
fmt.Sprintf("remote = %s", localFsDir),
}
configContents := strings.Join(configLines, "\n")
configPath := filepath.Join(tempDir, "rclone.conf")
require.NoError(t, os.WriteFile(configPath, []byte(configContents), 0600))
// The custom config file will be ignored unless we install the global
// config file handler.
configfile.Install()
require.NoError(t, config.SetConfigPath(configPath))
handle := makeTestState(t)
handle.server.configPrefix = localFsDir
handle.server.configRcloneRemoteName = remoteName
handle.server.configsDone = true
var wg sync.WaitGroup
wg.Add(1)
go func() {
require.NotNil(t, handle.server.run())
wg.Done()
}()
defer wg.Wait()
handle.requireReadLineExact("VERSION 1")
handle.requireWriteLine("INITREMOTE")
handle.requireReadLineExact("INITREMOTE-FAILURE this rclone remote does not support empty directories")
}
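
Note on the pattern above: the rewritten tests go through rclone's fstest harness (driven by the -remote flag) instead of writing into a local directory by hand. Below is a minimal, self-contained sketch of that seed-and-verify flow; names like TestSeedAndVerifySketch are illustrative and not part of this diff.

package gitannex_test

import (
	"context"
	"testing"
	"time"

	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/require"
)

// TestMain hands control to fstest so the -remote flag selects the backend.
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}

func TestSeedAndVerifySketch(t *testing.T) {
	r := fstest.NewRun(t)
	t.Cleanup(func() { r.Finalise() })

	// The -remote value may carry a path ("myremote:some/dir"); SplitFs
	// separates the fs name from that path, just as the test above does.
	remoteName, remotePath, err := fspath.SplitFs(r.FremoteName)
	require.NoError(t, err)
	_ = remotePath

	// Fall back to ":local" when the fs name is empty, as the diff does.
	if remoteName == "" {
		remoteName = ":local"
	}

	// Seed the remote through the harness, then assert its contents.
	item := r.WriteObject(context.Background(), "SomeKey", "HELLO", time.Now())
	r.CheckRemoteItems(t, item)
}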

View File

@ -3,6 +3,8 @@ package gitannex
import (
"fmt"
"strings"
"github.com/rclone/rclone/fs/fspath"
)
type layoutMode string
@ -39,8 +41,11 @@ func parseLayoutMode(mode string) layoutMode {
type queryDirhashFunc func(msg string) (string, error)
func buildFsString(queryDirhash queryDirhashFunc, mode layoutMode, key, remoteName, prefix string) (string, error) {
remoteName = strings.TrimSuffix(remoteName, ":") + ":"
remoteString := fspath.JoinRootPath(remoteName, prefix)
if mode == layoutModeNodir {
return fmt.Sprintf("%s:%s", remoteName, prefix), nil
return remoteString, nil
}
var dirhash string
@ -59,13 +64,13 @@ func buildFsString(queryDirhash queryDirhashFunc, mode layoutMode, key, remoteNa
switch mode {
case layoutModeLower:
return fmt.Sprintf("%s:%s/%s", remoteName, prefix, dirhash), nil
return fmt.Sprintf("%s/%s", remoteString, dirhash), nil
case layoutModeDirectory:
return fmt.Sprintf("%s:%s/%s%s", remoteName, prefix, dirhash, key), nil
return fmt.Sprintf("%s/%s%s", remoteString, dirhash, key), nil
case layoutModeMixed:
return fmt.Sprintf("%s:%s/%s", remoteName, prefix, dirhash), nil
return fmt.Sprintf("%s/%s", remoteString, dirhash), nil
case layoutModeFrankencase:
return fmt.Sprintf("%s:%s/%s", remoteName, prefix, strings.ToLower(dirhash)), nil
return fmt.Sprintf("%s/%s", remoteString, strings.ToLower(dirhash)), nil
default:
panic("unreachable")
}
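
For reference, the change above replaces hand-rolled "%s:%s" formatting with fspath.JoinRootPath, which joins a path onto a remote root without doubling separators. A small sketch of the call follows; the printed forms in the comments are illustrative, not guaranteed output.

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/fspath"
)

func main() {
	// Joining a prefix onto a remote name that already ends in ":" should
	// not produce a stray separator.
	root := fspath.JoinRootPath("myremote:", "prefix")
	fmt.Println(root) // e.g. myremote:prefix

	// The dirhash segment is then appended with plain Sprintf, as above.
	fmt.Printf("%s/%s\n", root, "f00")
}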

View File

@ -51,7 +51,7 @@ var helpCommand = &cobra.Command{
Short: Root.Short,
Long: Root.Long,
Run: func(command *cobra.Command, args []string) {
Root.SetOutput(os.Stdout)
Root.SetOut(os.Stdout)
_ = Root.Usage()
},
}
@ -85,7 +85,7 @@ var helpFlags = &cobra.Command{
} else if len(args) > 0 {
Root.SetUsageTemplate(filterFlagsMultiGroupTemplate)
}
Root.SetOutput(os.Stdout)
Root.SetOut(os.Stdout)
}
_ = command.Usage()
},
@ -106,7 +106,7 @@ var helpBackend = &cobra.Command{
Short: "List full info about a backend",
Run: func(command *cobra.Command, args []string) {
if len(args) == 0 {
Root.SetOutput(os.Stdout)
Root.SetOut(os.Stdout)
_ = command.Usage()
return
}
@ -273,7 +273,7 @@ func showBackends() {
fmt.Printf(" rclone help backend <name>\n")
}
func quoteString(v interface{}) string {
func quoteString(v any) string {
switch v.(type) {
case string:
return fmt.Sprintf("%q", v)
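
Aside on the cobra change above: SetOutput is deprecated upstream in favour of SetOut (with SetErr for the error stream), which is the only substantive edit in this file besides the interface{} to any spelling. A minimal sketch:

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}
	// SetOut redirects normal output only; SetErr handles the error stream.
	root.SetOut(os.Stdout)
	root.SetErr(os.Stderr)
	_ = root.Usage() // prints usage to the writer set via SetOut
}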

View File

@ -78,7 +78,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
fs.Debugf(f, "Mounting on %q", mountpoint)
if opt.DebugFUSE {
fuse.Debug = func(msg interface{}) {
fuse.Debug = func(msg any) {
fs.Debugf("fuse", "%v", msg)
}
}
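
The mount change is the same mechanical interface{} to any substitution: since Go 1.18, any is a predeclared alias for interface{}, so callbacks like fuse.Debug accept either spelling. A tiny illustration with a hypothetical debug hook mirroring the shape of the callback in the diff:

package main

import "fmt"

func main() {
	// any and interface{} are interchangeable aliases since Go 1.18.
	var debug func(msg any)
	debug = func(msg any) {
		fmt.Printf("fuse: %v\n", msg)
	}
	debug("mounting demo") // accepts any value: strings, structs, etc.
}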

Some files were not shown because too many files have changed in this diff