Mirror of https://github.com/rclone/rclone.git (synced 2025-04-19 18:31:10 +08:00)
build: modernize Go usage
This commit modernizes Go usage. This was done with:

go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...

Then files needed to be `go fmt`ed and a few comments needed to be restored.

The modernizations include replacing

- if/else conditional assignment by a call to the built-in min or max functions added in go1.21
- sort.Slice(x, func(i, j int) bool) { return s[i] < s[j] } by a call to slices.Sort(s), added in go1.21
- interface{} by the 'any' type added in go1.18
- append([]T(nil), s...) by slices.Clone(s) or slices.Concat(s), added in go1.21
- loop around an m[k]=v map update by a call to one of the Collect, Copy, Clone, or Insert functions from the maps package, added in go1.21
- []byte(fmt.Sprintf...) by fmt.Appendf(nil, ...), added in go1.19
- append(s[:i], s[i+1:]...) by slices.Delete(s, i, i+1), added in go1.21
- a 3-clause for i := 0; i < n; i++ {} loop by for i := range n {}, added in go1.22
This commit is contained in:
parent 431386085f
commit 401cf81034
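For reference, a minimal, self-contained Go sketch of the rewrite patterns listed above; each snippet pairs a pre-modernize form with the form the tool produces. The variable names and values here are invented for illustration and are not taken from the rclone sources.

    package main

    import (
    	"fmt"
    	"maps"
    	"slices"
    )

    func main() {
    	// if/else conditional assignment -> built-in min (go1.21).
    	remaining, chunkSize := int64(100), int64(64)
    	reqSize := remaining
    	if reqSize >= chunkSize {
    		reqSize = chunkSize
    	}
    	fmt.Println(reqSize == min(remaining, chunkSize)) // true

    	// 3-clause loop -> range over an integer (go1.22).
    	for i := range 3 {
    		fmt.Println(i) // 0, 1, 2
    	}

    	// hand-written membership loop -> slices.Contains (go1.21).
    	retryCodes := []int{429, 500, 503}
    	found := false
    	for _, code := range retryCodes {
    		if code == 503 {
    			found = true
    			break
    		}
    	}
    	fmt.Println(found == slices.Contains(retryCodes, 503)) // true

    	// append(s[:i], s[i+1:]...) -> slices.Delete (go1.21).
    	s := []string{"a", "b", "c"}
    	s = slices.Delete(s, 1, 2)
    	fmt.Println(s) // [a c]

    	// loop doing m[k] = v -> maps.Copy (go1.21).
    	dst := map[string]string{"a": "1"}
    	maps.Copy(dst, map[string]string{"b": "2"})
    	fmt.Println(dst) // map[a:1 b:2]

    	// []byte(fmt.Sprintf(...)) -> fmt.Appendf(nil, ...) (go1.19).
    	buf := fmt.Appendf(nil, "%q", "hello")
    	fmt.Println(string(buf)) // "hello"

    	// interface{} -> any (an identical type alias since go1.18).
    	var v any = "interface{} and any are interchangeable"
    	fmt.Println(v)
    }

Running the original and modernized forms side by side like this is a quick way to confirm the rewrites are behavior-preserving.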
@@ -19,6 +19,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"

@@ -681,10 +682,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 		return true, err
 	}
 	statusCode := storageErr.StatusCode
-	for _, e := range retryErrorCodes {
-		if statusCode == e {
-			return true, err
-		}
+	if slices.Contains(retryErrorCodes, statusCode) {
+		return true, err
 	}
 	return fserrors.ShouldRetry(err), err
@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
 
 func randomString(charCount int) string {
 	strBldr := strings.Builder{}
-	for i := 0; i < charCount; i++ {
+	for range charCount {
 		randPos := rand.Int63n(52)
 		strBldr.WriteByte(chars[randPos])
 	}
@@ -130,10 +130,10 @@ type AuthorizeAccountResponse struct {
 	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
 	AccountID               string `json:"accountId"`               // The identifier for the account.
 	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
-		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
-		BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
-		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
-		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
+		BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
+		BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
+		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
+		NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
 	APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
 	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -16,6 +16,7 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"

@@ -589,12 +590,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 
 // hasPermission returns if the current AuthorizationToken has the selected permission
 func (f *Fs) hasPermission(permission string) bool {
-	for _, capability := range f.info.Allowed.Capabilities {
-		if capability == permission {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(f.info.Allowed.Capabilities, permission)
 }
 
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken

@@ -1275,7 +1271,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
 	toBeDeleted := make(chan *api.File, f.ci.Transfers)
 	var wg sync.WaitGroup
 	wg.Add(f.ci.Transfers)
-	for i := 0; i < f.ci.Transfers; i++ {
+	for range f.ci.Transfers {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {
@@ -1939,7 +1935,7 @@ func init() {
 // urlEncode encodes in with % encoding
 func urlEncode(in string) string {
 	var out bytes.Buffer
-	for i := 0; i < len(in); i++ {
+	for i := range len(in) {
 		c := in[i]
 		if noNeedToEncode[c] {
 			_ = out.WriteByte(c)

@@ -2260,7 +2256,7 @@ See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 	},
 }
 
-func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	var newRule api.LifecycleRule
 	if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
 		days, err := strconv.Atoi(daysStr)

@@ -2349,7 +2345,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 	},
 }
 
-func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	maxAge := defaultMaxAge
 	if opt["max-age"] != "" {
 		maxAge, err = fs.ParseDuration(opt["max-age"])

@@ -2372,7 +2368,7 @@ it would do.
 `,
 }
 
-func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	return nil, f.cleanUp(ctx, true, false, 0)
 }
 

@@ -2391,7 +2387,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "lifecycle":
 		return f.lifecycleCommand(ctx, name, arg, opt)
@@ -478,17 +478,14 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
 		remaining = up.size
 	)
 	g.SetLimit(up.f.opt.UploadConcurrency)
-	for part := 0; part < up.parts; part++ {
+	for part := range up.parts {
 		// Fail fast, in case an errgroup managed function returns an error
 		// gCtx is cancelled. There is no point in copying all the other parts.
 		if gCtx.Err() != nil {
 			break
 		}
 
-		reqSize := remaining
-		if reqSize >= up.chunkSize {
-			reqSize = up.chunkSize
-		}
+		reqSize := min(remaining, up.chunkSize)
 
 		part := part // for the closure
 		g.Go(func() (err error) {
@@ -237,8 +237,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
 	return claims, nil
 }
 
-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
-	signingHeaders := map[string]interface{}{
+func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
+	signingHeaders := map[string]any{
 		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
 	}
 	return signingHeaders

@@ -1343,12 +1343,8 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 	nextStreamPosition = streamPosition
 
 	for {
-		limit := f.opt.ListChunk
-
 		// box only allows a max of 500 events
-		if limit > 500 {
-			limit = 500
-		}
+		limit := min(f.opt.ListChunk, 500)
 
 		opts := rest.Opts{
 			Method: "GET",

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = 0; tries < maxTries; tries++ {
+	for tries = range maxTries {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {

@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := 0; part < session.TotalParts; part++ {
+	for part := range session.TotalParts {
 		// Check any errors
 		select {
 		case err = <-errs:

@@ -211,10 +211,7 @@ outer:
 		default:
 		}
 
-		reqSize := remaining
-		if reqSize >= chunkSize {
-			reqSize = chunkSize
-		}
+		reqSize := min(remaining, chunkSize)
 
 		// Make a block of memory
 		buf := make([]byte, reqSize)
backend/cache/cache.go | 8 (vendored)

@@ -1092,7 +1092,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
 		return err
 	}
 
-	for i := 0; i < len(entries); i++ {
+	for i := range entries {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
 			err := f.recurse(ctx, innerDir.Remote(), list)

@@ -1428,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	}()
 
 	// wait until both are done
-	for c := 0; c < 2; c++ {
+	for range 2 {
 		<-done
 	}
 }

@@ -1753,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }
 
 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
+func (f *Fs) Stats() (map[string]map[string]any, error) {
 	return f.cache.Stats()
 }
 

@@ -1933,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
 	switch name {
 	case "stats":
 		return f.Stats()
backend/cache/cache_internal_test.go | 16 (vendored)

@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())
 
-	for i := 0; i < len(checkSample); i++ {
+	for i := range checkSample {
 		require.Equal(t, testData[i], checkSample[i])
 	}
 }

@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
 
 	readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 	require.NoError(t, err)
-	for i := 0; i < len(readData); i++ {
+	for i := range readData {
 		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 	}
 }

@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)
 
-	for i := 0; i < 4; i++ { // read first 4
+	for i := range 4 { // read first 4
 		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)

@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	f, err := os.CreateTemp("", "rclonecache-tempfile")
 	require.NoError(t, err)
 
-	for i := 0; i < int(cnt); i++ {
+	for range int(cnt) {
 		data := randStringBytes(int(chunk))
 		_, _ = f.Write(data)
 	}

@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	return err
 }
 
-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
 	var err error
-	var l []interface{}
+	var l []any
 	var list fs.DirEntries
 	list, err = f.List(context.Background(), remote)
 	for _, ll := range list {

@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 	var err error
 	var state cache.BackgroundUploadState
 
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		select {
 		case state = <-buCh:
 			// continue

@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
 
 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
 	var err error
-	for i := 0; i < maxRetries; i++ {
+	for range maxRetries {
 		err = block()
 		if err == nil {
 			return nil
backend/cache/cache_upload_test.go | 2 (vendored)

@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	randInstance := rand.New(rand.NewSource(time.Now().Unix()))
 
 	lastFile := ""
-	for i := 0; i < totalFiles; i++ {
+	for i := range totalFiles {
 		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
 		testReader := runInstance.randomReader(t, size)
 		remote := "test/" + strconv.Itoa(i) + ".bin"
backend/cache/handle.go | 4 (vendored)

@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}
 
-	for i := 0; i < r.workers; i++ {
+	for i := range r.workers {
 		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue

@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
+		for i := range r.cacheFs().opt.ReadRetries * 8 {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true
backend/cache/plex.go | 8 (vendored)

@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
 	if err != nil {
 		return err
 	}
-	var data map[string]interface{}
+	var data map[string]any
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
 		return fmt.Errorf("failed to obtain token: %w", err)

@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }
 
 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m interface{}, path ...interface{}) (interface{}, bool) {
+func get(m any, path ...any) (any, bool) {
 	for _, p := range path {
 		switch idx := p.(type) {
 		case string:
-			if mm, ok := m.(map[string]interface{}); ok {
+			if mm, ok := m.(map[string]any); ok {
 				if val, found := mm[idx]; found {
 					m = val
 					continue

@@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) {
 			}
 			return nil, false
 		case int:
-			if mm, ok := m.([]interface{}); ok {
+			if mm, ok := m.([]any); ok {
 				if len(mm) > idx {
 					m = mm[idx]
 					continue
backend/cache/storage_persistent.go | 8 (vendored)

@@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }
 
 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
-	r := make(map[string]map[string]interface{})
-	r["data"] = make(map[string]interface{})
+func (b *Persistent) Stats() (map[string]map[string]any, error) {
+	r := make(map[string]map[string]any)
+	r["data"] = make(map[string]any)
 	r["data"]["oldest-ts"] = time.Now()
 	r["data"]["oldest-file"] = ""
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
 	r["data"]["total-size"] = int64(0)
-	r["files"] = make(map[string]interface{})
+	r["files"] = make(map[string]any)
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
 	r["files"]["newest-ts"] = time.Now()
@@ -632,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
 
 // forbidChunk prints error message or raises error if file is chunk.
 // First argument sets log prefix, use `false` to suppress message.
-func (f *Fs) forbidChunk(o interface{}, filePath string) error {
+func (f *Fs) forbidChunk(o any, filePath string) error {
 	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
 		if f.opt.FailHard {
 			return fmt.Errorf("chunk overlap with %q", parentPath)

@@ -680,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 	circleSec := unixSec % closestPrimeZzzzSeconds
 	first4chars := strconv.FormatInt(circleSec, 36)
 
-	for tries := 0; tries < maxTransactionProbes; tries++ {
+	for range maxTransactionProbes {
 		f.xactIDMutex.Lock()
 		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
 		f.xactIDMutex.Unlock()

@@ -1189,10 +1189,7 @@ func (f *Fs) put(
 		}
 
 		tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-		size := c.sizeLeft
-		if size > c.chunkSize {
-			size = c.chunkSize
-		}
+		size := min(c.sizeLeft, c.chunkSize)
 		savedReadCount := c.readCount
 
 		// If a single chunk is expected, avoid the extra rename operation

@@ -1477,10 +1474,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 	const bufLen = 1048576 // 1 MiB
 	buf := make([]byte, bufLen)
 	for size > 0 {
-		n := size
-		if n > bufLen {
-			n = bufLen
-		}
+		n := min(size, bufLen)
 		if _, err := io.ReadFull(in, buf[0:n]); err != nil {
 			return err
 		}

@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 	})
 }
 
-type settings map[string]interface{}
+type settings map[string]any
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 		dirNameEncrypt:  dirNameEncrypt,
 		encryptedSuffix: ".bin",
 	}
-	c.buffers.New = func() interface{} {
+	c.buffers.New = func() any {
 		return new([blockSize]byte)
 	}
 	err := c.Key(password, salt)

@@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	_, _ = result.WriteString(strconv.Itoa(dir) + ".")
 
 	// but we'll augment it with the nameKey for real calculation
-	for i := 0; i < len(c.nameKey); i++ {
+	for i := range len(c.nameKey) {
 		dir += int(c.nameKey[i])
 	}
 

@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	}
 
 	// add the nameKey to get the real rotate distance
-	for i := 0; i < len(c.nameKey); i++ {
+	for i := range len(c.nameKey) {
 		dir += int(c.nameKey[i])
 	}
 

@@ -664,7 +664,7 @@ func (n *nonce) increment() {
 // add a uint64 to the nonce
 func (n *nonce) add(x uint64) {
 	carry := uint16(0)
-	for i := 0; i < 8; i++ {
+	for i := range 8 {
 		digit := (*n)[i]
 		xDigit := byte(x)
 		x >>= 8

@@ -1307,10 +1307,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
-			end = int(underlyingOffset + underlyingLimit)
-			if end > len(ciphertext) {
-				end = len(ciphertext)
-			}
+			end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
 		}
 		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 		return reader, nil

@@ -1490,7 +1487,7 @@ func TestDecrypterRead(t *testing.T) {
 	assert.NoError(t, err)
 
 	// Test truncating the file at each possible point
-	for i := 0; i < len(file16)-1; i++ {
+	for i := range len(file16) - 1 {
 		what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
 		cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
 		fh, err := c.newDecrypter(cd)

@@ -924,7 +924,7 @@ Usage Example:
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "decode":
 		out := make([]string, 0, len(arg))

@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 	}
 	length := len(buf)
 	padding := n - (length % n)
-	for i := 0; i < padding; i++ {
+	for range padding {
 		buf = append(buf, byte(padding))
 	}
 	if (len(buf) % n) != 0 {

@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 	if padding == 0 {
 		return nil, ErrorPaddingTooShort
 	}
-	for i := 0; i < padding; i++ {
+	for i := range padding {
 		if buf[length-1-i] != byte(padding) {
 			return nil, ErrorPaddingNotAllTheSame
 		}
@@ -18,6 +18,7 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"

@@ -199,12 +200,7 @@ func driveScopes(scopesString string) (scopes []string) {
 
 // Returns true if one of the scopes was "drive.appfolder"
 func driveScopesContainsAppFolder(scopes []string) bool {
-	for _, scope := range scopes {
-		if scope == scopePrefix+"drive.appfolder" {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(scopes, scopePrefix+"drive.appfolder")
 }
 
 func driveOAuthOptions() []fs.Option {

@@ -958,12 +954,7 @@ func parseDrivePath(path string) (root string, err error) {
 type listFn func(*drive.File) bool
 
 func containsString(slice []string, s string) bool {
-	for _, e := range slice {
-		if e == s {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(slice, s)
 }
 
 // getFile returns drive.File for the ID passed and fields passed in

@@ -1152,13 +1143,7 @@ OUTER:
 		// Check the case of items is correct since
 		// the `=` operator is case insensitive.
 		if title != "" && title != item.Name {
-			found := false
-			for _, stem := range stems {
-				if stem == item.Name {
-					found = true
-					break
-				}
-			}
+			found := slices.Contains(stems, item.Name)
 			if !found {
 				continue
 			}

@@ -1561,13 +1546,10 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
 func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
 	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
 	if f.opt.SkipChecksumGphotos {
-		for _, space := range info.Spaces {
-			if space == "photos" {
-				info.Md5Checksum = ""
-				info.Sha1Checksum = ""
-				info.Sha256Checksum = ""
-				break
-			}
+		if slices.Contains(info.Spaces, "photos") {
+			info.Md5Checksum = ""
+			info.Sha1Checksum = ""
+			info.Sha256Checksum = ""
 		}
 	}
 	o := &Object{

@@ -2245,7 +2227,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	wg.Add(1)
 	in <- listREntry{directoryID, dir}
 
-	for i := 0; i < f.ci.Checkers; i++ {
+	for range f.ci.Checkers {
 		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
 	}
 	go func() {

@@ -2254,11 +2236,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		// if the input channel overflowed add the collected entries to the channel now
 		for len(overflow) > 0 {
 			mu.Lock()
-			l := len(overflow)
 			// only fill half of the channel to prevent entries being put into overflow again
-			if l > listRInputBuffer/2 {
-				l = listRInputBuffer / 2
-			}
+			l := min(len(overflow), listRInputBuffer/2)
 			wg.Add(l)
 			for _, d := range overflow[:l] {
 				in <- d

@@ -2278,7 +2257,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		mu.Unlock()
 	}()
 	// wait until the all workers to finish
-	for i := 0; i < f.ci.Checkers; i++ {
+	for range f.ci.Checkers {
 		e := <-out
 		mu.Lock()
 		// if one worker returns an error early, close the input so all other workers exit

@@ -3914,7 +3893,7 @@ Third delete all orphaned files to the trash
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "get":
 		out := make(map[string]string)
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"maps"
 	"strconv"
 	"strings"
 	"sync"

@@ -324,9 +325,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
 	metadata := make(fs.Metadata, 16)
 
 	// Dump user metadata first as it overrides system metadata
-	for k, v := range info.Properties {
-		metadata[k] = v
-	}
+	maps.Copy(metadata, info.Properties)
 
 	// System metadata
 	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
@@ -177,10 +177,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 		if start >= rx.ContentLength {
 			break
 		}
-		reqSize = rx.ContentLength - start
-		if reqSize >= int64(rx.f.opt.ChunkSize) {
-			reqSize = int64(rx.f.opt.ChunkSize)
-		}
+		reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
 		chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
 	} else {
 		// If size unknown read into buffer

@@ -55,10 +55,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
 	n = len(p)
 	for len(p) > 0 {
 		d.writtenMore = true
-		toWrite := bytesPerBlock - d.n
-		if toWrite > len(p) {
-			toWrite = len(p)
-		}
+		toWrite := min(bytesPerBlock-d.n, len(p))
 		_, err = d.blockHash.Write(p[:toWrite])
 		if err != nil {
 			panic(hashReturnedError)
@@ -11,7 +11,7 @@ import (
 
 func testChunk(t *testing.T, chunk int) {
 	data := make([]byte, chunk)
-	for i := 0; i < chunk; i++ {
+	for i := range chunk {
 		data[i] = 'A'
 	}
 	for _, test := range []struct {

@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
 
 // fields returns the JSON fields in use by opt as a | separated
 // string.
-func fields(opt interface{}) (pipeTags string, err error) {
+func fields(opt any) (pipeTags string, err error) {
 	var tags []string
 	def := reflect.ValueOf(opt)
 	defType := def.Type()
-	for i := 0; i < def.NumField(); i++ {
+	for i := range def.NumField() {
 		field := defType.Field(i)
 		tag, ok := field.Tag.Lookup("json")
 		if !ok {

@@ -239,7 +239,7 @@ func fields(opt interface{}) (pipeTags string, err error) {
 
 // mustFields returns the JSON fields in use by opt as a | separated
 // string. It panics on failure.
-func mustFields(opt interface{}) string {
+func mustFields(opt any) string {
 	tags, err := fields(opt)
 	if err != nil {
 		panic(err)

@@ -351,12 +351,12 @@ type SpaceInfo struct {
 // DeleteResponse is returned from doDeleteFile
 type DeleteResponse struct {
 	Status
-	Deleted        []string      `json:"deleted"`
-	Errors         []interface{} `json:"errors"`
-	ID             string        `json:"fi_id"`
-	BackgroundTask int           `json:"backgroundtask"`
-	UsSize         string        `json:"us_size"`
-	PaSize         string        `json:"pa_size"`
+	Deleted        []string `json:"deleted"`
+	Errors         []any    `json:"errors"`
+	ID             string   `json:"fi_id"`
+	BackgroundTask int      `json:"backgroundtask"`
+	UsSize         string   `json:"us_size"`
+	PaSize         string   `json:"pa_size"`
 	//SpaceInfo SpaceInfo `json:"spaceinfo"`
 }
 

@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
 }
 
 // params for rpc
-type params map[string]interface{}
+type params map[string]any
 
 // rpc calls the rpc.php method of the SME file fabric
 //
@@ -10,6 +10,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"slices"
 	"strings"
 	"time"
 

@@ -169,11 +170,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 	}
 
 	if apiErr, ok := err.(files_sdk.ResponseError); ok {
-		for _, e := range retryErrorCodes {
-			if apiErr.HttpCode == e {
-				fs.Debugf(nil, "Retrying API error %v", err)
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
+			fs.Debugf(nil, "Retrying API error %v", err)
+			return true, err
 		}
 	}
 

@@ -17,7 +17,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-type settings map[string]interface{}
+type settings map[string]any
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -4,6 +4,7 @@ package googlephotos
 
 import (
 	"path"
+	"slices"
 	"strings"
 	"sync"
 

@@ -119,7 +120,7 @@ func (as *albums) _del(album *api.Album) {
 		dirs := as.path[dir]
 		for i, dir := range dirs {
 			if dir == leaf {
-				dirs = append(dirs[:i], dirs[i+1:]...)
+				dirs = slices.Delete(dirs, i, i+1)
 				break
 			}
 		}

@@ -388,7 +388,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
 		Method:  "GET",
 		RootURL: "https://accounts.google.com/.well-known/openid-configuration",
 	}
-	var openIDconfig map[string]interface{}
+	var openIDconfig map[string]any
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
 		return shouldRetry(ctx, resp, err)

@@ -448,7 +448,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
 			"token_type_hint": []string{"access_token"},
 		},
 	}
-	var res interface{}
+	var res any
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
 		return shouldRetry(ctx, resp, err)

@@ -24,7 +24,7 @@ import (
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "drop":
 		return nil, f.db.Stop(true)
@@ -6,6 +6,7 @@ import (
 	"encoding/gob"
 	"errors"
 	"fmt"
+	"maps"
 	"strings"
 	"time"
 

@@ -195,9 +196,7 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
 		r.Fp = op.fp
 	}
 
-	for hashType, hashVal := range op.hashes {
-		r.Hashes[hashType] = hashVal
-	}
+	maps.Copy(r.Hashes, op.hashes)
 	if data, err = r.encode(op.key); err != nil {
 		return fmt.Errorf("marshal failed: %w", err)
 	}

@@ -52,10 +52,7 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui
 	total := len(p)
 	nullBytes := make([]byte, blockSize)
 	for len(p) > 0 {
-		toWrite := int(blockSize - *bytesInBlock)
-		if toWrite > len(p) {
-			toWrite = len(p)
-		}
+		toWrite := min(int(blockSize-*bytesInBlock), len(p))
 		c, err := writer.Write(p[:toWrite])
 		*bytesInBlock += uint32(c)
 		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
@@ -276,7 +273,7 @@ func (h *hidriveHash) Sum(b []byte) []byte {
 	}
 
 	checksum := zeroSum
-	for i := 0; i < len(h.levels); i++ {
+	for i := range h.levels {
 		level := h.levels[i]
 		if i < len(h.levels)-1 {
 			// Aggregate non-empty non-final levels.

@@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) {
 func TestLevelIsFull(t *testing.T) {
 	content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
 	l := hidrivehash.NewLevel()
-	for i := 0; i < 256; i++ {
+	for range 256 {
 		assert.False(t, l.(internal.LevelHash).IsFull())
 		written, err := l.Write(content[:])
 		assert.Equal(t, len(content), written)

@@ -505,7 +505,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for i := 0; i < checkers; i++ {
+	for range checkers {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()

@@ -740,7 +740,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "set":
 		newOpt := f.opt
@@ -76,7 +76,7 @@ func (c *Client) DriveService() (*DriveService, error) {
 // This function is the main entry point for making requests to the iCloud
 // API. If the initial request returns a 401 (Unauthorized), it will try to
 // reauthenticate and retry the request.
-func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
 	resp, err = c.Session.Request(ctx, opts, request, response)
 	if err != nil && resp != nil {
 		// try to reauth

@@ -100,7 +100,7 @@ func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{
 // This function is useful when you have a session that is already
 // authenticated, but you need to make a request without triggering
 // a re-authentication.
-func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
 	// Make the request without re-authenticating
 	resp, err = c.Session.Request(ctx, opts, request, response)
 	return resp, err

@@ -161,6 +161,6 @@ func newRequestError(Status string, Text string) *RequestError {
 }
 
 // newErrorf makes a new error from sprintf parameters.
-func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
+func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError {
 	return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
 }

@@ -733,8 +733,8 @@ type DocumentUpdateResponse struct {
 			StatusCode   int    `json:"status_code"`
 			ErrorMessage string `json:"error_message"`
 		} `json:"status"`
-		OperationID interface{} `json:"operation_id"`
-		Document    *Document   `json:"document"`
+		OperationID any       `json:"operation_id"`
+		Document    *Document `json:"document"`
 	} `json:"results"`
 }

@@ -765,9 +765,9 @@ type Document struct {
 		IsWritable bool `json:"is_writable"`
 		IsHidden   bool `json:"is_hidden"`
 	} `json:"file_flags"`
-	LastOpenedTime   int64       `json:"lastOpenedTime"`
-	RestorePath      interface{} `json:"restorePath"`
-	HasChainedParent bool        `json:"hasChainedParent"`
+	LastOpenedTime   int64 `json:"lastOpenedTime"`
+	RestorePath      any   `json:"restorePath"`
+	HasChainedParent bool  `json:"hasChainedParent"`
 }
 
 // DriveID returns the drive ID of the Document.
@@ -3,13 +3,13 @@ package api
 import (
 	"context"
 	"fmt"
+	"maps"
 	"net/http"
 	"net/url"
+	"slices"
 	"strings"
 
 	"github.com/oracle/oci-go-sdk/v65/common"
 
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/lib/rest"
 )
@@ -35,7 +35,7 @@ type Session struct {
 // }
 
 // Request makes a request
-func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
+func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) {
 	resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
 
 	if err != nil {

@@ -129,7 +129,7 @@ func (s *Session) AuthWithToken(ctx context.Context) error {
 
 // Validate2FACode validates the 2FA code
 func (s *Session) Validate2FACode(ctx context.Context, code string) error {
-	values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
+	values := map[string]any{"securityCode": map[string]string{"code": code}}
 	body, err := IntoReader(values)
 	if err != nil {
 		return err

@@ -220,9 +220,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
 		"Referer":    fmt.Sprintf("%s/", homeEndpoint),
 		"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
 	}
-	for k, v := range overwrite {
-		headers[k] = v
-	}
+	maps.Copy(headers, overwrite)
 	return headers
 }
 

@@ -230,9 +228,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
 func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
 	headers := GetCommonHeaders(map[string]string{})
 	headers["Cookie"] = s.GetCookieString()
-	for k, v := range overwrite {
-		headers[k] = v
-	}
+	maps.Copy(headers, overwrite)
 	return headers
 }
 

@@ -254,9 +250,7 @@ func GetCommonHeaders(overwrite map[string]string) map[string]string {
 		"Referer":    fmt.Sprintf("%s/", baseEndpoint),
 		"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
 	}
-	for k, v := range overwrite {
-		headers[k] = v
-	}
+	maps.Copy(headers, overwrite)
 	return headers
 }
 

@@ -338,33 +332,33 @@ type AccountInfo struct {
 
 // ValidateDataDsInfo represents an validation info
 type ValidateDataDsInfo struct {
-	HsaVersion                         int           `json:"hsaVersion"`
-	LastName                           string        `json:"lastName"`
-	ICDPEnabled                        bool          `json:"iCDPEnabled"`
-	TantorMigrated                     bool          `json:"tantorMigrated"`
-	Dsid                               string        `json:"dsid"`
-	HsaEnabled                         bool          `json:"hsaEnabled"`
-	IsHideMyEmailSubscriptionActive    bool          `json:"isHideMyEmailSubscriptionActive"`
-	IroncadeMigrated                   bool          `json:"ironcadeMigrated"`
-	Locale                             string        `json:"locale"`
-	BrZoneConsolidated                 bool          `json:"brZoneConsolidated"`
-	ICDRSCapableDeviceList             string        `json:"ICDRSCapableDeviceList"`
-	IsManagedAppleID                   bool          `json:"isManagedAppleID"`
-	IsCustomDomainsFeatureAvailable    bool          `json:"isCustomDomainsFeatureAvailable"`
-	IsHideMyEmailFeatureAvailable      bool          `json:"isHideMyEmailFeatureAvailable"`
-	ContinueOnDeviceEligibleDeviceInfo []string      `json:"ContinueOnDeviceEligibleDeviceInfo"`
-	Gilligvited                        bool          `json:"gilligvited"`
-	AppleIDAliases                     []interface{} `json:"appleIdAliases"`
-	UbiquityEOLEnabled                 bool          `json:"ubiquityEOLEnabled"`
-	IsPaidDeveloper                    bool          `json:"isPaidDeveloper"`
-	CountryCode                        string        `json:"countryCode"`
-	NotificationID                     string        `json:"notificationId"`
-	PrimaryEmailVerified               bool          `json:"primaryEmailVerified"`
-	ADsID                              string        `json:"aDsID"`
-	Locked                             bool          `json:"locked"`
-	ICDRSCapableDeviceCount            int           `json:"ICDRSCapableDeviceCount"`
-	HasICloudQualifyingDevice          bool          `json:"hasICloudQualifyingDevice"`
-	PrimaryEmail                       string        `json:"primaryEmail"`
+	HsaVersion                         int      `json:"hsaVersion"`
+	LastName                           string   `json:"lastName"`
+	ICDPEnabled                        bool     `json:"iCDPEnabled"`
+	TantorMigrated                     bool     `json:"tantorMigrated"`
+	Dsid                               string   `json:"dsid"`
+	HsaEnabled                         bool     `json:"hsaEnabled"`
+	IsHideMyEmailSubscriptionActive    bool     `json:"isHideMyEmailSubscriptionActive"`
+	IroncadeMigrated                   bool     `json:"ironcadeMigrated"`
+	Locale                             string   `json:"locale"`
+	BrZoneConsolidated                 bool     `json:"brZoneConsolidated"`
+	ICDRSCapableDeviceList             string   `json:"ICDRSCapableDeviceList"`
+	IsManagedAppleID                   bool     `json:"isManagedAppleID"`
+	IsCustomDomainsFeatureAvailable    bool     `json:"isCustomDomainsFeatureAvailable"`
+	IsHideMyEmailFeatureAvailable      bool     `json:"isHideMyEmailFeatureAvailable"`
+	ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
+	Gilligvited                        bool     `json:"gilligvited"`
+	AppleIDAliases                     []any    `json:"appleIdAliases"`
+	UbiquityEOLEnabled                 bool     `json:"ubiquityEOLEnabled"`
+	IsPaidDeveloper                    bool     `json:"isPaidDeveloper"`
+	CountryCode                        string   `json:"countryCode"`
+	NotificationID                     string   `json:"notificationId"`
+	PrimaryEmailVerified               bool     `json:"primaryEmailVerified"`
+	ADsID                              string   `json:"aDsID"`
+	Locked                             bool     `json:"locked"`
+	ICDRSCapableDeviceCount            int      `json:"ICDRSCapableDeviceCount"`
+	HasICloudQualifyingDevice          bool     `json:"hasICloudQualifyingDevice"`
+	PrimaryEmail                       string   `json:"primaryEmail"`
 	AppleIDEntries                     []struct {
 		IsPrimary bool   `json:"isPrimary"`
 		Type      string `json:"type"`
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"slices"
 	"strconv"
 	"time"
 

@@ -142,12 +143,7 @@ func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
 	if resp == nil {
 		return false
 	}
-	for _, e := range retryErrorCodes {
-		if resp.StatusCode == e {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(retryErrorCodes, resp.StatusCode)
 }
 
 func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
@@ -13,6 +13,7 @@ import (
 	"net/url"
 	"path"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"time"

@@ -200,7 +201,7 @@ Only enable if you need to be guaranteed to be reflected after write operations.
 const iaItemMaxSize int64 = 1099511627776
 
 // metadata keys that are not writeable
-var roMetadataKey = map[string]interface{}{
+var roMetadataKey = map[string]any{
 	// do not add mtime here, it's a documented exception
 	"name": nil, "source": nil, "size": nil, "md5": nil,
 	"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,

@@ -991,10 +992,8 @@ func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
 
 func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 	if resp != nil {
-		for _, e := range retryErrorCodes {
-			if resp.StatusCode == e {
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, resp.StatusCode) {
+			return true, err
 		}
 	}
 	// Ok, not an awserr, check for generic failure conditions

@@ -1147,13 +1146,7 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
 		}
 
 		fileTrackers, _ := listOrString(iaFile.UpdateTrack)
-		trackerMatch := false
-		for _, v := range fileTrackers {
-			if v == tracker {
-				trackerMatch = true
-				break
-			}
-		}
+		trackerMatch := slices.Contains(fileTrackers, tracker)
 		if !trackerMatch {
 			continue
 		}
@@ -70,7 +70,7 @@ func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 
 // MarshalJSON turns a Rfc3339Time into JSON
 func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
+	return fmt.Appendf(nil, "\"%s\"", t.String()), nil
 }
 
 // LoginToken is struct representing the login token generated in the WebUI

@@ -165,25 +165,25 @@ type DeviceRegistrationResponse struct {
 
 // CustomerInfo provides general information about the account. Required for finding the correct internal username.
 type CustomerInfo struct {
-	Username          string      `json:"username"`
-	Email             string      `json:"email"`
-	Name              string      `json:"name"`
-	CountryCode       string      `json:"country_code"`
-	LanguageCode      string      `json:"language_code"`
-	CustomerGroupCode string      `json:"customer_group_code"`
-	BrandCode         string      `json:"brand_code"`
-	AccountType       string      `json:"account_type"`
-	SubscriptionType  string      `json:"subscription_type"`
-	Usage             int64       `json:"usage"`
-	Quota             int64       `json:"quota"`
-	BusinessUsage     int64       `json:"business_usage"`
-	BusinessQuota     int64       `json:"business_quota"`
-	WriteLocked       bool        `json:"write_locked"`
-	ReadLocked        bool        `json:"read_locked"`
-	LockedCause       interface{} `json:"locked_cause"`
-	WebHash           string      `json:"web_hash"`
-	AndroidHash       string      `json:"android_hash"`
-	IOSHash           string      `json:"ios_hash"`
+	Username          string `json:"username"`
+	Email             string `json:"email"`
+	Name              string `json:"name"`
+	CountryCode       string `json:"country_code"`
+	LanguageCode      string `json:"language_code"`
+	CustomerGroupCode string `json:"customer_group_code"`
+	BrandCode         string `json:"brand_code"`
+	AccountType       string `json:"account_type"`
+	SubscriptionType  string `json:"subscription_type"`
+	Usage             int64  `json:"usage"`
+	Quota             int64  `json:"quota"`
+	BusinessUsage     int64  `json:"business_usage"`
+	BusinessQuota     int64  `json:"business_quota"`
+	WriteLocked       bool   `json:"write_locked"`
+	ReadLocked        bool   `json:"read_locked"`
+	LockedCause       any    `json:"locked_cause"`
+	WebHash           string `json:"web_hash"`
+	AndroidHash       string `json:"android_hash"`
+	IOSHash           string `json:"ios_hash"`
 }
 
 // TrashResponse is returned when emptying the Trash
@@ -193,7 +193,7 @@ func (o *Object) set(e *entity) {
 // Call linkbox with the query in opts and return result
 //
 // This will be checked for error and an error will be returned if Status != 1
-func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
+func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error {
 	err := f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
 		return f.shouldRetry(ctx, resp, err)

@@ -1046,7 +1046,7 @@ you can try to change the output.`,
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
 	switch name {
 	case "noop":
 		if txt, ok := opt["error"]; ok {

@@ -1056,7 +1056,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 		return nil, errors.New(txt)
 	}
 	if _, ok := opt["echo"]; ok {
-		out := map[string]interface{}{}
+		out := map[string]any{}
 		out["name"] = name
 		out["arg"] = arg
 		out["opt"] = opt
@@ -86,7 +86,7 @@ func TestVerifyCopy(t *testing.T) {
 	require.NoError(t, err)
 	src.(*Object).fs.opt.NoCheckUpdated = true
 
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
 	}
 	_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)

@@ -63,8 +63,8 @@ type UserInfoResponse struct {
 		Prolong    bool `json:"prolong"`
 		Promocodes struct {
 		} `json:"promocodes"`
-		Subscription []interface{} `json:"subscription"`
-		Version      string        `json:"version"`
+		Subscription []any  `json:"subscription"`
+		Version      string `json:"version"`
 	} `json:"billing"`
 	Bonuses struct {
 		CameraUpload bool `json:"camera_upload"`

@@ -901,7 +901,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
 		return nil, nil
 	case api.ListParseUnknown15:
 		skip := int(r.ReadPu32())
-		for i := 0; i < skip; i++ {
+		for range skip {
 			r.ReadPu32()
 			r.ReadPu32()
 		}

@@ -1768,7 +1768,7 @@ func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOpt
 func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupGlobs = nil
 	f.speedupAny = false
-	uniqueValidPatterns := make(map[string]interface{})
+	uniqueValidPatterns := make(map[string]any)
 
 	for _, pattern := range strings.Split(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))

@@ -2131,10 +2131,7 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in
 	if limit < 0 {
 		limit = size - offset
 	}
-	end = offset + limit
-	if end > size {
-		end = size
-	}
+	end = min(offset+limit, size)
 	partial = !(offset == 0 && end == size)
 	return offset, end, partial
 }
@@ -11,7 +11,7 @@ import (
 
 func testChunk(t *testing.T, chunk int) {
 	data := make([]byte, chunk)
-	for i := 0; i < chunk; i++ {
+	for i := range chunk {
 		data[i] = 'A'
 	}
 	for _, test := range []struct {

@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"io"
 	"path"
+	"slices"
 	"strings"
 	"sync"
 	"time"

@@ -218,11 +219,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		srv = mega.New().SetClient(fshttp.NewClient(ctx))
 		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
 		srv.SetHTTPS(opt.UseHTTPS)
-		srv.SetLogger(func(format string, v ...interface{}) {
+		srv.SetLogger(func(format string, v ...any) {
 			fs.Infof("*go-mega*", format, v...)
 		})
 		if opt.Debug {
-			srv.SetDebugger(func(format string, v ...interface{}) {
+			srv.SetDebugger(func(format string, v ...any) {
 				fs.Debugf("*go-mega*", format, v...)
 			})
 		}

@@ -498,11 +499,8 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
 	if err != nil {
 		return false, fmt.Errorf("list failed: %w", err)
 	}
-	for _, item := range nodes {
-		if fn(item) {
-			found = true
-			break
-		}
+	if slices.ContainsFunc(nodes, fn) {
+		found = true
 	}
 	return
 }

@@ -1156,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Upload the chunks
 	// FIXME do this in parallel
-	for id := 0; id < u.Chunks(); id++ {
+	for id := range u.Chunks() {
 		_, chunkSize, err := u.ChunkLocation(id)
 		if err != nil {
 			return fmt.Errorf("upload failed to read chunk location: %w", err)

@@ -29,7 +29,7 @@ func testPurgeListDeadlock(t *testing.T) {
 	r.Fremote.Features().Disable("Purge") // force fallback-purge
 
 	// make a lot of files to prevent it from finishing too quickly
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		dst := "file" + fmt.Sprint(i) + ".txt"
 		r.WriteObject(ctx, dst, "hello", t1)
 	}
@@ -274,7 +274,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }
 
 // Command the backend to run a named commands: du and symlink
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "du":
 		// No arg parsing needed, the path is passed in the fs

@@ -858,7 +858,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 
 // callBackend calls NetStorage API using either rest.Call or rest.CallXML function,
 // depending on whether the response is required
-func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response interface{}, options []fs.OpenOption) (io.ReadCloser, error) {
+func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) {
 	opts := rest.Opts{
 		Method:  method,
 		RootURL: URL,

@@ -1080,7 +1080,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
 }
 
 // netStorageDuRequest performs a NetStorage du request
-func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
+func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
 	URL := f.url("")
 	const actionHeader = "version=1&action=du&format=xml&encoding=utf-8"
 	duResp := &Du{}

@@ -1100,7 +1100,7 @@ func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
 }
 
 // netStorageDuRequest performs a NetStorage symlink request
-func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (interface{}, error) {
+func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (any, error) {
 	target := url.QueryEscape(strings.TrimSuffix(dst, "/"))
 	actionHeader := "version=1&action=symlink&target=" + target
 	if modTime != nil {
@ -2532,10 +2532,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
|
||||
remaining := size
|
||||
position := int64(0)
|
||||
for remaining > 0 {
|
||||
n := int64(o.fs.opt.ChunkSize)
|
||||
if remaining < n {
|
||||
n = remaining
|
||||
}
|
||||
n := min(remaining, int64(o.fs.opt.ChunkSize))
|
||||
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
|
||||
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
|
||||
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
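
A minimal sketch of the built-in min (go1.21) used above to clamp a chunk size to what remains; values here are invented for illustration.

package main

import "fmt"

func main() {
	var remaining, chunkSize int64 = 100, 64
	// Replaces: n := chunkSize; if remaining < n { n = remaining }
	n := min(remaining, chunkSize)
	fmt.Println(n) // 64
}
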
@ -86,7 +86,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {

// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
for i := 0; i < dataSize; i++ {
for i := range dataSize {
shift := (i * 11) % 160
shiftBytes := shift / 8
shiftBits := shift % 8
@ -130,10 +130,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
require.NoError(t, err, what)
h := New()
for i := 0; i < len(in); i += blockSize {
end := i + blockSize
if end > len(in) {
end = len(in)
}
end := min(i+blockSize, len(in))
n, err := h.Write(in[i:end])
require.Equal(t, end-i, n, what)
require.NoError(t, err, what)
@ -491,7 +491,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
var request interface{} = moveCopyFileData
var request any = moveCopyFileData

// use /file/rename.json if moving within the same directory
_, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
@ -564,7 +564,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Method: "POST",
Path: "/folder/move_copy.json",
}
var request interface{} = moveFolderData
var request any = moveFolderData

// use /folder/rename.json if moving within the same parent directory
if srcDirectoryID == dstDirectoryID {
@ -1042,10 +1042,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
chunkCounter := 0

for remainingBytes > 0 {
currentChunkSize := int64(o.fs.opt.ChunkSize)
if currentChunkSize > remainingBytes {
currentChunkSize = remainingBytes
}
currentChunkSize := min(int64(o.fs.opt.ChunkSize), remainingBytes)
remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)

@ -131,7 +131,7 @@ If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result interface{}, err error) {
opt map[string]string) (result any, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
@ -159,7 +159,7 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
}
}

func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
@ -332,7 +332,7 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
return uploadedParts, nil
}

func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
req := objectstorage.RestoreObjectsRequest{
NamespaceName: common.String(f.opt.Namespace),
RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
@ -112,7 +112,7 @@ func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType s
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (interface{}, string, error) {
Refresh: func() (any, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
@ -131,7 +131,7 @@ func (o *Object) setMetaData(
contentMd5 *string,
contentType *string,
lastModified *common.SDKTime,
storageTier interface{},
storageTier any,
meta map[string]string) error {

if contentLength != nil {
@ -5,6 +5,7 @@ package oracleobjectstorage
import (
"context"
"fmt"
"slices"
"strings"
"time"

@ -23,7 +24,7 @@ var refreshGracePeriod = 30 * time.Second
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)
type StateRefreshFunc func() (result any, state string, err error)

// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
@ -56,7 +57,7 @@ type StateChangeConf struct {
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)

notfoundTick := 0
@ -72,7 +73,7 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
}

type Result struct {
Result interface{}
Result any
State string
Error error
Done bool
@ -165,12 +166,9 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
}
}

for _, allowed := range conf.Pending {
if currentState == allowed {
found = true
targetOccurrence = 0
break
}
if slices.Contains(conf.Pending, currentState) {
found = true
targetOccurrence = 0
}

if !found && len(conf.Pending) > 0 {
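
A standalone sketch of the slices.Contains (go1.21) rewrite above (invented names): membership testing with Contains returns at the first hit, so the explicit break disappears along with the loop.

package main

import (
	"fmt"
	"slices"
)

func main() {
	pending := []string{"CREATING", "UPDATING"}
	state := "UPDATING"
	// Replaces the loop-and-compare membership test.
	if slices.Contains(pending, state) {
		fmt.Println("still pending")
	}
}
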
@ -278,8 +276,8 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
// NotFoundError resource not found error
type NotFoundError struct {
LastError error
LastRequest interface{}
LastResponse interface{}
LastRequest any
LastResponse any
Message string
Retries int
}
@ -990,10 +990,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
if err != nil {
return nil, err
}
free := q.Quota - q.UsedQuota
if free < 0 {
free = 0
}
free := max(q.Quota-q.UsedQuota, 0)
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
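
A minimal sketch of the built-in max (go1.21) clamping a possibly negative difference to zero, as in the About hunk above; values are invented.

package main

import "fmt"

func main() {
	var quota, used int64 = 100, 120
	// Replaces: free := quota - used; if free < 0 { free = 0 }
	free := max(quota-used, 0)
	fmt.Println(free) // 0
}
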
@ -1324,7 +1321,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists, trying a few times
for i := 0; i < 5; i++ {
for range 5 {
delObj, delErr := o.fs.NewObject(ctx, o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove(ctx)
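
A sketch of the variant used above where the loop index is unused: go1.22 allows dropping the variable entirely with `for range n`.

package main

import "fmt"

func main() {
	attempts := 0
	// go1.22: when the induction variable is unused it can be
	// dropped entirely; the body runs exactly five times.
	for range 5 {
		attempts++
	}
	fmt.Println(attempts) // 5
}
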
@ -37,7 +37,7 @@ func (c *writerAt) Close() error {
}
sizeOk := false
sizeLastSeen := int64(0)
for retry := 0; retry < 5; retry++ {
for retry := range 5 {
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
obj, err := c.fs.NewObject(c.ctx, c.remote)
if err != nil {
@ -71,14 +71,14 @@ type Error struct {

// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []any `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
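
A standalone sketch of the `any` swap in struct fields as in the hunk above: `any` is an alias for `interface{}` (go1.18), so JSON decoding into fields of unknown shape is unaffected. The struct and payload here are invented.

package main

import (
	"encoding/json"
	"fmt"
)

// The field type change is purely cosmetic; the alias decodes
// identically to interface{}.
type details struct {
	Reason       string `json:"reason"`
	StackEntries []any  `json:"stack_entries"`
}

func main() {
	var d details
	if err := json.Unmarshal([]byte(`{"reason":"x","stack_entries":[1,"a"]}`), &d); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", d) // {Reason:x StackEntries:[1 a]}
}
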
@ -168,44 +168,44 @@ type FileList struct {
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
ReferenceEvents []interface{} `json:"reference_events"`
ReferenceResource interface{} `json:"reference_resource"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
Tags []interface{} `json:"tags"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
ReferenceEvents []any `json:"reference_events"`
ReferenceResource any `json:"reference_resource"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []any `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
Tags []any `json:"tags"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}

// FileLinks includes links to file at backend
@ -235,18 +235,18 @@ type Media struct {
VideoType string `json:"video_type,omitempty"` // "mpegts"
HdrType string `json:"hdr_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"` // "category_origin"
Audio interface{} `json:"audio"` // TODO: undiscovered yet
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []any `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"` // "category_origin"
Audio any `json:"audio"` // TODO: undiscovered yet
}

// FileParams includes parameters for instant open
@ -263,20 +263,20 @@ type FileParams struct {

// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct{} `json:"params,omitempty"` // TODO
CategoryIDs []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct{} `json:"links,omitempty"` // TODO
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // decompress" for rar files
Access []any `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []any `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct{} `json:"params,omitempty"` // TODO
CategoryIDs []any `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct{} `json:"links,omitempty"` // TODO
}

// ------------------------------------------------------------
@ -290,27 +290,27 @@ type TaskList struct {

// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []any `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource any `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}

// TaskParams includes parameters informing status of Task
@ -638,7 +638,7 @@ func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper
return c
}

func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request any, response any) (resp *http.Response, err error) {
if c.captcha != nil {
token, err := c.captcha.Token(opts)
if err != nil || token == "" {
@ -1232,7 +1232,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
params := url.Values{}
iVal := reflect.ValueOf(&form.MultiParts).Elem()
iTyp := iVal.Type()
for i := 0; i < iVal.NumField(); i++ {
for i := range iVal.NumField() {
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
}
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
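
A standalone sketch of ranging over a method result as in the reflect loop above (struct and values invented): `range v.NumField()` evaluates NumField once and then counts from 0, matching the old 3-clause loop.

package main

import (
	"fmt"
	"reflect"
)

type multiParts struct {
	Key    string `json:"key"`
	Policy string `json:"policy"`
}

func main() {
	v := reflect.ValueOf(multiParts{Key: "k", Policy: "p"})
	t := v.Type()
	// go1.22: the range expression is evaluated once, then
	// i counts 0..NumField()-1.
	for i := range v.NumField() {
		fmt.Println(t.Field(i).Tag.Get("json"), "=", v.Field(i).String())
	}
}
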
@ -1520,7 +1520,7 @@ Result:
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "addurl":
if len(arg) != 1 {
@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"slices"
"strconv"
"time"

@ -13,10 +14,8 @@ import (
)

func checkStatusCode(resp *http.Response, expected ...int) error {
for _, code := range expected {
if resp.StatusCode == code {
return nil
}
if slices.Contains(expected, resp.StatusCode) {
return nil
}
return &statusCodeError{response: resp}
}
@ -332,10 +332,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
var offsetMismatch bool
buf := make([]byte, defaultChunkSize)
for clientOffset < size {
chunkSize := size - clientOffset
if chunkSize >= int64(defaultChunkSize) {
chunkSize = int64(defaultChunkSize)
}
chunkSize := min(size-clientOffset, int64(defaultChunkSize))
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunkStart := clientOffset
reqSize := chunkSize
@ -358,7 +358,7 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
})()

ch := make(chan chunk, mu.cfg.concurrency)
for i := 0; i < mu.cfg.concurrency; i++ {
for range mu.cfg.concurrency {
mu.wg.Add(1)
go mu.readChunk(ch)
}
@ -15,6 +15,7 @@ import (
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@ -643,10 +644,8 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
return err
}

for _, removedID := range result.IDs {
if removedID == id {
return nil
}
if slices.Contains(result.IDs, id) {
return nil
}

return fmt.Errorf("file %s was not deleted successfully", id)
@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f

defer func() { u.fileUsage[fileID] = borrowed }()

effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())

if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)

if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
@ -19,6 +19,7 @@ import (
"net/url"
"path"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@ -3097,10 +3098,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
return true, err
}
}
for _, e := range retryErrorCodes {
if httpStatusCode == e {
return true, err
}
if slices.Contains(retryErrorCodes, httpStatusCode) {
return true, err
}
}
// Ok, not an awserr, check for generic failure conditions
@ -3230,7 +3229,7 @@ func fixupRequest(o *s3.Options, opt *Options) {
type s3logger struct{}

// Logf is expected to support the standard fmt package "verbs".
func (s3logger) Logf(classification logging.Classification, format string, v ...interface{}) {
func (s3logger) Logf(classification logging.Classification, format string, v ...any) {
switch classification {
default:
case logging.Debug:
@ -5253,7 +5252,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "restore":
req := s3.RestoreObjectInput{
@ -9,9 +9,9 @@ import (

// Renew allows tokens to be renewed on expiry.
type Renew struct {
ts *time.Ticker // timer indicating when it's time to renew the token
run func() error // the callback to do the renewal
done chan interface{} // channel to end the go routine
ts *time.Ticker // timer indicating when it's time to renew the token
run func() error // the callback to do the renewal
done chan any // channel to end the go routine
shutdown *sync.Once
}

@ -22,7 +22,7 @@ func NewRenew(every time.Duration, run func() error) *Renew {
r := &Renew{
ts: time.NewTicker(every),
run: run,
done: make(chan interface{}),
done: make(chan any),
shutdown: &sync.Once{},
}
go r.renewOnExpiry()
@ -1313,7 +1313,7 @@ func (f *Fs) getCachedLibraries(ctx context.Context) ([]api.Library, error) {
f.librariesMutex.Lock()
defer f.librariesMutex.Unlock()

libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value interface{}, ok bool, error error) {
libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value any, ok bool, error error) {
// Load the libraries if not present in the cache
libraries, err := f.getLibraries(ctx)
if err != nil {
@ -8,6 +8,7 @@ import (
"fmt"
"io"
"os/exec"
"slices"
"strings"
"time"

@ -89,7 +90,7 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
// Connect to a remote host and request the sftp subsystem via
// the 'ssh' command. This assumes that passwordless login is
// correctly configured.
ssh := append([]string(nil), s.f.opt.SSH...)
ssh := slices.Clone(s.f.opt.SSH)
s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)

// Allow the command a short time only to shut down
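
A minimal sketch of slices.Clone (go1.21), the readable spelling of the append-to-nil copy idiom replaced above; values are invented.

package main

import (
	"fmt"
	"slices"
)

func main() {
	ssh := []string{"ssh", "-p", "2022", "host"}
	// Equivalent to append([]string(nil), ssh...): a shallow copy
	// with its own backing array.
	args := slices.Clone(ssh)
	args[0] = "autossh"
	fmt.Println(ssh[0], args[0]) // ssh autossh
}
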
@ -20,13 +20,13 @@ func TestStringLock(t *testing.T) {
inner = 100
total = outer * inner
)
for k := 0; k < outer; k++ {
for range outer {
for j := range counter {
wg.Add(1)
go func(j int) {
defer wg.Done()
ID := fmt.Sprintf("%d", j)
for i := 0; i < inner; i++ {
for range inner {
lock.Lock(ID)
n := counter[j]
time.Sleep(1 * time.Millisecond)
@ -537,7 +537,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, f.ci.Transfers)
for i := 0; i < f.ci.Transfers; i++ {
for range f.ci.Transfers {
f.bufferTokens <- nil
}
}
@ -57,10 +57,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
}

threads := f.ci.Transfers
if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads
}
threads := min(f.ci.Transfers, info.MaxNumberOfThreads)

// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
@ -337,7 +337,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}

// Cleanup stray files left after failed upload
for i := 0; i < 5; i++ {
for range 5 {
cleanObj, cleanErr := f.NewObject(ctx, src.Remote())
if cleanErr == nil {
cleanErr = cleanObj.Remove(ctx)
@ -574,7 +574,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
RootURL: pathID,
NoResponse: true,
}
var mkdir interface{}
var mkdir any
if pathID == f.opt.RootID {
// folders at the root are syncFolders
mkdir = &api.CreateSyncFolder{
@ -8,8 +8,10 @@ import (
"errors"
"fmt"
"io"
"maps"
"path"
"regexp"
"slices"
"strconv"
"strings"
"sync"
@ -417,10 +419,8 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
// If this is a swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
if swiftError.StatusCode == e {
return true, err
}
if slices.Contains(retryErrorCodes, swiftError.StatusCode) {
return true, err
}
}
// Check for generic failure conditions
@ -701,7 +701,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
if !recurse {
opts.Delimiter = '/'
}
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (any, error) {
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
@ -1378,9 +1378,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
newHeaders := meta.ObjectHeaders()
for k, v := range newHeaders {
o.headers[k] = v
}
maps.Copy(o.headers, newHeaders)
// Include any other metadata from request
for k, v := range o.headers {
if strings.HasPrefix(k, "X-Object-") {
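
A minimal sketch of maps.Copy (go1.21) as used above, with invented keys: it inserts every key/value of the second map into the first, overwriting duplicates, exactly like the range-and-assign loop it replaces.

package main

import (
	"fmt"
	"maps"
)

func main() {
	headers := map[string]string{"X-Object-Meta-A": "1"}
	newHeaders := map[string]string{"X-Object-Meta-Mtime": "1700000000"}
	// Same effect as: for k, v := range newHeaders { headers[k] = v }
	maps.Copy(headers, newHeaders)
	fmt.Println(len(headers)) // 2
}
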
@ -1450,7 +1448,7 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, container string
// encoded but we need '&' encoded.
func urlEncode(str string) string {
var buf bytes.Buffer
for i := 0; i < len(str); i++ {
for i := range len(str) {
c := str[i]
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
_ = buf.WriteByte(c)
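
A standalone sketch of the `range len(str)` form above (invented string): it visits every byte index, matching the old 3-clause loop, whereas plain `for i := range str` would step by runes and skip the interior bytes of multi-byte characters.

package main

import "fmt"

func main() {
	str := "a€b"
	// Byte-wise iteration: i = 0..len(str)-1, one step per byte.
	for i := range len(str) {
		fmt.Printf("%d:%#x ", i, str[i])
	}
	fmt.Println()
}
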
@ -82,8 +82,8 @@ type File struct {
ContentType string `json:"content_type"`
Format struct {
} `json:"format"`
DownloadTypes []interface{} `json:"download_types"`
ThumbnailInfo []interface{} `json:"thumbnail_info"`
DownloadTypes []any `json:"download_types"`
ThumbnailInfo []any `json:"thumbnail_info"`
PreviewInfo struct {
} `json:"preview_info"`
Privacy string `json:"privacy"`
@ -729,7 +729,7 @@ func (o *Object) Storable() bool {
return true
}

func (o *Object) updateFileProperties(ctx context.Context, req interface{}) (err error) {
func (o *Object) updateFileProperties(ctx context.Context, req any) (err error) {
var resp *api.File

opts := rest.Opts{
@ -887,7 +887,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove implements the mandatory method fs.Object.Remove
func (o *Object) Remove(ctx context.Context) error {
for i := 0; i < 2; i++ {
for range 2 {
// First call moves the item to recycle bin, second deletes it for good
var err error
opts := rest.Opts{
@ -902,7 +902,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
for i := 0; i < len(opt.Remotes)-1; i++ {
for i := range len(opt.Remotes) - 1 {
opt.Remotes[i] += ":ro"
}
opt.Upstreams = opt.Remotes
@ -1045,7 +1045,7 @@ func parentDir(absPath string) string {

func multithread(num int, fn func(int)) {
var wg sync.WaitGroup
for i := 0; i < num; i++ {
for i := range num {
wg.Add(1)
i := i
go func() {
@ -246,7 +246,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
return f, nil
}

func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
func (f *Fs) decodeError(resp *http.Response, response any) (err error) {
defer fs.CheckClose(resp.Body, &err)

body, err := io.ReadAll(resp.Body)
@ -112,12 +112,8 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa
return err
}

contentLength := chunkSize

// Last chunk may be smaller
if size-offset < contentLength {
contentLength = size - offset
}
contentLength := min(size-offset, chunkSize)

endOffset := offset + contentLength - 1

@ -185,7 +185,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
if err != nil {
return nil, err
}
reqData := map[string]interface{}{
reqData := map[string]any{
"Username": ca.user,
"Password": ca.pass,
"Address": ca.endpoint,
@ -23,20 +23,20 @@ type ResourceInfoRequestOptions struct {

// ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Created string `json:"created"`
CustomProperties map[string]interface{} `json:"custom_properties"`
Preview string `json:"preview"`
PublicURL string `json:"public_url"`
OriginPath string `json:"origin_path"`
Modified string `json:"modified"`
Path string `json:"path"`
Md5 string `json:"md5"`
ResourceType string `json:"type"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
Embedded *ResourceListResponse `json:"_embedded"`
PublicKey string `json:"public_key"`
Name string `json:"name"`
Created string `json:"created"`
CustomProperties map[string]any `json:"custom_properties"`
Preview string `json:"preview"`
PublicURL string `json:"public_url"`
OriginPath string `json:"origin_path"`
Modified string `json:"modified"`
Path string `json:"path"`
Md5 string `json:"md5"`
ResourceType string `json:"type"`
MimeType string `json:"mime_type"`
Size int64 `json:"size"`
Embedded *ResourceListResponse `json:"_embedded"`
}

// ResourceListResponse struct
@ -64,7 +64,7 @@ type AsyncStatus struct {

// CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
CustomProperties map[string]interface{} `json:"custom_properties"`
CustomProperties map[string]any `json:"custom_properties"`
}

// SortMode struct - sort mode
@ -1024,7 +1024,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
}

opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
rcm := map[string]interface{}{
rcm := map[string]any{
property: value,
}
cpr := api.CustomPropertyResponse{CustomProperties: rcm}
@ -82,7 +82,7 @@ Note to run these commands on a running backend then see
return err
}
// Run the command
var out interface{}
var out any
switch name {
case "help":
return showHelp(fsInfo)
@ -10,7 +10,7 @@ import (
)

// Names comprises a set of file names
type Names map[string]interface{}
type Names map[string]any

// ToNames converts string slice to a set of names
func ToNames(list []string) Names {
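
A standalone sketch of the set-of-names idiom behind the Names type above (names invented): a map with `any` values where only the keys matter; the swap from interface{} is purely cosmetic at runtime.

package main

import "fmt"

// Only the keys of this map are meaningful; values are always nil.
type names map[string]any

func main() {
	set := names{}
	for _, n := range []string{"a.txt", "b.txt"} {
		set[n] = nil
	}
	_, ok := set["a.txt"]
	fmt.Println(ok, len(set)) // true 2
}
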
@ -627,7 +627,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
testFunc := func() {
src := filepath.Join(b.dataDir, "file7.txt")

for i := 0; i < 50; i++ {
for i := range 50 {
dst := "file" + fmt.Sprint(i) + ".txt"
err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
if err != nil {
@ -1606,7 +1606,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
s = pathReplacer.Replace(strings.TrimSpace(s))

// Apply regular expression replacements
for i := 0; i < len(repFrom); i++ {
for i := range repFrom {
s = repFrom[i].ReplaceAllString(s, repTo[i])
}
s = strings.TrimSpace(s)
@ -1621,7 +1621,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
// Sort consecutive groups of naturally unordered lines.
// Any such group must end before the log ends or it might be lost.
absorbed := false
for i := 0; i < len(dampers); i++ {
for i := range dampers {
match := false
if s != "" && !absorbed {
match = hoppers[i].MatchString(s)
@ -1869,7 +1869,7 @@ func fileType(fileName string) string {
}

// logPrintf prints a message to stdout and to the test log
func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
func (b *bisyncTest) logPrintf(text string, args ...any) {
line := fmt.Sprintf(text, args...)
fs.Log(nil, line)
if b.logFile != nil {
@ -1936,7 +1936,7 @@ func ctxNoDsStore(ctx context.Context, t *testing.T) (context.Context, *filter.F
return ctxNoDsStore, fi
}

func checkError(t *testing.T, err error, msgAndArgs ...interface{}) {
func checkError(t *testing.T, err error, msgAndArgs ...any) {
if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
t.Skipf("Skip test because remote cannot upload empty files")
}
@ -12,7 +12,7 @@ import (
"github.com/rclone/rclone/lib/terminal"
)

func (b *bisyncRun) indentf(tag, file, format string, args ...interface{}) {
func (b *bisyncRun) indentf(tag, file, format string, args ...any) {
b.indent(tag, file, fmt.Sprintf(format, args...))
}

@ -524,7 +524,7 @@ func (b *bisyncRun) testFn() {
}
}

func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
func (b *bisyncRun) handleErr(o any, msg string, err error, critical, retryable bool) {
if err != nil {
if retryable {
b.retryable = true
@ -624,7 +624,7 @@ func (b *bisyncRun) debugFn(nametocheck string, fn func()) {
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
const individualWait = 1 * time.Second
for i := 0; i < int(totalWait/individualWait); i++ {
for i := range int(totalWait / individualWait) {
ok = fn()
if ok {
return ok
@ -28,6 +28,7 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strings"

"github.com/rclone/rclone/cmd"
@ -282,11 +283,8 @@ func (s *server) handleInitRemote() error {

if s.configRcloneRemoteName != ":local" {
var remoteExists bool
for _, remoteName := range config.FileSections() {
if remoteName == trimmedName {
remoteExists = true
break
}
if slices.Contains(config.FileSections(), trimmedName) {
remoteExists = true
}
if !remoteExists {
s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
@ -273,7 +273,7 @@ func showBackends() {
fmt.Printf("  rclone help backend <name>\n")
}

func quoteString(v interface{}) string {
func quoteString(v any) string {
switch v.(type) {
case string:
return fmt.Sprintf("%q", v)
@ -78,7 +78,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
fs.Debugf(f, "Mounting on %q", mountpoint)

if opt.DebugFUSE {
fuse.Debug = func(msg interface{}) {
fuse.Debug = func(msg any) {
fs.Debugf("fuse", "%v", msg)
}
}

@ -185,7 +185,7 @@ func (u *UI) Print(x, y int, style tcell.Style, msg string) {
}

// Printf a string
func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...interface{}) {
func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...any) {
s := fmt.Sprintf(format, args...)
u.Print(x, y, style, s)
}
@ -207,7 +207,7 @@ func (u *UI) Line(x, y, xmax int, style tcell.Style, spacer rune, msg string) {
}

// Linef a string
func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...interface{}) {
func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...any) {
s := fmt.Sprintf(format, args...)
u.Line(x, y, xmax, style, spacer, s)
}
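
A minimal sketch of the variadic signature change in the printf-style helpers above (helper name invented): `...interface{}` and `...any` are the identical type, so wrappers and their callers compile unchanged.

package main

import "fmt"

// Forwarding a ...any parameter to fmt works exactly as it did
// with ...interface{}.
func logf(format string, args ...any) {
	fmt.Printf("ui: "+format+"\n", args...)
}

func main() {
	logf("%d items in %q", 3, "dir")
}
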
@ -273,11 +273,7 @@ func (u *UI) Box() {
xmax := x + boxWidth
if len(u.boxMenu) != 0 {
count := lineOptionLength(u.boxMenu)
if x+boxWidth > x+count {
xmax = x + boxWidth
} else {
xmax = x + count
}
xmax = max(x+boxWidth, x+count)
}
ymax := y + len(u.boxText)

@ -5,6 +5,7 @@ import (
"context"
"fmt"
"path"
"slices"
"sync"
"time"

@ -111,7 +112,7 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir

// Entries returns a copy of the entries in the directory
func (d *Dir) Entries() fs.DirEntries {
return append(fs.DirEntries(nil), d.entries...)
return slices.Clone(d.entries)
}

// Remove removes the i-th entry from the
@ -146,7 +147,7 @@ func (d *Dir) remove(i int) {
d.size -= size
d.count -= count
d.countUnknownSize -= countUnknownSize
d.entries = append(d.entries[:i], d.entries[i+1:]...)
d.entries = slices.Delete(d.entries, i, i+1)

dir := d
// populate changed size and count to parent(s)
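
A standalone sketch of slices.Delete (go1.21) as used above, with invented values: it removes s[i:j] in place like the append-splice idiom, and since go1.22 it also zeroes the vacated tail elements so they can be collected.

package main

import (
	"fmt"
	"slices"
)

func main() {
	entries := []string{"a", "b", "c", "d"}
	i := 1
	// Equivalent to: entries = append(entries[:i], entries[i+1:]...)
	entries = slices.Delete(entries, i, i+1)
	fmt.Println(entries) // [a c d]
}
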
@ -40,7 +40,7 @@ func startProgress() func() {
}

// Intercept output from functions such as HashLister to stdout
operations.SyncPrintf = func(format string, a ...interface{}) {
operations.SyncPrintf = func(format string, a ...any) {
printProgress(fmt.Sprintf(format, a...))
}

@ -97,7 +97,7 @@ func printProgress(logMessage string) {
out(terminal.MoveUp)
}
// Move to the start of the block we wrote erasing all the previous lines
for i := 0; i < nlines-1; i++ {
for range nlines - 1 {
out(terminal.EraseLine)
out(terminal.MoveUp)
}
@ -312,12 +312,12 @@ func list(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to list: %w", err)
}
commands, ok := list["commands"].([]interface{})
commands, ok := list["commands"].([]any)
if !ok {
return errors.New("bad JSON")
}
for _, command := range commands {
info, ok := command.(map[string]interface{})
info, ok := command.(map[string]any)
if !ok {
return errors.New("bad JSON")
}
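
A standalone sketch of type assertions after the alias swap, as in the hunk above (payload invented): `.([]interface{})` and `.([]any)` match exactly the same dynamic types, so decoding and asserting behave identically.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v map[string]any
	data := []byte(`{"commands":[{"Name":"ls"},{"Name":"cat"}]}`)
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	// JSON arrays decode to []any, objects to map[string]any.
	commands, ok := v["commands"].([]any)
	if !ok {
		fmt.Println("bad JSON")
		return
	}
	for _, c := range commands {
		if info, ok := c.(map[string]any); ok {
			fmt.Println(info["Name"])
		}
	}
}
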
@ -327,7 +327,7 @@ func makeRandomExeName(baseName, extension string) (string, error) {
extension += ".exe"
}

for attempt := 0; attempt < maxAttempts; attempt++ {
for range maxAttempts {
filename := fmt.Sprintf("%s.%s.%s", baseName, random.String(4), extension)
if _, err := os.Stat(filename); os.IsNotExist(err) {
return filename, nil
@ -34,7 +34,7 @@ var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/")

// Turns the given entry and DMS host into a UPnP object. A nil object is
// returned if the entry is not of interest.
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret interface{}, err error) {
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret any, err error) {
obj := upnpav.Object{
ID: cdsObject.ID(),
Restricted: 1,
@ -127,7 +127,7 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
}

// Returns all the upnpav objects in a directory.
func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
func (cds *contentDirectoryService) readContainer(o object, host string) (ret []any, err error) {
node, err := cds.vfs.Stat(o.Path)
if err != nil {
return
@ -295,10 +295,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
}
totalMatches := len(objs)
objs = objs[func() (low int) {
low = browse.StartingIndex
if low > len(objs) {
low = len(objs)
}
low = min(browse.StartingIndex, len(objs))
return
}():]
if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
Some files were not shown because too many files have changed in this diff.