Merge branch 'rclone:master' into namecrane-backend

This commit is contained in:
namecrane 2025-04-09 16:04:31 -07:00 committed by GitHub
commit 0b6dd1bc70
108 changed files with 4019 additions and 1082 deletions

View File

@ -44,7 +44,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@ -1378,7 +1378,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
containerName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(containerName, directory, prefix string, addContainer bool) error {
return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

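This hunk is the first of many in this merge that swap walk.NewListRHelper for list.NewHelper: the ListR helper moved from fs/walk to fs/list, keeping the Add/Flush calls seen throughout these diffs. A minimal sketch of the usage pattern, assuming a hypothetical forEachEntry traversal standing in for a backend's own listing loop:

// sketch only: forEachEntry is a stand-in for a backend's own traversal
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback) // batches entries into tranches
	err := f.forEachEntry(ctx, dir, func(entry fs.DirEntry) error {
		return helper.Add(entry) // may invoke callback when a tranche fills
	})
	if err != nil {
		return err
	}
	return helper.Flush() // deliver any remaining buffered entries
}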
View File

@ -31,8 +31,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
@ -918,7 +918,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {

View File

@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@ -1086,7 +1087,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
@ -1138,7 +1139,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err

View File

@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache

View File

@ -356,7 +356,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker to skip files
f.features.ListR = nil // Recursive listing may cause chunker to skip files
f.features.ListP = nil // ListP not supported yet
return f, err
}

View File

@ -46,6 +46,7 @@ func TestIntegration(t *testing.T) {
"DirCacheFlush",
"UserInfo",
"Disconnect",
"ListP",
},
}
if *fstest.RemoteName == "" {

View File

@ -18,7 +18,7 @@ type CloudinaryEncoder interface {
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string) string
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}

View File

@ -8,7 +8,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@ -103,19 +105,39 @@ func init() {
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips the extension from the file name, unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
@ -203,6 +225,18 @@ func (f *Fs) FromStandardPath(s string) string {
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
@ -212,8 +246,20 @@ func (f *Fs) ToStandardPath(s string) string {
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
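A worked example of the adjustment above, under the default adjust_media_files_extensions=true: an upload of photo.jpg is stored by Cloudinary with display name photo and a delivery URL ending in .jpg, and ToStandardName re-attaches the extension only when it appears in media_extensions. A self-contained sketch of that restore step (extension list abbreviated):

package main

import (
	"fmt"
	"net/url"
	"path"
	"slices"
	"strings"
)

// restoreExt mirrors the ToStandardName extension logic in isolation
func restoreExt(name, assetURL string, mediaExts []string) string {
	parsedURL, err := url.Parse(assetURL)
	if err != nil {
		return name // on parse failure the name is left as-is
	}
	ext := path.Ext(parsedURL.Path)
	if !slices.Contains(mediaExts, strings.ToLower(strings.TrimPrefix(ext, "."))) {
		return name // non-media extensions stay stripped
	}
	return name + ext
}

func main() {
	exts := []string{"jpg", "png", "mp4"}
	// prints "photo.jpg": the .jpg from the URL is a known media extension
	fmt.Println(restoreExt("photo", "https://res.cloudinary.com/demo/image/upload/photo.jpg", exts))
}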
// FromStandardFullPath encodes a full path to Cloudinary standard
@ -331,10 +377,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
}
for _, asset := range results.Assets {
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,

View File

@ -20,6 +20,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
@ -265,6 +266,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
@ -809,24 +813,52 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
entries := make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d)
}
return entries, nil
return callback(entries)
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
return err
}
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
return u.wrapEntries(ctx, entries)
listP := u.f.Features().ListP
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, uRemote, wrappedCallback)
}
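Combine, compress, crypt, and hasher all adopt the same shape in this merge: List becomes a thin delegate to list.WithListP, and ListP prefers the wrapped backend's ListP, falling back to a single-tranche List when it is absent. A condensed sketch of that shared shape, with transformEntries as a hypothetical stand-in for the per-backend wrapEntries/processEntries/encryptEntries:

func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f) // adapt ListP back to a plain List result
}

func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	wrappedCallback := func(entries fs.DirEntries) error {
		entries, err := f.transformEntries(entries) // hypothetical per-backend transform
		if err != nil {
			return err
		}
		return callback(entries)
	}
	if listP := f.Fs.Features().ListP; listP != nil {
		return listP(ctx, dir, wrappedCallback) // stream tranches from the inner backend
	}
	entries, err := f.Fs.List(ctx, dir) // fallback: one tranche from a full listing
	if err != nil {
		return err
	}
	return wrappedCallback(entries)
}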
// ListR lists the objects and directories of the Fs starting

View File

@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@ -208,6 +209,8 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream")
}
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@ -352,11 +355,39 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, dir)
if err != nil {
return nil, err
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil {
return err
}
return callback(entries)
}
return f.processEntries(entries)
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
}
// ListR lists the objects and directories of the Fs starting

View File

@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
)
// Globals
@ -293,6 +294,9 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@ -416,11 +420,40 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
return f.encryptEntries(ctx, entries)
listP := f.Fs.Features().ListP
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
}
// ListR lists the objects and directories of the Fs starting

View File

@ -38,8 +38,8 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@ -2189,7 +2189,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg := sync.WaitGroup{}
in := make(chan listREntry, listRInputBuffer)
out := make(chan error, f.ci.Checkers)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
overflow := []listREntry{}
listed := 0

View File

@ -25,7 +25,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@ -734,7 +734,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
// implementation of ListR
func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err error) {
func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@ -820,7 +820,7 @@ func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
err = f.listR(ctx, dir, list)
if err != nil {
return err

View File

@ -35,7 +35,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@ -845,7 +845,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

View File

@ -18,6 +18,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/kv"
)
@ -182,6 +183,9 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
// Enable ListP always
f.features.ListP = f.ListP
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
@ -237,10 +241,39 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.wrapEntries(entries)
if err != nil {
return err
}
return callback(entries)
}
return f.wrapEntries(entries)
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
}
// ListR lists the objects and directories recursively into out.

View File

@ -31,7 +31,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@ -1264,7 +1264,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "liststream")
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {

View File

@ -17,7 +17,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/bucket"
)
@ -383,7 +383,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
entries := fs.DirEntries{}
listR := func(bucket, directory, prefix string, addBucket bool) error {
err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {

View File

@ -28,7 +28,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@ -516,7 +516,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return fs.ErrorDirNotFound
}
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
for resumeStart := u.Path; resumeStart != ""; {
var files []File
files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)

View File

@ -30,6 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@ -1396,7 +1397,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// So we have to filter things outside of the root which is
// inefficient.
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error

View File

@ -18,8 +18,8 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
@ -649,7 +649,7 @@ of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

View File

@ -27,7 +27,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@ -631,7 +631,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
})

View File

@ -22,7 +22,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
@ -704,7 +704,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)

View File

@ -48,8 +48,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@ -4481,7 +4481,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *types.Ob
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
// List the objects and directories
err = f.list(ctx, listOpt{
bucket: bucket,
@ -4497,16 +4497,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
return nil
}
// listBuckets lists the buckets to out
@ -4539,14 +4539,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}
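Unlike the wrapping backends, s3 implements ListP natively: listDir now pushes entries through a callback, and ListP routes both bucket and directory listings into a list.Helper before the final Flush. On the consuming side, ListP delivers tranches rather than one slice; a small consumer sketch using only the fs.ListPer interface asserted at the bottom of this file:

// countEntries tallies entries in dir as tranches arrive
func countEntries(ctx context.Context, f fs.ListPer, dir string) (n int, err error) {
	err = f.ListP(ctx, dir, func(entries fs.DirEntries) error {
		n += len(entries) // one callback invocation per tranche
		return nil
	})
	return n, err
}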
// ListR lists the objects and directories of the Fs starting
@ -4567,7 +4599,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, listOpt{
bucket: bucket,
@ -5061,7 +5093,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre
Usage Examples:
rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@ -6843,6 +6875,7 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}

View File

@ -25,8 +25,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@ -846,7 +846,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
list := list.NewHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return list.Add(entry)

View File

@ -1020,6 +1020,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
// Disable ListP always
features.ListP = nil
// show that we wrap other backends
features.Overlay = true

View File

@ -12,7 +12,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter", "ListP"}
unimplementableObjectMethods = []string{}
)

View File

@ -52,6 +52,15 @@ import (
_ "github.com/rclone/rclone/cmd/rmdirs"
_ "github.com/rclone/rclone/cmd/selfupdate"
_ "github.com/rclone/rclone/cmd/serve"
_ "github.com/rclone/rclone/cmd/serve/dlna"
_ "github.com/rclone/rclone/cmd/serve/docker"
_ "github.com/rclone/rclone/cmd/serve/ftp"
_ "github.com/rclone/rclone/cmd/serve/http"
_ "github.com/rclone/rclone/cmd/serve/nfs"
_ "github.com/rclone/rclone/cmd/serve/restic"
_ "github.com/rclone/rclone/cmd/serve/s3"
_ "github.com/rclone/rclone/cmd/serve/sftp"
_ "github.com/rclone/rclone/cmd/serve/webdav"
_ "github.com/rclone/rclone/cmd/settier"
_ "github.com/rclone/rclone/cmd/sha1sum"
_ "github.com/rclone/rclone/cmd/size"

View File

@ -23,19 +23,23 @@ func init() {
}
var commandDefinition = &cobra.Command{
Use: "authorize",
Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
Short: `Remote authorization.`,
Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.
The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
// "groups": "",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args)

View File

@ -0,0 +1,32 @@
package authorize
import (
"bytes"
"strings"
"testing"
"github.com/spf13/cobra"
)
func TestAuthorizeCommand(t *testing.T) {
// Test that the Use string is correctly formatted
if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
}
// Test that help output contains the argument information
buf := &bytes.Buffer{}
cmd := &cobra.Command{}
cmd.AddCommand(commandDefinition)
cmd.SetOut(buf)
cmd.SetArgs([]string{"authorize", "--help"})
err := cmd.Execute()
if err != nil {
t.Fatalf("Failed to execute help command: %v", err)
}
helpOutput := buf.String()
if !strings.Contains(helpOutput, "authorize <fs name>") {
t.Errorf("Help output doesn't contain correct usage information")
}
}

cmd/gitannex/configparse.go (new file, 131 lines)
View File

@ -0,0 +1,131 @@
package gitannex
import (
"fmt"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fspath"
)
type configID int
const (
configRemoteName configID = iota
configPrefix
configLayout
)
// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
id configID
names []string
description string
defaultValue string
}
const (
defaultRclonePrefix = "git-annex-rclone"
defaultRcloneLayout = "nodir"
)
var requiredConfigs = []configDefinition{
{
id: configRemoteName,
names: []string{"rcloneremotename", "target"},
description: "Name of the rclone remote to use. " +
"Must match a remote known to rclone. " +
"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
},
{
id: configPrefix,
names: []string{"rcloneprefix", "prefix"},
description: "Directory where rclone will write git-annex content. " +
fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
"This directory will be created on init if it does not exist.",
defaultValue: defaultRclonePrefix,
},
{
id: configLayout,
names: []string{"rclonelayout", "rclone_layout"},
description: "Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
defaultValue: defaultRcloneLayout,
},
}
func (c *configDefinition) getCanonicalName() string {
if len(c.names) < 1 {
panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
}
return c.names[0]
}
// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
if len(c.names) <= 1 {
return c.description
}
// Exclude the canonical name from the list of synonyms.
synonyms := c.names[1:len(c.names)]
commaSeparatedSynonyms := strings.Join(synonyms, ", ")
return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}
// validateRemoteName validates the "rcloneremotename" config that we receive
// from git-annex. It returns nil iff `value` is valid. Otherwise, it returns a
// descriptive error suitable for sending back to git-annex via stdout.
//
// The value is only valid when:
// 1. It is the exact name of an existing remote.
// 2. It is an fspath string that names an existing remote or a backend. The
// string may include options, but it must not include a path. (That's what
// the "rcloneprefix" config is for.)
//
// While backends are not remote names, per se, they are permitted for
// compatibility with [fstest]. We could guard this behavior behind
// [testing.Testing] to prevent users from specifying backend strings, but
// there's no obvious harm in permitting it.
func validateRemoteName(value string) error {
remoteNames := config.GetRemoteNames()
// Check whether `value` is an exact match for an existing remote.
//
// If we checked whether [cache.Get] returns [fs.ErrorNotFoundInConfigFile],
// we would incorrectly identify file names as valid remote names. We also
// avoid [config.FileSections] because it will miss remotes that are defined
// by environment variables.
if slices.Contains(remoteNames, value) {
return nil
}
parsed, err := fspath.Parse(value)
if err != nil {
return fmt.Errorf("remote could not be parsed: %s", value)
}
if parsed.Path != "" {
return fmt.Errorf("remote does not exist or incorrectly contains a path: %s", value)
}
// Now that we've established `value` is an fspath string that does not
// include a path component, we only need to check whether it names an
// existing remote or backend.
if slices.Contains(remoteNames, parsed.Name) {
return nil
}
maybeBackend := strings.HasPrefix(value, ":")
if !maybeBackend {
return fmt.Errorf("remote does not exist: %s", value)
}
// Strip the leading colon before searching for the backend. For instance,
// search for "local" instead of ":local". Note that `parsed.Name` already
// omits any config options baked into the string.
trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
if _, err = fs.Find(trimmedBackendName); err != nil {
return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
}
return nil
}
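A few illustrative calls, matching the doc comment above and the test expectations elsewhere in this commit (the remote names are hypothetical; outcomes assume a config defining a remote named gdrive):

_ = validateRemoteName("gdrive")                    // nil: exact name of an existing remote
_ = validateRemoteName(":local,description=hello:") // nil: backend string; options allowed, no path
_ = validateRemoteName("thisRemoteDoesNotExist")    // error: remote does not exist or incorrectly contains a path
_ = validateRemoteName("gdrive:some/path")          // error: same message, path component not allowed
_ = validateRemoteName(":nonexistentBackend:")      // error: backend does not exist: nonexistentBackend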

View File

@ -28,14 +28,11 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@ -110,35 +107,6 @@ func (m *messageParser) finalParameter() string {
return param
}
// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
names []string
description string
destination *string
defaultValue *string
}
func (c *configDefinition) getCanonicalName() string {
if len(c.names) < 1 {
panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
}
return c.names[0]
}
// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
if len(c.names) <= 1 {
return c.description
}
// Exclude the canonical name from the list of synonyms.
synonyms := c.names[1:len(c.names)]
commaSeparatedSynonyms := strings.Join(synonyms, ", ")
return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}
// server contains this command's current state.
type server struct {
reader *bufio.Reader
@ -274,81 +242,31 @@ func (s *server) handleInitRemote() error {
return fmt.Errorf("failed to get configs: %w", err)
}
// Explicitly check whether [server.configRcloneRemoteName] names a remote.
//
// - We do not permit file paths in the remote name; that's what
// [s.configPrefix] is for. If we simply checked whether [cache.Get]
// returns [fs.ErrorNotFoundInConfigFile], we would incorrectly identify
// file names as valid remote names.
//
// - In order to support remotes defined by environment variables, we must
// use [config.GetRemoteNames] instead of [config.FileSections].
trimmedName := strings.TrimSuffix(s.configRcloneRemoteName, ":")
if slices.Contains(config.GetRemoteNames(), trimmedName) {
s.sendMsg("INITREMOTE-SUCCESS")
return nil
if err := validateRemoteName(s.configRcloneRemoteName); err != nil {
s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
return fmt.Errorf("failed to init remote: %w", err)
}
// Otherwise, check whether [server.configRcloneRemoteName] is actually a
// backend string such as ":local:". These are not remote names, per se, but
// they are permitted for compatibility with [fstest]. We could guard this
// behavior behind [testing.Testing] to prevent users from specifying
// backend strings, but there's no obvious harm in permitting it.
maybeBackend := strings.HasPrefix(s.configRcloneRemoteName, ":")
if !maybeBackend {
s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
return fmt.Errorf("remote does not exist: %s", s.configRcloneRemoteName)
}
parsed, err := fspath.Parse(s.configRcloneRemoteName)
if err != nil {
s.sendMsg("INITREMOTE-FAILURE remote could not be parsed as a backend: " + s.configRcloneRemoteName)
return fmt.Errorf("remote could not be parsed as a backend: %s", s.configRcloneRemoteName)
}
if parsed.Path != "" {
s.sendMsg("INITREMOTE-FAILURE backend must not have a path: " + s.configRcloneRemoteName)
return fmt.Errorf("backend must not have a path: %s", s.configRcloneRemoteName)
}
// Strip the leading colon and options before searching for the backend,
// i.e. search for "local" instead of ":local,description=hello:/tmp/foo".
trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
if _, err = fs.Find(trimmedBackendName); err != nil {
s.sendMsg("INITREMOTE-FAILURE backend does not exist: " + trimmedBackendName)
return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
if mode := parseLayoutMode(s.configRcloneLayout); mode == layoutModeUnknown {
err := fmt.Errorf("unknown layout mode: %s", s.configRcloneLayout)
s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
return fmt.Errorf("failed to init remote: %w", err)
}
s.sendMsg("INITREMOTE-SUCCESS")
return nil
}
// Get a list of configs with pointers to fields of `s`.
func (s *server) getRequiredConfigs() []configDefinition {
defaultRclonePrefix := "git-annex-rclone"
defaultRcloneLayout := "nodir"
return []configDefinition{
{
[]string{"rcloneremotename", "target"},
"Name of the rclone remote to use. " +
"Must match a remote known to rclone. " +
"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
&s.configRcloneRemoteName,
nil,
},
{
[]string{"rcloneprefix", "prefix"},
"Directory where rclone will write git-annex content. " +
fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
"This directory will be created on init if it does not exist.",
&s.configPrefix,
&defaultRclonePrefix,
},
{
[]string{"rclonelayout", "rclone_layout"},
"Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
&s.configRcloneLayout,
&defaultRcloneLayout,
},
func (s *server) mustSetConfigValue(id configID, value string) {
switch id {
case configRemoteName:
s.configRcloneRemoteName = value
case configPrefix:
s.configPrefix = value
case configLayout:
s.configRcloneLayout = value
default:
panic(fmt.Errorf("unhandled configId: %v", id))
}
}
@ -360,8 +278,8 @@ func (s *server) queryConfigs() error {
// Send a "GETCONFIG" message for each required config and parse git-annex's
// "VALUE" response.
for _, config := range s.getRequiredConfigs() {
var valueReceived bool
queryNextConfig:
for _, config := range requiredConfigs {
// Try each of the config's names in sequence, starting with the
// canonical name.
for _, configName := range config.names {
@ -377,19 +295,15 @@ func (s *server) queryConfigs() error {
return fmt.Errorf("failed to parse config value: %s %s", valueKeyword, message.line)
}
value := message.finalParameter()
if value != "" {
*config.destination = value
valueReceived = true
break
if value := message.finalParameter(); value != "" {
s.mustSetConfigValue(config.id, value)
continue queryNextConfig
}
}
if !valueReceived {
if config.defaultValue == nil {
return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
}
*config.destination = *config.defaultValue
if config.defaultValue == "" {
return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
}
s.mustSetConfigValue(config.id, config.defaultValue)
}
s.configsDone = true
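In protocol terms, the loop above sends GETCONFIG for each of a config's names in turn, moving on as soon as git-annex answers with a non-empty VALUE. An illustrative exchange for a remote name supplied under the synonym rather than the canonical name (the value mydrive is hypothetical):

GETCONFIG rcloneremotename
VALUE
GETCONFIG target
VALUE mydrive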
@ -408,7 +322,7 @@ func (s *server) handlePrepare() error {
// Git-annex is asking us to return the list of settings that we use. Keep this
// in sync with `handlePrepare()`.
func (s *server) handleListConfigs() {
for _, config := range s.getRequiredConfigs() {
for _, config := range requiredConfigs {
s.sendMsg(fmt.Sprintf("CONFIG %s %s", config.getCanonicalName(), config.fullDescription()))
}
s.sendMsg("CONFIGEND")
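Given requiredConfigs, this emits one CONFIG line per definition via getCanonicalName and fullDescription, then CONFIGEND. Abbreviated output:

CONFIG rcloneremotename (synonyms: target) Name of the rclone remote to use. ...
CONFIG rcloneprefix (synonyms: prefix) Directory where rclone will write git-annex content. ...
CONFIG rclonelayout (synonyms: rclone_layout) Defines where, within the rcloneprefix directory, rclone will write git-annex content. ...
CONFIGEND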

View File

@ -190,14 +190,10 @@ func TestMessageParser(t *testing.T) {
}
func TestConfigDefinitionOneName(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo"},
description: "The foo config is utterly useless.",
destination: &parsed,
defaultValue: &defaultValue,
defaultValue: "abc",
}
assert.Equal(t, "foo",
@ -209,14 +205,10 @@ func TestConfigDefinitionOneName(t *testing.T) {
}
func TestConfigDefinitionTwoNames(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo", "bar"},
description: "The foo config is utterly useless.",
destination: &parsed,
defaultValue: &defaultValue,
defaultValue: "abc",
}
assert.Equal(t, "foo",
@ -228,14 +220,10 @@ func TestConfigDefinitionTwoNames(t *testing.T) {
}
func TestConfigDefinitionThreeNames(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo", "bar", "baz"},
description: "The foo config is utterly useless.",
destination: &parsed,
defaultValue: &defaultValue,
defaultValue: "abc",
}
assert.Equal(t, "foo",
@ -503,7 +491,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
@ -513,6 +501,35 @@ var fstestTestCases = []testCase{
require.NoError(t, h.mockStdinW.Close())
},
},
{
label: "HandlesPrepareWithUnknownLayout",
testProtocolFunc: func(t *testing.T, h *testState) {
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE " + h.remoteName)
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE nonexistentLayoutMode")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
require.Equal(t, h.server.configPrefix, h.remotePrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE unknown layout mode: nonexistentLayoutMode")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "unknown layout mode: nonexistentLayoutMode",
},
{
label: "HandlesPrepareWithNonexistentRemote",
testProtocolFunc: func(t *testing.T, h *testState) {
@ -528,7 +545,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, "thisRemoteDoesNotExist")
@ -536,11 +553,11 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist: thisRemoteDoesNotExist")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist: thisRemoteDoesNotExist",
expectedError: "remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist",
},
{
label: "HandlesPrepareWithPathAsRemote",
@ -557,7 +574,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remotePrefix)
@ -567,13 +584,13 @@ var fstestTestCases = []testCase{
h.requireWriteLine("INITREMOTE")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist: "),
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: "),
h.requireReadLine(),
)
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist:",
expectedError: "remote does not exist or incorrectly contains a path:",
},
{
label: "HandlesPrepareWithNonexistentBackendAsRemote",
@ -585,7 +602,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":nonexistentBackend:", h.server.configRcloneRemoteName)
@ -609,7 +626,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local:", h.server.configRcloneRemoteName)
@ -632,7 +649,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local", h.server.configRcloneRemoteName)
@ -640,11 +657,11 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed as a backend: :local")
h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed: :local")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote could not be parsed as a backend:",
expectedError: "remote could not be parsed:",
},
{
label: "HandlesPrepareWithBackendContainingOptionsAsRemote",
@ -656,7 +673,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local,description=banana:", h.server.configRcloneRemoteName)
@ -679,7 +696,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local,description=banana:/bad/path", h.server.configRcloneRemoteName)
@ -687,14 +704,38 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE backend must not have a path: "),
h.requireReadLine(),
)
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: :local,description=banana:/bad/path")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist or incorrectly contains a path:",
},
{
label: "HandlesPrepareWithRemoteContainingOptions",
testProtocolFunc: func(t *testing.T, h *testState) {
const envVar = "RCLONE_CONFIG_fake_remote_TYPE"
require.NoError(t, os.Setenv(envVar, "memory"))
t.Cleanup(func() { require.NoError(t, os.Unsetenv(envVar)) })
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE fake_remote,banana=yes:")
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, "fake_remote,banana=yes:", h.server.configRcloneRemoteName)
require.Equal(t, "/foo", h.server.configPrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-SUCCESS")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "backend must not have a path:",
},
{
label: "HandlesPrepareWithSynonyms",
@ -715,7 +756,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE foo")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)

View File

@ -11,6 +11,8 @@ import (
"testing"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfstest"
"github.com/stretchr/testify/require"
@ -38,7 +40,7 @@ func TestMount(t *testing.T) {
nfs.Opt.HandleCacheDir = t.TempDir()
require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
// Check we can create a handler
_, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
_, err := nfs.NewHandler(context.Background(), vfs.New(object.MemoryFs, nil), &nfs.Opt)
if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
}

View File

@ -3,6 +3,7 @@ package dlna
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"net"
@ -19,9 +20,12 @@ import (
"github.com/anacrolix/dms/upnp"
"github.com/anacrolix/log"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/dlna/data"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
@ -29,9 +33,63 @@ import (
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}
// Opt contains the options for DLNA serving.
var Opt Options
func init() {
dlnaflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
serve.Command.AddCommand(Command)
serve.AddRc("dlna", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt)
})
}
// Command definition for cobra.
@ -53,7 +111,19 @@ Rclone will add external subtitle files (.srt) to videos if they have the same
filename as the video file itself (except the extension), either in the same
directory as the video, or in a "Subs" subdirectory.
` + dlnaflags.Help + vfs.Help(),
### Server options
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen on all
IPs.
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
` + vfs.Help(),
Annotations: map[string]string{
"versionIntroduced": "v1.46",
"groups": "Filter",
@ -63,16 +133,12 @@ directory as the video, or in a "Subs" subdirectory.
f := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
s, err := newServer(f, &dlnaflags.Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt)
if err != nil {
return err
}
if err := s.Serve(); err != nil {
return err
}
defer systemd.Notify()()
s.Wait()
return nil
return s.Serve()
})
},
}
@ -108,7 +174,7 @@ type server struct {
vfs *vfs.VFS
}
func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options) (*server, error) {
friendlyName := opt.FriendlyName
if friendlyName == "" {
friendlyName = makeDefaultFriendlyName()
@ -137,7 +203,7 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
waitChan: make(chan struct{}),
httpListenAddr: opt.ListenAddr,
f: f,
vfs: vfs.New(f, &vfscommon.Opt),
vfs: vfs.New(f, vfsOpt),
}
s.services = map[string]UPnPService{
@ -168,6 +234,19 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
http.FileServer(data.Assets))))
s.handler = logging(withHeader("Server", serverField, r))
// Currently, the SSDP server only listens on an IPv4 multicast address.
// Differentiate between two INADDR_ANY addresses,
// so that 0.0.0.0 can only listen on IPv4 addresses.
network := "tcp4"
if strings.Count(s.httpListenAddr, ":") > 1 {
network = "tcp"
}
listener, err := net.Listen(network, s.httpListenAddr)
if err != nil {
return nil, err
}
s.HTTPConn = listener
return s, nil
}
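The colon count distinguishes plain IPv4 host:port addresses from bracketed IPv6 literals. A quick sketch of how the two default-style addresses classify:

    strings.Count("0.0.0.0:7879", ":") // 1 -> network "tcp4"
    strings.Count("[::]:7879", ":")    // 3 -> network "tcp"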
@ -288,24 +367,9 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
http.ServeContent(w, r, remotePath, node.ModTime(), in)
}
// Serve runs the server - returns the error only if
// the listener was not started; does not block, so
// use s.Wait() to block on the listener indefinitely.
// Serve runs the server - returns the error only if the listener was
// not started. Blocks until the server is closed.
func (s *server) Serve() (err error) {
if s.HTTPConn == nil {
// Currently, the SSDP server only listens on an IPv4 multicast address.
// Differentiate between two INADDR_ANY addresses,
// so that 0.0.0.0 can only listen on IPv4 addresses.
network := "tcp4"
if strings.Count(s.httpListenAddr, ":") > 1 {
network = "tcp"
}
s.HTTPConn, err = net.Listen(network, s.httpListenAddr)
if err != nil {
return
}
}
go func() {
s.startSSDP()
}()
@ -319,6 +383,7 @@ func (s *server) Serve() (err error) {
}
}()
s.Wait()
return nil
}
@ -327,13 +392,19 @@ func (s *server) Wait() {
<-s.waitChan
}
func (s *server) Close() {
// Shutdown the DLNA server
func (s *server) Shutdown() error {
err := s.HTTPConn.Close()
if err != nil {
fs.Errorf(s.f, "Error closing HTTP server: %v", err)
return
}
close(s.waitChan)
if err != nil {
return fmt.Errorf("failed to shutdown DLNA server: %w", err)
}
return nil
}
// Return the first address of the server
func (s *server) Addr() net.Addr {
return s.HTTPConn.Addr()
}
// Run SSDP (multicast for server discovery) on all interfaces.

View File

@ -13,11 +13,13 @@ import (
"github.com/anacrolix/dms/soap"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -33,12 +35,14 @@ const (
)
func startServer(t *testing.T, f fs.Fs) {
opt := dlnaflags.Opt
opt := Opt
opt.ListenAddr = testBindAddress
var err error
dlnaServer, err = newServer(f, &opt)
dlnaServer, err = newServer(context.Background(), f, &opt, &vfscommon.Opt)
assert.NoError(t, err)
assert.NoError(t, dlnaServer.Serve())
go func() {
assert.NoError(t, dlnaServer.Serve())
}()
baseURL = "http://" + dlnaServer.HTTPConn.Addr().String()
}
@ -271,3 +275,10 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
}
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "dlna",
"vfs_cache_mode": "off",
})
}

View File

@ -1,69 +0,0 @@
// Package dlnaflags provides utility functionality to DLNA.
package dlnaflags
import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
// Help contains the text for the command line help and manual.
var Help = `### Server options
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
IPs.
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
`
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
}
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}
// Opt contains the options for DLNA serving.
var Opt Options
// AddFlags add the command line flags for DLNA serving.
func AddFlags(flagSet *pflag.FlagSet) {
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

View File

@ -12,6 +12,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
@ -50,6 +51,8 @@ func init() {
// Add common mount/vfs flags
mountlib.AddFlags(cmdFlags)
vfsflags.AddFlags(cmdFlags)
// Register with parent command
serve.Command.AddCommand(Command)
}
// Command definition for cobra

View File

@ -18,13 +18,16 @@ import (
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
@ -70,8 +73,8 @@ type Options struct {
ListenAddr string `config:"addr"` // Port to listen on
PublicIP string `config:"public_ip"` // Public IP address to advertise for passive connections
PassivePorts string `config:"passive_port"` // Passive ports range
BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
BasicPass string `config:"pass"` // password for BasicUser
User string `config:"user"` // single username for basic auth if not using Htpasswd
Pass string `config:"pass"` // password for User
TLSCert string `config:"cert"` // TLS PEM key (concatenation of certificate and CA certificate)
TLSKey string `config:"key"` // TLS PEM Private key
}
@ -88,6 +91,29 @@ func init() {
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
serve.Command.AddCommand(Command)
serve.AddRc("ftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@ -121,18 +147,18 @@ You can set a single username and password with the --user and --pass flags.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
return err
}
return s.serve()
return s.Serve()
})
},
}
@ -157,7 +183,7 @@ func init() {
var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`)
// Make a new FTP to serve the remote
func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*driver, error) {
host, port, err := net.SplitHostPort(opt.ListenAddr)
if err != nil {
return nil, fmt.Errorf("failed to parse host:port from %q", opt.ListenAddr)
@ -172,11 +198,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
ctx: ctx,
opt: *opt,
}
if proxyflags.Opt.AuthProxy != "" {
d.proxy = proxy.New(ctx, &proxyflags.Opt)
if proxy.Opt.AuthProxy != "" {
d.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
d.userPass = make(map[string]string, 16)
} else {
d.globalVFS = vfs.New(f, &vfscommon.Opt)
d.globalVFS = vfs.New(f, vfsOpt)
}
d.useTLS = d.opt.TLSKey != ""
@ -208,20 +234,58 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
return d, nil
}
// serve runs the ftp server
func (d *driver) serve() error {
// Serve runs the FTP server until it is shut down
func (d *driver) Serve() error {
fs.Logf(d.f, "Serving FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
return d.srv.ListenAndServe()
err := d.srv.ListenAndServe()
if err == ftp.ErrServerClosed {
err = nil
}
return err
}
// close stops the ftp server
// Shutdown stops the ftp server
//
//lint:ignore U1000 unused when not building linux
func (d *driver) close() error {
func (d *driver) Shutdown() error {
fs.Logf(d.f, "Stopping FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
return d.srv.Shutdown()
}
// Return the first address of the server
func (d *driver) Addr() net.Addr {
// The FTP server doesn't let us read the listener
// so we have to synthesize the net.Addr here.
// On error we return a zero value or a partially filled address.
addr := &net.TCPAddr{}
// Split host and port
host, port, err := net.SplitHostPort(d.opt.ListenAddr)
if err != nil {
fs.Errorf(nil, "ftp: addr: invalid address format: %v", err)
return addr
}
// Parse port
addr.Port, err = strconv.Atoi(port)
if err != nil {
fs.Errorf(nil, "ftp: addr: invalid port number: %v", err)
}
// Resolve the host to an IP address.
ipAddrs, err := net.LookupIP(host)
if err != nil {
fs.Errorf(nil, "ftp: addr: failed to resolve host: %v", err)
} else if len(ipAddrs) == 0 {
fs.Errorf(nil, "ftp: addr: no IP addresses found for host: %s", host)
} else {
// Choose the first IP address.
addr.IP = ipAddrs[0]
}
return addr
}
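For example, with a typical listen address the synthesized address would look like this (illustrative values; the first resolved IP is used):

    d.opt.ListenAddr = "localhost:2121"
    addr := d.Addr() // -> &net.TCPAddr{IP: 127.0.0.1, Port: 2121}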
// Logger ftp logger output formatted message
type Logger struct{}
@ -269,7 +333,7 @@ func (d *driver) CheckPasswd(sctx *ftp.Context, user, pass string) (ok bool, err
d.userPass[user] = oPass
d.userPassMu.Unlock()
} else {
ok = d.opt.BasicUser == user && (d.opt.BasicPass == "" || d.opt.BasicPass == pass)
ok = d.opt.User == user && (d.opt.Pass == "" || d.opt.Pass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil

View File

@ -12,12 +12,15 @@ import (
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/israce"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
ftp "goftp.io/server/v2"
)
const (
@ -36,19 +39,16 @@ func TestFTP(t *testing.T) {
opt := Opt
opt.ListenAddr = testHOST + ":" + testPORT
opt.PassivePorts = testPASSIVEPORTRANGE
opt.BasicUser = testUSER
opt.BasicPass = testPASS
opt.User = testUSER
opt.Pass = testPASS
w, err := newServer(context.Background(), f, &opt)
w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
assert.NoError(t, err)
quit := make(chan struct{})
go func() {
err := w.serve()
assert.NoError(t, w.Serve())
close(quit)
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
}
}()
// Config for the backend we'll use to connect to the server
@ -61,7 +61,7 @@ func TestFTP(t *testing.T) {
}
return config, func() {
err := w.close()
err := w.Shutdown()
assert.NoError(t, err)
<-quit
}
@ -69,3 +69,13 @@ func TestFTP(t *testing.T) {
servetest.Run(t, "ftp", start)
}
func TestRc(t *testing.T) {
if israce.Enabled {
t.Skip("Skipping under race detector as underlying library is racy")
}
servetest.TestRc(t, rc.Params{
"type": "ftp",
"vfs_cache_mode": "off",
})
}

View File

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path"
@ -15,10 +16,14 @@ import (
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/systemd"
@ -28,6 +33,12 @@ import (
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo).
Add(libhttp.TemplateConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
@ -45,17 +56,42 @@ var DefaultOpt = Options{
// Opt is options set by command line flags
var Opt = DefaultOpt
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "http", Opt: &Opt, Options: OptionsInfo})
}
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, flagPrefix, &Opt.Template)
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@ -81,7 +117,7 @@ control the stats printing.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
@ -89,14 +125,12 @@ control the stats printing.
}
cmd.Run(false, true, command, func() error {
s, err := run(context.Background(), f, Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
}
defer systemd.Notify()()
s.server.Wait()
return nil
return s.Serve()
})
},
}
@ -136,19 +170,19 @@ func (s *HTTP) auth(user, pass string) (value any, err error) {
return VFS, err
}
func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *HTTP, err error) {
s = &HTTP{
f: f,
ctx: ctx,
opt: opt,
opt: *opt,
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, &proxyflags.Opt)
if proxyOpt.AuthProxy != "" {
s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
// override auth
s.opt.Auth.CustomAuthFn = s.auth
} else {
s._vfs = vfs.New(f, &vfscommon.Opt)
s._vfs = vfs.New(f, vfsOpt)
}
s.server, err = libhttp.NewServer(ctx,
@ -168,11 +202,26 @@ func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
router.Get("/*", s.handler)
router.Head("/*", s.handler)
s.server.Serve()
return s, nil
}
// Serve HTTP until the server is shut down
func (s *HTTP) Serve() error {
s.server.Serve()
s.server.Wait()
return nil
}
// Addr returns the first address of the server
func (s *HTTP) Addr() net.Addr {
return s.server.Addr()
}
// Shutdown the server
func (s *HTTP) Shutdown() error {
return s.server.Shutdown()
}
// handler reads incoming requests and dispatches them
func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
isDir := strings.HasSuffix(r.URL.Path, "/")

View File

@ -12,10 +12,13 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -39,13 +42,16 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
},
}
opts.HTTP.ListenAddr = []string{testBindAddress}
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
opts.Auth.BasicUser = testUser
opts.Auth.BasicPass = testPass
}
s, err := run(ctx, f, opts)
s, err := newServer(ctx, f, &opts, &vfscommon.Opt, &proxy.Opt)
require.NoError(t, err, "failed to start server")
go func() {
require.NoError(t, s.Serve())
}()
urls := s.server.URLs()
require.Len(t, urls, 1, "expected one URL")
@ -110,9 +116,9 @@ func testGET(t *testing.T, useProxy bool) {
cmd := "go run " + prog + " " + files
// FIXME this is untidy setting a global variable!
proxyflags.Opt.AuthProxy = cmd
proxy.Opt.AuthProxy = cmd
defer func() {
proxyflags.Opt.AuthProxy = ""
proxy.Opt.AuthProxy = ""
}()
f = nil
@ -267,3 +273,10 @@ func TestGET(t *testing.T) {
func TestAuthProxy(t *testing.T) {
testGET(t, true)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "http",
"vfs_cache_mode": "off",
})
}

View File

@ -3,6 +3,7 @@
package nfs
import (
"bytes"
"crypto/md5"
"encoding/hex"
"errors"
@ -30,6 +31,15 @@ var (
ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH")
)
// Metadata files have the file handle of their source file with this
// suffixed so we can look them up directly from the file handle.
//
// Note that this is 4 bytes - using a non-multiple of 4 will cause
// the Linux NFS client not to be able to read any files.
//
// The value is big endian 0x00000001
var metadataSuffix = []byte{0x00, 0x00, 0x00, 0x01}
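Assuming hashPath yields an MD5 digest of the path (as diskCacheSuffix below implies) and ".metadata" is the configured extension, the two handle layouts come out as:

    // ToHandle("dir/file")          -> md5("dir/file")                  (16 bytes)
    // ToHandle("dir/file.metadata") -> md5("dir/file") + metadataSuffix (20 bytes)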
// Cache controls the file handle cache implementation
type Cache interface {
// ToHandle takes a file and represents it with an opaque handle to reference it.
@ -77,7 +87,9 @@ type diskHandler struct {
write func(fh []byte, cachePath string, fullPath string) ([]byte, error)
read func(fh []byte, cachePath string) ([]byte, error)
remove func(fh []byte, cachePath string) error
handleType int32 //nolint:unused // used by the symlink cache
suffix func(fh []byte) []byte // returns nil for no suffix or the suffix
handleType int32 //nolint:unused // used by the symlink cache
metadata string // extension for metadata
}
// Create a new disk handler
@ -102,6 +114,8 @@ func newDiskHandler(h *Handler) (dh *diskHandler, err error) {
write: dh.diskCacheWrite,
read: dh.diskCacheRead,
remove: dh.diskCacheRemove,
suffix: dh.diskCacheSuffix,
metadata: h.vfs.Opt.MetadataExtension,
}
fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir)
return dh, nil
@ -124,6 +138,17 @@ func (dh *diskHandler) handleToPath(fh []byte) (cachePath string) {
return cachePath
}
// Return true if name represents a metadata file
//
// It also returns the underlying path
func (dh *diskHandler) isMetadataFile(name string) (rawName string, found bool) {
if dh.metadata == "" {
return name, false
}
rawName, found = strings.CutSuffix(name, dh.metadata)
return rawName, found
}
// ToHandle takes a file and represents it with an opaque handle to reference it.
// In stateless nfs (when it's serving a unix fs) this can be the device + inode
// but we can generalize with a stateful local cache of handed out IDs.
@ -131,6 +156,8 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
dh.mu.Lock()
defer dh.mu.Unlock()
fullPath := path.Join(splitPath...)
// metadata file has file handle of original file
fullPath, isMetadataFile := dh.isMetadataFile(fullPath)
fh = hashPath(fullPath)
cachePath := dh.handleToPath(fh)
cacheDir := filepath.Dir(cachePath)
@ -144,6 +171,10 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
fs.Errorf("nfs", "Couldn't create cache file handle: %v", err)
return fh
}
// metadata file handle is suffixed with metadataSuffix
if isMetadataFile {
fh = append(fh, metadataSuffix...)
}
return fh
}
@ -152,18 +183,43 @@ func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath stri
return fh, os.WriteFile(cachePath, []byte(fullPath), 0600)
}
var errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
var (
errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
)
// Test to see if a fh is a metadata handle and if so return the underlying handle
func (dh *diskHandler) isMetadataHandle(fh []byte) (isMetadata bool, newFh []byte, err error) {
if dh.metadata == "" {
return false, fh, nil
}
suffix := dh.suffix(fh)
if len(suffix) == 0 {
// OK
return false, fh, nil
} else if bytes.Equal(suffix, metadataSuffix) {
return true, fh[:len(fh)-len(suffix)], nil
}
fs.Errorf("nfs", "Bad file handle suffix %X", suffix)
return false, nil, errStaleHandle
}
// FromHandle converts from an opaque handle to the file it represents
func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []string, err error) {
dh.mu.RLock()
defer dh.mu.RUnlock()
isMetadata, fh, err := dh.isMetadataHandle(fh)
if err != nil {
return nil, nil, err
}
cachePath := dh.handleToPath(fh)
fullPathBytes, err := dh.read(fh, cachePath)
if err != nil {
fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err)
return nil, nil, errStaleHandle
}
if isMetadata {
fullPathBytes = append(fullPathBytes, []byte(dh.metadata)...)
}
splitPath = strings.Split(string(fullPathBytes), "/")
return dh.billyFS, splitPath, nil
}
@ -177,8 +233,16 @@ func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error
func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error {
dh.mu.Lock()
defer dh.mu.Unlock()
isMetadata, fh, err := dh.isMetadataHandle(fh)
if err != nil {
return err
}
if isMetadata {
// Can't invalidate a metadata handle as it is synthetic
return nil
}
cachePath := dh.handleToPath(fh)
err := dh.remove(fh, cachePath)
err = dh.remove(fh, cachePath)
if err != nil {
fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err)
}
@ -190,6 +254,14 @@ func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error {
return os.Remove(cachePath)
}
// Return a suffix for the file handle or nil
func (dh *diskHandler) diskCacheSuffix(fh []byte) []byte {
if len(fh) <= md5.Size {
return nil
}
return fh[md5.Size:]
}
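For illustration, given a hypothetical 16-byte MD5 handle md5fh:

    dh.diskCacheSuffix(md5fh)                            // nil - no suffix
    dh.diskCacheSuffix(append(md5fh, metadataSuffix...)) // {0x00, 0x00, 0x00, 0x01}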
// HandleLimit exports how many file handles can be safely stored by this cache.
func (dh *diskHandler) HandleLimit() int {
return math.MaxInt

View File

@ -5,10 +5,13 @@ package nfs
import (
"context"
"fmt"
"strings"
"sync"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/vfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -18,6 +21,8 @@ const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs
// Check basic CRUD operations
func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
isMetadata := strings.HasSuffix(fileName, ".metadata")
// Check reading a non existent handle returns an error
_, _, err := c.FromHandle([]byte{10})
assert.Error(t, err)
@ -26,6 +31,11 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
splitPath := []string{"dir", fileName}
fh := c.ToHandle(h.billyFS, splitPath)
assert.True(t, len(fh) > 0)
if isMetadata {
assert.Equal(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):])
} else {
assert.NotEqual(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):])
}
// Read the handle back
newFs, newSplitPath, err := c.FromHandle(fh)
@ -43,8 +53,13 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
// Check the handle is gone and returning stale handle error
_, _, err = c.FromHandle(fh)
require.Error(t, err)
assert.Equal(t, errStaleHandle, err)
if !isMetadata {
require.Error(t, err)
assert.Equal(t, errStaleHandle, err)
} else {
// Can't invalidate metadata handles
require.NoError(t, err)
}
}
// Thrash the cache operations in parallel on different files
@ -113,8 +128,10 @@ func TestCache(t *testing.T) {
cacheType := cacheType
t.Run(cacheType.String(), func(t *testing.T) {
h := &Handler{
vfs: vfs.New(object.MemoryFs, nil),
billyFS: billyFS,
}
h.vfs.Opt.MetadataExtension = ".metadata"
h.opt.HandleLimit = 1000
h.opt.HandleCache = cacheType
h.opt.HandleCacheDir = t.TempDir()
@ -151,6 +168,10 @@ func TestCache(t *testing.T) {
t.Run("ThrashSame", func(t *testing.T) {
testCacheThrashSame(t, h, c)
})
// Metadata file handles are only supported on non-memory caches
t.Run("CRUDMetadata", func(t *testing.T) {
testCacheCRUD(t, h, c, "file.metadata")
})
}
})
}

View File

@ -14,8 +14,11 @@ import (
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
@ -83,6 +86,24 @@ func AddFlags(flagSet *pflag.FlagSet) {
func init() {
vfsflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
serve.Command.AddCommand(Command)
serve.AddRc("nfs", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Create VFS
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
VFS := vfs.New(f, &vfsOpt)
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return NewServer(ctx, VFS, &opt)
})
}
// Run the command
@ -169,6 +190,12 @@ Where |$PORT| is the same port number used in the |serve nfs| command
and |$HOSTNAME| is the network address of the machine that |serve nfs|
was run on.
If |--vfs-metadata-extension| is in use then for |--nfs-cache-type disk|
and |--nfs-cache-type cache| the metadata files will have the file
handle of their parent file suffixed with |0x00, 0x00, 0x00, 0x01|.
This means they can be looked up directly from the parent file handle
if desired.
This command is only available on Unix platforms.
`, "|", "`") + vfs.Help(),

19
cmd/serve/nfs/nfs_test.go Normal file
View File

@ -0,0 +1,19 @@
//go:build unix
// The serving is tested in cmd/nfsmount - here we test anything else
package nfs
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs/rc"
)
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "nfs",
"vfs_cache_mode": "off",
})
}

View File

@ -27,6 +27,7 @@ package nfs
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"os"
@ -81,10 +82,36 @@ func (dh *diskHandler) makeSymlinkCache() error {
dh.read = dh.symlinkCacheRead
dh.write = dh.symlinkCacheWrite
dh.remove = dh.symlinkCacheRemove
dh.suffix = dh.symlinkCacheSuffix
return nil
}
// Prefixes a []byte with its length as a 4-byte big-endian integer.
func addLengthPrefix(data []byte) []byte {
length := uint32(len(data))
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, length)
if err != nil {
// This should never fail
panic(err)
}
buf.Write(data)
return buf.Bytes()
}
// Removes the 4-byte big-endian length prefix from a []byte.
func removeLengthPrefix(data []byte) ([]byte, error) {
if len(data) < 4 {
return nil, errors.New("file handle too short")
}
length := binary.BigEndian.Uint32(data[:4])
if int(length) != len(data)-4 {
return nil, errors.New("file handle invalid length")
}
return data[4 : 4+length], nil
}
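A round trip through the two helpers, for illustration:

    b := addLengthPrefix([]byte{0xde, 0xad})
    // b == []byte{0x00, 0x00, 0x00, 0x02, 0xde, 0xad}
    data, err := removeLengthPrefix(b)
    // data == []byte{0xde, 0xad}, err == nil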
// Write the fullPath into cachePath returning the possibly updated fh
//
// This writes the fullPath into the file with the cachePath given and
@ -115,7 +142,8 @@ func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath s
dh.handleType = handle.Type()
}
return handle.Bytes(), nil
// Adjust the raw handle so it has a length prefix
return addLengthPrefix(handle.Bytes()), nil
}
// Read the contents of (fh, cachePath)
@ -128,6 +156,12 @@ func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath s
func (dh *diskHandler) symlinkCacheRead(fh []byte, cachePath string) (fullPath []byte, err error) {
//defer log.Trace(nil, "fh=%x, cachePath=%q", fh, cachePath)("fullPath=%q, err=%v", &fullPath, &err)
// First check and remove the file handle prefix length
fh, err = removeLengthPrefix(fh)
if err != nil {
return nil, fmt.Errorf("symlink cache open by handle at: %w", err)
}
// Find the file with the handle passed in
handle := unix.NewFileHandle(dh.handleType, fh)
fd, err := unix.OpenByHandleAt(unix.AT_FDCWD, handle, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW) // needs O_PATH for symlinks
@ -175,3 +209,15 @@ func (dh *diskHandler) symlinkCacheRemove(fh []byte, cachePath string) error {
return os.Remove(cachePath)
}
// Return a suffix for the file handle or nil
func (dh *diskHandler) symlinkCacheSuffix(fh []byte) []byte {
if len(fh) < 4 {
return nil
}
length := int(binary.BigEndian.Uint32(fh[:4])) + 4
if len(fh) <= length {
return nil
}
return fh[length:]
}
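So a symlink cache handle framing a raw kernel handle H of length n looks like this, and the suffix check skips past the declared length rather than a fixed md5.Size:

    // regular file: fh = bigEndian32(n) + H
    // metadata:     fh = bigEndian32(n) + H + {0x00, 0x00, 0x00, 0x01}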

View File

@ -106,14 +106,23 @@ backend that rclone supports.
`, "|", "`")
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "auth_proxy",
Default: "",
Help: "A program to use to create the backend from the auth",
}}
// Options is options for creating the proxy
type Options struct {
AuthProxy string
AuthProxy string `config:"auth_proxy"`
}
// DefaultOpt is the default values uses for Opt
var DefaultOpt = Options{
AuthProxy: "",
// Opt is the default options
var Opt Options
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "proxy", Opt: &Opt, Options: OptionsInfo})
}
// Proxy represents a proxy to turn auth requests into a VFS
@ -122,6 +131,7 @@ type Proxy struct {
vfsCache *libcache.Cache
ctx context.Context // for global config
Opt Options
vfsOpt vfscommon.Options
}
// cacheEntry is what is stored in the vfsCache
@ -131,12 +141,15 @@ type cacheEntry struct {
}
// New creates a new proxy with the Options passed in
func New(ctx context.Context, opt *Options) *Proxy {
//
// Any VFS created uses the vfsOpt passed in.
func New(ctx context.Context, opt *Options, vfsOpt *vfscommon.Options) *Proxy {
return &Proxy{
ctx: ctx,
Opt: *opt,
cmdLine: strings.Fields(opt.AuthProxy),
vfsCache: libcache.New(),
vfsOpt: *vfsOpt,
}
}
@ -242,7 +255,7 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value any, err error)
// need to in memory. An attacker would find it easier to go
// after the unencrypted password in memory most likely.
entry := cacheEntry{
vfs: vfs.New(f, &vfscommon.Opt),
vfs: vfs.New(f, &p.vfsOpt),
pwHash: sha256.Sum256([]byte(auth)),
}
return entry, true, nil

View File

@ -13,16 +13,17 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ssh"
)
func TestRun(t *testing.T) {
opt := DefaultOpt
opt := Opt
cmd := "go run proxy_code.go"
opt.AuthProxy = cmd
p := New(context.Background(), &opt)
p := New(context.Background(), &opt, &vfscommon.Opt)
t.Run("Normal", func(t *testing.T) {
config, err := p.run(map[string]string{

View File

@ -7,12 +7,7 @@ import (
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = proxy.DefaultOpt
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth", "")
flags.AddFlagsFromOptions(flagSet, "", proxy.OptionsInfo)
}

355
cmd/serve/rc.go Normal file
View File

@ -0,0 +1,355 @@
package serve
import (
"cmp"
"context"
"errors"
"fmt"
"math/rand/v2"
"net"
"slices"
"sort"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/errcount"
)
// Handle describes what a server can do
type Handle interface {
// Addr returns the listening address of the server
Addr() net.Addr
// Shutdown stops the server
Shutdown() error
// Serve starts the server - doesn't return until Shutdown is called.
Serve() (err error)
}
// Describes a running server
type server struct {
ID string `json:"id"` // id of the server
Addr string `json:"addr"` // address of the server
Params rc.Params `json:"params"` // Parameters used to start the server
h Handle `json:"-"` // control the server
errChan chan error `json:"-"` // receive errors from the server process
}
// Fn starts an rclone serve command
type Fn func(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error)
// Globals
var (
// mutex to protect all the variables in this block
serveMu sync.Mutex
// Serve functions available
serveFns = map[string]Fn{}
// Running servers
servers = map[string]*server{}
)
// AddRc adds the named serve function to the rc
func AddRc(name string, serveFunction Fn) {
serveMu.Lock()
defer serveMu.Unlock()
serveFns[name] = serveFunction
}
// q replaces | with ` so that help text can contain backticks
func q(s string) string {
return strings.ReplaceAll(s, "|", "`")
}
func init() {
rc.Add(rc.Call{
Path: "serve/start",
AuthRequired: true,
Fn: startRc,
Title: "Create a new server",
Help: q(`Create a new server with the specified parameters.
This takes the following parameters:
- |type| - type of server: |http|, |webdav|, |ftp|, |sftp|, |nfs|, etc.
- |fs| - remote storage path to serve
- |addr| - the ip:port to run the server on, eg ":1234" or "localhost:1234"
Other parameters are as described in the documentation for the
relevant [rclone serve](/commands/rclone_serve/) command line options.
To translate a command line option to an rc parameter, remove the
leading |--| and replace |-| with |_|, so |--vfs-cache-mode| becomes
|vfs_cache_mode|. Note that global parameters must be set with
|_config| and |_filter| as described above.
Examples:
rclone rc serve/start type=nfs fs=remote: addr=:4321 vfs_cache_mode=full
rclone rc serve/start --json '{"type":"nfs","fs":"remote:","addr":":1234","vfs_cache_mode":"full"}'
This will give the reply
|||json
{
"addr": "[::]:4321", // Address the server was started on
"id": "nfs-ecfc6852" // Unique identifier for the server instance
}
|||
Or an error if it failed to start.
Stop the server with |serve/stop| and list the running servers with |serve/list|.
`),
})
}
// startRc allows the serve command to be run from rc
func startRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
serveType, err := in.GetString("type")
if err != nil {
return nil, err
}
serveMu.Lock()
defer serveMu.Unlock()
serveFn := serveFns[serveType]
if serveFn == nil {
return nil, fmt.Errorf("could not find serve type=%q", serveType)
}
// Get Fs.fs to be served from fs parameter in the params
f, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
// Make a background context and copy the config back.
newCtx := context.Background()
newCtx = fs.CopyConfig(newCtx, ctx)
newCtx = filter.CopyConfig(newCtx, ctx)
// Start the server
h, err := serveFn(newCtx, f, in)
if err != nil {
return nil, fmt.Errorf("could not start serve %q: %w", serveType, err)
}
// Start the server running in the background
errChan := make(chan error, 1)
go func() {
errChan <- h.Serve()
close(errChan)
}()
// Wait for a short length of time to see if an error occurred
select {
case err = <-errChan:
if err == nil {
err = errors.New("server stopped immediately")
}
case <-time.After(100 * time.Millisecond):
err = nil
}
if err != nil {
return nil, fmt.Errorf("error when starting serve %q: %w", serveType, err)
}
// Store it for later
runningServer := server{
ID: fmt.Sprintf("%s-%08x", serveType, rand.Uint32()),
Params: in,
Addr: h.Addr().String(),
h: h,
errChan: errChan,
}
servers[runningServer.ID] = &runningServer
out = rc.Params{
"id": runningServer.ID,
"addr": runningServer.Addr,
}
fs.Debugf(f, "Started serve %s on %s", serveType, runningServer.Addr)
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "serve/stop",
AuthRequired: true,
Fn: stopRc,
Title: "Stop a running server",
Help: q(`Stops a running |serve| instance by ID.
This takes the following parameters:
- id: as returned by serve/start
This will give an empty response if successful or an error if not.
Example:
rclone rc serve/stop id=12345
`),
})
}
// stopRc stops the server process
func stopRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
id, err := in.GetString("id")
if err != nil {
return nil, err
}
serveMu.Lock()
defer serveMu.Unlock()
s := servers[id]
if s == nil {
return nil, fmt.Errorf("server with id=%q not found", id)
}
err = s.h.Shutdown()
<-s.errChan // ignore server return error - likely is "use of closed network connection"
delete(servers, id)
return nil, err
}
func init() {
rc.Add(rc.Call{
Path: "serve/types",
AuthRequired: true,
Fn: serveTypesRc,
Title: "Show all possible serve types",
Help: q(`This shows all possible serve types and returns them as a list.
This takes no parameters and returns
- types: list of serve types, eg "nfs", "sftp", etc
The serve types are strings like "http", "sftp", "nfs" and can
be passed to serve/start as the |type| parameter.
Eg
rclone rc serve/types
Returns
|||json
{
"types": [
"http",
"sftp",
"nfs"
]
}
|||
`),
})
}
// serveTypesRc returns a list of available serve types.
func serveTypesRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var serveTypes = []string{}
serveMu.Lock()
defer serveMu.Unlock()
for serveType := range serveFns {
serveTypes = append(serveTypes, serveType)
}
sort.Strings(serveTypes)
return rc.Params{
"types": serveTypes,
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "serve/list",
AuthRequired: true,
Fn: listRc,
Title: "Show running servers",
Help: q(`Show running servers with IDs.
This takes no parameters and returns
- list: list of running serve commands
Each list element will have
- id: ID of the server
- addr: address the server is running on
- params: parameters used to start the server
Eg
rclone rc serve/list
Returns
|||json
{
"list": [
{
"addr": "[::]:4321",
"id": "nfs-ffc2a4e5",
"params": {
"fs": "remote:",
"opt": {
"ListenAddr": ":4321"
},
"type": "nfs",
"vfsOpt": {
"CacheMode": "full"
}
}
}
]
}
|||
`),
})
}
// listRc returns a list of current serves sorted by serve path
func listRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
serveMu.Lock()
defer serveMu.Unlock()
list := []*server{}
for _, item := range servers {
list = append(list, item)
}
slices.SortFunc(list, func(a, b *server) int {
return cmp.Compare(a.ID, b.ID)
})
return rc.Params{
"list": list,
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "serve/stopall",
AuthRequired: true,
Fn: stopAll,
Title: "Stop all active servers",
Help: q(`Stop all active servers.
This will stop all active servers.
rclone rc serve/stopall
`),
})
}
// stopAll shuts all the servers down
func stopAll(_ context.Context, in rc.Params) (out rc.Params, err error) {
serveMu.Lock()
defer serveMu.Unlock()
ec := errcount.New()
for id, s := range servers {
ec.Add(s.h.Shutdown())
<-s.errChan // ignore server return error - likely is "use of closed network connection"
delete(servers, id)
}
return nil, ec.Err("error when stopping server")
}

180
cmd/serve/rc_test.go Normal file
View File

@ -0,0 +1,180 @@
package serve
import (
"context"
"errors"
"net"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type dummyServer struct {
addr *net.TCPAddr
shutdownCh chan struct{}
shutdownCalled bool
}
func (d *dummyServer) Addr() net.Addr {
return d.addr
}
func (d *dummyServer) Shutdown() error {
d.shutdownCalled = true
close(d.shutdownCh)
return nil
}
func (d *dummyServer) Serve() error {
<-d.shutdownCh
return nil
}
func newServer(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
return &dummyServer{
addr: &net.TCPAddr{
IP: net.IPv4(127, 0, 0, 1),
Port: 8080,
},
shutdownCh: make(chan struct{}),
}, nil
}
func newServerError(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
return nil, errors.New("serve error")
}
func newServerImmediateStop(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
h, _ := newServer(ctx, f, in)
close(h.(*dummyServer).shutdownCh)
return h, nil
}
func resetGlobals() {
serveMu.Lock()
defer serveMu.Unlock()
serveFns = make(map[string]Fn)
servers = make(map[string]*server)
}
func newTest(t *testing.T) {
_, err := fs.Find("mockfs")
if err != nil {
mockfs.Register()
}
resetGlobals()
t.Cleanup(resetGlobals)
}
func TestRcStartServeType(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
in := rc.Params{"fs": ":mockfs:", "type": "nonexistent"}
_, err := serveStart.Fn(context.Background(), in)
assert.ErrorContains(t, err, "could not find serve type")
}
func TestRcStartServeFnError(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
AddRc("error", newServerError)
in := rc.Params{"fs": ":mockfs:", "type": "error"}
_, err := serveStart.Fn(context.Background(), in)
assert.ErrorContains(t, err, "could not start serve")
}
func TestRcStartImmediateStop(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
AddRc("immediate", newServerImmediateStop)
in := rc.Params{"fs": ":mockfs:", "type": "immediate"}
_, err := serveStart.Fn(context.Background(), in)
assert.ErrorContains(t, err, "server stopped immediately")
}
func TestRcStartAndStop(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
serveStop := rc.Calls.Get("serve/stop")
AddRc("dummy", newServer)
in := rc.Params{"fs": ":mockfs:", "type": "dummy"}
out, err := serveStart.Fn(context.Background(), in)
require.NoError(t, err)
id := out["id"].(string)
assert.Contains(t, id, "dummy")
assert.Equal(t, 1, len(servers))
_, err = serveStop.Fn(context.Background(), rc.Params{"id": id})
require.NoError(t, err)
assert.Equal(t, 0, len(servers))
}
func TestRcStopNonexistent(t *testing.T) {
newTest(t)
serveStop := rc.Calls.Get("serve/stop")
_, err := serveStop.Fn(context.Background(), rc.Params{"id": "nonexistent"})
assert.ErrorContains(t, err, "not found")
}
func TestRcServeTypes(t *testing.T) {
newTest(t)
serveTypes := rc.Calls.Get("serve/types")
AddRc("a", newServer)
AddRc("c", newServer)
AddRc("b", newServer)
out, err := serveTypes.Fn(context.Background(), nil)
require.NoError(t, err)
types := out["types"].([]string)
assert.Equal(t, types, []string{"a", "b", "c"})
}
func TestRcList(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
serveList := rc.Calls.Get("serve/list")
AddRc("dummy", newServer)
// Start two servers.
_, err := serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
_, err = serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
// Check list
out, err := serveList.Fn(context.Background(), nil)
require.NoError(t, err)
list := out["list"].([]*server)
assert.Equal(t, 2, len(list))
}
func TestRcStopAll(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
serveStopAll := rc.Calls.Get("serve/stopall")
AddRc("dummy", newServer)
_, err := serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
_, err = serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
assert.Equal(t, 2, len(servers))
_, err = serveStopAll.Fn(context.Background(), nil)
require.NoError(t, err)
assert.Equal(t, 0, len(servers))
}

View File

@ -6,6 +6,7 @@ import (
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"os"
"path"
@ -16,10 +17,13 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
@ -29,37 +33,63 @@ import (
"golang.org/x/net/http2"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "stdio",
Default: false,
Help: "Run an HTTP2 server on stdin/stdout",
}, {
Name: "append_only",
Default: false,
Help: "Disallow deletion of repository data",
}, {
Name: "private_repos",
Default: false,
Help: "Users can only access their private repo",
}, {
Name: "cache_objects",
Default: true,
Help: "Cache listed objects",
}}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
HTTP libhttp.Config
Stdio bool
AppendOnly bool
PrivateRepos bool
CacheObjects bool
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
Auth: libhttp.DefaultAuthCfg(),
HTTP: libhttp.DefaultCfg(),
Stdio bool `config:"stdio"`
AppendOnly bool `config:"append_only"`
PrivateRepos bool `config:"private_repos"`
CacheObjects bool `config:"cache_objects"`
}
// Opt is options set by command line flags
var Opt = DefaultOpt
var Opt Options
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "restic", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout", "")
flags.BoolVarP(flagSet, &Opt.AppendOnly, "append-only", "", false, "Disallow deletion of repository data", "")
flags.BoolVarP(flagSet, &Opt.PrivateRepos, "private-repos", "", false, "Users can only access their private repo", "")
flags.BoolVarP(flagSet, &Opt.CacheObjects, "cache-objects", "", true, "Cache listed objects", "")
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("restic", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read opts
var opt = Opt // set default opts
err := configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
if opt.Stdio {
return nil, errors.New("can't use --stdio via the rc")
}
// Create server
return newServer(ctx, f, &opt)
})
}
// Command definition for cobra
@ -173,17 +203,15 @@ with a path of ` + "`/<username>/`" + `.
httpSrv := &http2.Server{}
opts := &http2.ServeConnOpts{
Handler: s.Server.Router(),
Handler: s.server.Router(),
}
httpSrv.ServeConn(conn, opts)
return nil
}
fs.Logf(s.f, "Serving restic REST API on %s", s.URLs())
fs.Logf(s.f, "Serving restic REST API on %s", s.server.URLs())
defer systemd.Notify()()
s.Wait()
return nil
return s.Serve()
})
},
}
@ -239,10 +267,10 @@ func checkPrivate(next http.Handler) http.Handler {
// server contains everything to run the server
type server struct {
*libhttp.Server
f fs.Fs
cache *cache
opt Options
server *libhttp.Server
f fs.Fs
cache *cache
opt Options
}
func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error) {
@ -255,19 +283,35 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error
if opt.Stdio {
opt.HTTP.ListenAddr = nil
}
s.Server, err = libhttp.NewServer(ctx,
s.server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(opt.HTTP),
libhttp.WithAuth(opt.Auth),
)
if err != nil {
return nil, fmt.Errorf("failed to init server: %w", err)
}
router := s.Router()
router := s.server.Router()
s.Bind(router)
s.Server.Serve()
return s, nil
}
// Serve restic until the server is shut down
func (s *server) Serve() error {
s.server.Serve()
s.server.Wait()
return nil
}
// Return the first address of the server
func (s *server) Addr() net.Addr {
return s.server.Addr()
}
// Shutdown the server
func (s *server) Shutdown() error {
return s.server.Shutdown()
}
// bind helper for main Bind method
func (s *server) bind(router chi.Router) {
router.MethodFunc("GET", "/*", func(w http.ResponseWriter, r *http.Request) {

View File

@ -119,7 +119,7 @@ func TestResticHandler(t *testing.T) {
f := cmd.NewFsSrc([]string{tempdir})
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.Server.Router()
router := s.server.Router()
// create the repo
checkRequest(t, router.ServeHTTP,

View File

@ -41,7 +41,7 @@ func TestResticPrivateRepositories(t *testing.T) {
f := cmd.NewFsSrc([]string{tempdir})
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.Server.Router()
router := s.server.Router()
// Requesting /test/ should allow access
reqs := []*http.Request{

View File

@ -14,7 +14,9 @@ import (
_ "github.com/rclone/rclone/backend/all"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -26,7 +28,7 @@ const (
)
func newOpt() Options {
opt := DefaultOpt
opt := Opt
opt.HTTP.ListenAddr = []string{testBindAddress}
return opt
}
@ -56,7 +58,10 @@ func TestResticIntegration(t *testing.T) {
// Start the server
s, err := newServer(ctx, fremote, &opt)
require.NoError(t, err)
testURL := s.Server.URLs()[0]
go func() {
require.NoError(t, s.Serve())
}()
testURL := s.server.URLs()[0]
defer func() {
_ = s.Shutdown()
}()
@ -136,7 +141,7 @@ func TestListErrors(t *testing.T) {
f := &listErrorFs{Fs: cmd.NewFsSrc([]string{tempdir})}
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.Server.Router()
router := s.server.Router()
req := newRequest(t, "GET", "/test/snapshots/", nil)
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusInternalServerError)})
@ -161,7 +166,7 @@ func TestServeErrors(t *testing.T) {
f := &newObjectErrorFs{Fs: cmd.NewFsSrc([]string{tempdir})}
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.Server.Router()
router := s.server.Router()
f.err = errors.New("oops")
req := newRequest(t, "GET", "/test/config", nil)
@ -170,3 +175,9 @@ func TestServeErrors(t *testing.T) {
f.err = fs.ErrorObjectNotFound
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusNotFound)})
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "restic",
})
}

View File

@ -25,15 +25,13 @@ var (
// s3Backend implements the gofacess3.Backend interface to make an S3
// backend for gofakes3
type s3Backend struct {
opt *Options
s *Server
meta *sync.Map
}
// newBackend creates a new SimpleBucketBackend.
func newBackend(s *Server, opt *Options) gofakes3.Backend {
func newBackend(s *Server) gofakes3.Backend {
return &s3Backend{
opt: opt,
s: s,
meta: new(sync.Map),
}
@ -136,7 +134,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
fobj := entry.(fs.Object)
size := node.Size()
hash := getFileHashByte(fobj)
hash := getFileHashByte(fobj, b.s.etagHashType)
meta := map[string]string{
"Last-Modified": formatHeaderTime(node.ModTime()),
@ -187,7 +185,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
file := node.(*vfs.File)
size := node.Size()
hash := getFileHashByte(fobj)
hash := getFileHashByte(fobj, b.s.etagHashType)
in, err := file.Open(os.O_RDONLY)
if err != nil {

View File

@ -39,7 +39,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
item := &gofakes3.Content{
Key: objectPath,
LastModified: gofakes3.NewContentTime(entry.ModTime()),
ETag: getFileHash(entry),
ETag: getFileHash(entry, b.s.etagHashType),
Size: entry.Size(),
StorageClass: gofakes3.StorageStandard,
}

View File

@ -6,41 +6,86 @@ import (
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
pathBucketMode: true,
hashName: "MD5",
hashType: hash.MD5,
noCleanup: false,
Auth: httplib.DefaultAuthCfg(),
HTTP: httplib.DefaultCfg(),
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "force_path_style",
Default: true,
Help: "If true use path style access, if false use virtual hosted style",
}, {
Name: "etag_hash",
Default: "MD5",
Help: "Which hash to use for the ETag, or auto or blank for off",
}, {
Name: "auth_key",
Default: []string{},
Help: "Set key pair for v4 authorization: access_key_id,secret_access_key",
}, {
Name: "no_cleanup",
Default: false,
Help: "Don't clean up empty folders after objects are deleted",
}}.
Add(httplib.ConfigInfo).
Add(httplib.AuthConfigInfo)
// Options contains options for the s3 Server
type Options struct {
//TODO add more options
ForcePathStyle bool `config:"force_path_style"`
EtagHash string `config:"etag_hash"`
AuthKey []string `config:"auth_key"`
NoCleanup bool `config:"no_cleanup"`
Auth httplib.AuthConfig
HTTP httplib.Config
}
// Opt is options set by command line flags
var Opt = DefaultOpt
var Opt Options
const flagPrefix = ""
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "s3", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
httplib.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
httplib.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access if false use virtual hosted style (default true)", "")
flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off", "")
flags.StringArrayVarP(flagSet, &Opt.authPair, "auth-key", "", Opt.authPair, "Set key pair for v4 authorization: access_key_id,secret_access_key", "")
flags.BoolVarP(flagSet, &Opt.noCleanup, "no-cleanup", "", Opt.noCleanup, "Not to cleanup empty folder after object is deleted", "")
serve.Command.AddCommand(Command)
serve.AddRc("s3", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
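With this registration in place the server can be driven entirely over the rc API. A minimal sketch using the rc call registry directly, mirroring what servetest.TestRc does later in this commit (the path and address are placeholders):

```
package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/cmd/serve/s3" // for the init() registration above
	"github.com/rclone/rclone/fs/rc"
)

func main() {
	ctx := context.Background()
	start := rc.Calls.Get("serve/start")
	out, err := start.Fn(ctx, rc.Params{
		"type": "s3",
		"fs":   "/tmp/data",      // what to serve
		"addr": "localhost:8080", // where to listen
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out["id"], out["addr"]) // id is prefixed "s3-"
	// Stop the server again by id
	stop := rc.Calls.Get("serve/stop")
	if _, err := stop.Fn(ctx, rc.Params{"id": out["id"]}); err != nil {
		panic(err)
	}
}
```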
//go:embed serve_s3.md
@ -63,34 +108,19 @@ var Command = &cobra.Command{
Long: help() + httplib.AuthHelp(flagPrefix) + httplib.Help(flagPrefix) + vfs.Help(),
RunE: func(command *cobra.Command, args []string) error {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
if Opt.hashName == "auto" {
Opt.hashType = f.Hashes().GetOne()
} else if Opt.hashName != "" {
err := Opt.hashType.Set(Opt.hashName)
if err != nil {
return err
}
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt)
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
return err
}
router := s.server.Router()
s.Bind(router)
err = s.Serve()
if err != nil {
return err
}
s.server.Wait()
return nil
return s.Serve()
})
return nil
},

View File

@ -18,15 +18,16 @@ import (
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -36,23 +37,16 @@ const (
)
// Configure and serve the server
func serveS3(f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
func serveS3(t *testing.T, f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
keyid = random.String(16)
keysec = random.String(16)
serveropt := &Options{
HTTP: httplib.DefaultCfg(),
pathBucketMode: true,
hashName: "",
hashType: hash.None,
authPair: []string{fmt.Sprintf("%s,%s", keyid, keysec)},
}
serveropt.HTTP.ListenAddr = []string{endpoint}
w, _ = newServer(context.Background(), f, serveropt)
router := w.server.Router()
w.Bind(router)
_ = w.Serve()
opt := Opt // copy default options
opt.AuthKey = []string{fmt.Sprintf("%s,%s", keyid, keysec)}
opt.HTTP.ListenAddr = []string{endpoint}
w, _ = newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
go func() {
require.NoError(t, w.Serve())
}()
testURL = w.server.URLs()[0]
return
@ -62,7 +56,7 @@ func serveS3(f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
// s3 remote against it.
func TestS3(t *testing.T) {
start := func(f fs.Fs) (configmap.Simple, func()) {
testURL, keyid, keysec, _ := serveS3(f)
testURL, keyid, keysec, _ := serveS3(t, f)
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "s3",
@ -125,7 +119,7 @@ func TestEncodingWithMinioClient(t *testing.T) {
_, err = f.Put(context.Background(), in, obji)
assert.NoError(t, err)
endpoint, keyid, keysec, _ := serveS3(f)
endpoint, keyid, keysec, _ := serveS3(t, f)
testURL, _ := url.Parse(endpoint)
minioClient, err := minio.New(testURL.Host, &minio.Options{
Creds: credentials.NewStaticV4(keyid, keysec, ""),
@ -173,9 +167,9 @@ func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) {
cmd := "go run " + prog + " " + files
// FIXME: this is untidy setting a global variable!
proxyflags.Opt.AuthProxy = cmd
proxy.Opt.AuthProxy = cmd
defer func() {
proxyflags.Opt.AuthProxy = ""
proxy.Opt.AuthProxy = ""
}()
f = nil
@ -188,7 +182,7 @@ func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) {
for _, tt := range cases {
t.Run(tt.description, func(t *testing.T) {
endpoint, keyid, keysec, s := serveS3(f)
endpoint, keyid, keysec, s := serveS3(t, f)
defer func() {
assert.NoError(t, s.server.Shutdown())
}()
@ -296,3 +290,10 @@ func TestListBucketsAuthProxy(t *testing.T) {
testListBuckets(t, cases, true)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "s3",
"vfs_cache_mode": "off",
})
}

View File

@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"strings"
@ -15,7 +16,6 @@ import (
"github.com/rclone/gofakes3"
"github.com/rclone/gofakes3/signature"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
httplib "github.com/rclone/rclone/lib/http"
@ -29,67 +29,71 @@ const (
ctxKeyID ctxKey = iota
)
// Options contains options for the http Server
type Options struct {
//TODO add more options
pathBucketMode bool
hashName string
hashType hash.Type
authPair []string
noCleanup bool
Auth httplib.AuthConfig
HTTP httplib.Config
}
// Server is a s3.FileSystem interface
type Server struct {
server *httplib.Server
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
faker *gofakes3.GoFakeS3
handler http.Handler
proxy *proxy.Proxy
ctx context.Context // for global config
s3Secret string
server *httplib.Server
opt Options
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
faker *gofakes3.GoFakeS3
handler http.Handler
proxy *proxy.Proxy
ctx context.Context // for global config
s3Secret string
etagHashType hash.Type
}
// Make a new S3 Server to serve the remote
func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *Server, err error) {
w := &Server{
f: f,
ctx: ctx,
f: f,
ctx: ctx,
opt: *opt,
etagHashType: hash.None,
}
if len(opt.authPair) == 0 {
if w.opt.EtagHash == "auto" {
w.etagHashType = f.Hashes().GetOne()
} else if w.opt.EtagHash != "" {
err := w.etagHashType.Set(w.opt.EtagHash)
if err != nil {
return nil, err
}
}
if w.etagHashType != hash.None {
fs.Debugf(f, "Using hash %v for ETag", w.etagHashType)
}
if len(opt.AuthKey) == 0 {
fs.Logf("serve s3", "No auth provided so allowing anonymous access")
} else {
w.s3Secret = getAuthSecret(opt.authPair)
w.s3Secret = getAuthSecret(opt.AuthKey)
}
var newLogger logger
w.faker = gofakes3.New(
newBackend(w, opt),
gofakes3.WithHostBucket(!opt.pathBucketMode),
newBackend(w),
gofakes3.WithHostBucket(!opt.ForcePathStyle),
gofakes3.WithLogger(newLogger),
gofakes3.WithRequestID(rand.Uint64()),
gofakes3.WithoutVersioning(),
gofakes3.WithV4Auth(authlistResolver(opt.authPair)),
gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
)
w.handler = http.NewServeMux()
w.handler = w.faker.Server()
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, &proxyflags.Opt)
if proxy.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
// proxy auth middleware
w.handler = proxyAuthMiddleware(w.handler, w)
w.handler = authPairMiddleware(w.handler, w)
} else {
w._vfs = vfs.New(f, &vfscommon.Opt)
w._vfs = vfs.New(f, vfsOpt)
if len(opt.authPair) > 0 {
w.faker.AddAuthKeys(authlistResolver(opt.authPair))
if len(opt.AuthKey) > 0 {
w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
}
}
@ -101,6 +105,9 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
return nil, fmt.Errorf("failed to init server: %w", err)
}
router := w.server.Router()
w.Bind(router)
return w, nil
}
@ -135,13 +142,24 @@ func (w *Server) Bind(router chi.Router) {
router.Handle("/*", w.handler)
}
// Serve serves the s3 server
// Serve serves the s3 server until the server is shut down
func (w *Server) Serve() error {
w.server.Serve()
fs.Logf(w.f, "Starting s3 server on %s", w.server.URLs())
w.server.Wait()
return nil
}
// Addr returns the first address of the server
func (w *Server) Addr() net.Addr {
return w.server.Addr()
}
// Shutdown the server
func (w *Server) Shutdown() error {
return w.server.Shutdown()
}
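The s3, sftp and webdav servers all gain this same Serve/Addr/Shutdown triple in this commit. A minimal sketch of driving such a server generically; the Handle interface here is an assumption inferred from these methods, not a definition taken from this diff:

```
package sketch

import (
	"net"
	"time"
)

// Handle is the assumed common shape: Serve blocks until Shutdown
// is called from another goroutine.
type Handle interface {
	Serve() error
	Addr() net.Addr
	Shutdown() error
}

// runFor serves h for the given duration, then shuts it down,
// returning whichever error occurred first.
func runFor(h Handle, d time.Duration) error {
	errs := make(chan error, 1)
	go func() { errs <- h.Serve() }()
	time.Sleep(d)
	if err := h.Shutdown(); err != nil {
		return err
	}
	return <-errs
}
```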
func authPairMiddleware(next http.Handler, ws *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
accessKey, _ := parseAccessKeyID(r)

View File

@ -36,15 +36,15 @@ func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) {
return dirEntries, nil
}
func getFileHashByte(node any) []byte {
b, err := hex.DecodeString(getFileHash(node))
func getFileHashByte(node any, hashType hash.Type) []byte {
b, err := hex.DecodeString(getFileHash(node, hashType))
if err != nil {
return nil
}
return b
}
func getFileHash(node any) string {
func getFileHash(node any, hashType hash.Type) string {
var o fs.Object
switch b := node.(type) {
@ -59,7 +59,7 @@ func getFileHash(node any) string {
defer func() {
_ = in.Close()
}()
h, err := hash.NewMultiHasherTypes(hash.NewHashSet(Opt.hashType))
h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return ""
}
@ -67,14 +67,14 @@ func getFileHash(node any) string {
if err != nil {
return ""
}
return h.Sums()[Opt.hashType]
return h.Sums()[hashType]
}
o = fsObj
case fs.Object:
o = b
}
hash, err := o.Hash(context.Background(), Opt.hashType)
hash, err := o.Hash(context.Background(), hashType)
if err != nil {
return ""
}

View File

@ -5,44 +5,10 @@ import (
"errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/dlna"
"github.com/rclone/rclone/cmd/serve/docker"
"github.com/rclone/rclone/cmd/serve/ftp"
"github.com/rclone/rclone/cmd/serve/http"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/cmd/serve/restic"
"github.com/rclone/rclone/cmd/serve/s3"
"github.com/rclone/rclone/cmd/serve/sftp"
"github.com/rclone/rclone/cmd/serve/webdav"
"github.com/spf13/cobra"
)
func init() {
Command.AddCommand(http.Command)
if webdav.Command != nil {
Command.AddCommand(webdav.Command)
}
if restic.Command != nil {
Command.AddCommand(restic.Command)
}
if dlna.Command != nil {
Command.AddCommand(dlna.Command)
}
if ftp.Command != nil {
Command.AddCommand(ftp.Command)
}
if sftp.Command != nil {
Command.AddCommand(sftp.Command)
}
if docker.Command != nil {
Command.AddCommand(docker.Command)
}
if nfs.Command != nil {
Command.AddCommand(nfs.Command)
}
if s3.Command != nil {
Command.AddCommand(s3.Command)
}
cmd.Root.AddCommand(Command)
}

77 cmd/serve/servetest/rc.go Normal file
View File

@ -0,0 +1,77 @@
package servetest
import (
"context"
"fmt"
"net"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// GetEphemeralPort opens a listening port on localhost:0, closes it,
// and returns the address as "localhost:port".
func GetEphemeralPort(t *testing.T) string {
listener, err := net.Listen("tcp", "localhost:0") // Listen on any available port
require.NoError(t, err)
defer func() {
require.NoError(t, listener.Close())
}()
return listener.Addr().String()
}
// checkTCP attempts to establish a TCP connection to the given address,
// and closes it if successful. Returns an error if the connection fails.
func checkTCP(address string) error {
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
if err != nil {
return fmt.Errorf("failed to connect to %s: %w", address, err)
}
err = conn.Close()
if err != nil {
return fmt.Errorf("failed to close connection to %s: %w", address, err)
}
return nil
}
// TestRc tests the rc interface for the servers
//
// in should contain any options necessary; this code will add "fs"
// and "addr" itself.
func TestRc(t *testing.T, in rc.Params) {
ctx := context.Background()
dir := t.TempDir()
serveStart := rc.Calls.Get("serve/start")
serveStop := rc.Calls.Get("serve/stop")
name := in["type"].(string)
addr := GetEphemeralPort(t)
// Start the server
in["fs"] = dir
in["addr"] = addr
out, err := serveStart.Fn(ctx, in)
require.NoError(t, err)
id := out["id"].(string)
assert.True(t, strings.HasPrefix(id, name+"-"))
gotAddr := out["addr"].(string)
assert.Equal(t, addr, gotAddr)
// Check we can make a TCP connection to the server
t.Logf("Checking connection on %q", addr)
err = checkTCP(addr)
assert.NoError(t, err)
// Stop the server
_, err = serveStop.Fn(ctx, rc.Params{"id": id})
require.NoError(t, err)
// Check we can no longer make connections to the server
err = checkTCP(addr)
assert.Error(t, err)
}

View File

@ -13,7 +13,7 @@ import (
"strings"
"testing"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
@ -50,9 +50,9 @@ func run(t *testing.T, name string, start StartFn, useProxy bool) {
cmd := "go run " + prog + " " + fremote.Root()
// FIXME this is untidy setting a global variable!
proxyflags.Opt.AuthProxy = cmd
proxy.Opt.AuthProxy = cmd
defer func() {
proxyflags.Opt.AuthProxy = ""
proxy.Opt.AuthProxy = ""
}()
}
config, cleanup := start(f)

View File

@ -16,13 +16,13 @@ import (
"encoding/pem"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/env"
@ -41,23 +41,27 @@ type server struct {
ctx context.Context // for global config
config *ssh.ServerConfig
listener net.Listener
waitChan chan struct{} // for waiting on the listener to close
stopped chan struct{} // for waiting on the listener to stop
proxy *proxy.Proxy
}
func newServer(ctx context.Context, f fs.Fs, opt *Options) *server {
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*server, error) {
s := &server{
f: f,
ctx: ctx,
opt: *opt,
waitChan: make(chan struct{}),
f: f,
ctx: ctx,
opt: *opt,
stopped: make(chan struct{}),
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, &proxyflags.Opt)
if proxy.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
} else {
s.vfs = vfs.New(f, &vfscommon.Opt)
s.vfs = vfs.New(f, vfsOpt)
}
return s
err := s.configure()
if err != nil {
return nil, fmt.Errorf("sftp configuration failed: %w", err)
}
return s, nil
}
// getVFS gets the vfs from s or the proxy
@ -129,17 +133,19 @@ func (s *server) acceptConnections() {
}
}
// configure the server
//
// Based on example server code from golang.org/x/crypto/ssh and server_standalone
func (s *server) serve() (err error) {
func (s *server) configure() (err error) {
var authorizedKeysMap map[string]struct{}
// ensure the user isn't trying to use conflicting flags
if proxyflags.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
if proxy.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
return errors.New("--auth-proxy and --authorized-keys cannot be used at the same time")
}
// Load the authorized keys
if s.opt.AuthorizedKeys != "" && proxyflags.Opt.AuthProxy == "" {
if s.opt.AuthorizedKeys != "" && proxy.Opt.AuthProxy == "" {
authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
// If user set the flag away from the default then report an error
@ -293,42 +299,35 @@ func (s *server) serve() (err error) {
}
}
s.listener = listener
return nil
}
// Serve SFTP until the server is shut down
func (s *server) Serve() (err error) {
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
go s.acceptConnections()
s.acceptConnections()
close(s.stopped)
return nil
}
// Addr returns the address the server is listening on
func (s *server) Addr() string {
return s.listener.Addr().String()
}
// Serve runs the sftp server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *server) Serve() error {
err := s.serve()
if err != nil {
return err
}
return nil
func (s *server) Addr() net.Addr {
return s.listener.Addr()
}
// Wait blocks while the listener is open.
func (s *server) Wait() {
<-s.waitChan
<-s.stopped
}
// Close shuts the running server down
func (s *server) Close() {
// Shutdown shuts the running server down
func (s *server) Shutdown() error {
err := s.listener.Close()
if err != nil {
fs.Errorf(nil, "Error on closing SFTP server: %v", err)
return
if errors.Is(err, io.ErrUnexpectedEOF) {
err = nil
}
close(s.waitChan)
s.Wait()
return err
}
func loadPrivateKey(keyPath string) (ssh.Signer, error) {

View File

@ -5,14 +5,19 @@ package sftp
import (
"context"
"fmt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@ -76,6 +81,29 @@ func init() {
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags(), &Opt)
serve.Command.AddCommand(Command)
serve.AddRc("sftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@ -152,7 +180,7 @@ provided by OpenSSH in this case.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
@ -162,14 +190,12 @@ provided by OpenSSH in this case.
if Opt.Stdio {
return serveStdio(f)
}
s := newServer(context.Background(), f, &Opt)
err := s.Serve()
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
return err
fs.Fatal(nil, fmt.Sprint(err))
}
defer systemd.Notify()()
s.Wait()
return nil
return s.Serve()
})
},
}

View File

@ -14,10 +14,14 @@ import (
"github.com/pkg/sftp"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -45,11 +49,14 @@ func TestSftp(t *testing.T) {
opt.User = testUser
opt.Pass = testPass
w := newServer(context.Background(), f, &opt)
require.NoError(t, w.serve())
w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
require.NoError(t, err)
go func() {
require.NoError(t, w.Serve())
}()
// Read the host and port we started on
addr := w.Addr()
addr := w.Addr().String()
colon := strings.LastIndex(addr, ":")
// Config for the backend we'll use to connect to the server
@ -63,10 +70,18 @@ func TestSftp(t *testing.T) {
// return a stop function
return config, func() {
w.Close()
w.Wait()
assert.NoError(t, w.Shutdown())
}
}
servetest.Run(t, "sftp", start)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "sftp",
"user": "test",
"pass": obscure.MustObscure("test"),
"vfs_cache_mode": "off",
})
}

View File

@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"mime"
"net"
"net/http"
"os"
"path"
@ -17,11 +18,14 @@ import (
chi "github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/systemd"
@ -32,41 +36,65 @@ import (
"golang.org/x/net/webdav"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "etag_hash",
Default: "",
Help: "Which hash to use for the ETag, or auto or blank for off",
}, {
Name: "disable_dir_list",
Default: false,
Help: "Disable HTML directory list on GET request for a directory",
}}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo).
Add(libhttp.TemplateConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
HTTP libhttp.Config
Template libhttp.TemplateConfig
HashName string
HashType hash.Type
DisableGETDir bool
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
Auth: libhttp.DefaultAuthCfg(),
HTTP: libhttp.DefaultCfg(),
Template: libhttp.DefaultTemplateCfg(),
HashType: hash.None,
DisableGETDir: false,
Auth libhttp.AuthConfig
HTTP libhttp.Config
Template libhttp.TemplateConfig
EtagHash string `config:"etag_hash"`
DisableDirList bool `config:"disable_dir_list"`
}
// Opt is options set by command line flags
var Opt = DefaultOpt
var Opt Options
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "webdav", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
flags.StringVarP(flagSet, &Opt.HashName, "etag-hash", "", "", "Which hash to use for the ETag, or auto or blank for off", "")
flags.BoolVarP(flagSet, &Opt.DisableGETDir, "disable-dir-list", "", false, "Disable HTML directory list on GET request for a directory", "")
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("webdav", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newWebDAV(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@ -135,36 +163,19 @@ done by the permissions on the socket.
},
RunE: func(command *cobra.Command, args []string) error {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
if proxy.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
Opt.HashType = hash.None
if Opt.HashName == "auto" {
Opt.HashType = f.Hashes().GetOne()
} else if Opt.HashName != "" {
err := Opt.HashType.Set(Opt.HashName)
if err != nil {
return err
}
}
if Opt.HashType != hash.None {
fs.Debugf(f, "Using hash %v for ETag", Opt.HashType)
}
cmd.Run(false, false, command, func() error {
s, err := newWebDAV(context.Background(), f, &Opt)
if err != nil {
return err
}
err = s.serve()
s, err := newWebDAV(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if err != nil {
return err
}
defer systemd.Notify()()
s.Wait()
return nil
return s.Serve()
})
return nil
},
@ -183,34 +194,47 @@ done by the permissions on the socket.
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type WebDAV struct {
*libhttp.Server
server *libhttp.Server
opt Options
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
webdavhandler *webdav.Handler
proxy *proxy.Proxy
ctx context.Context // for global config
etagHashType hash.Type
}
// check interface
var _ webdav.FileSystem = (*WebDAV)(nil)
// Make a new WebDAV to serve the remote
func newWebDAV(ctx context.Context, f fs.Fs, opt *Options) (w *WebDAV, err error) {
func newWebDAV(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (w *WebDAV, err error) {
w = &WebDAV{
f: f,
ctx: ctx,
opt: *opt,
f: f,
ctx: ctx,
opt: *opt,
etagHashType: hash.None,
}
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, &proxyflags.Opt)
if opt.EtagHash == "auto" {
w.etagHashType = f.Hashes().GetOne()
} else if opt.EtagHash != "" {
err := w.etagHashType.Set(opt.EtagHash)
if err != nil {
return nil, err
}
}
if w.etagHashType != hash.None {
fs.Debugf(f, "Using hash %v for ETag", w.etagHashType)
}
if proxyOpt.AuthProxy != "" {
w.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
// override auth
w.opt.Auth.CustomAuthFn = w.auth
} else {
w._vfs = vfs.New(f, &vfscommon.Opt)
w._vfs = vfs.New(f, vfsOpt)
}
w.Server, err = libhttp.NewServer(ctx,
w.server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(w.opt.HTTP),
libhttp.WithAuth(w.opt.Auth),
libhttp.WithTemplate(w.opt.Template),
@ -230,7 +254,7 @@ func newWebDAV(ctx context.Context, f fs.Fs, opt *Options) (w *WebDAV, err error
}
w.webdavhandler = webdavHandler
router := w.Server.Router()
router := w.server.Router()
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
@ -331,7 +355,7 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
urlPath := r.URL.Path
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if !w.opt.DisableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
if !w.opt.DisableDirList && (r.Method == "GET" || r.Method == "HEAD") && isDir {
w.serveDir(rw, r, remote)
return
}
@ -378,7 +402,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
}
// Make the entries for display
directory := serve.NewDirectory(dirRemote, w.Server.HTMLTemplate())
directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
for _, node := range dirEntries {
if vfscommon.Opt.NoModTime {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
@ -394,15 +418,26 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
directory.Serve(rw, r)
}
// serve runs the http server in the background.
// Serve HTTP until the server is shut down
//
// Use s.Close() and s.Wait() to shutdown server
func (w *WebDAV) serve() error {
w.Serve()
fs.Logf(w.f, "WebDav Server started on %s", w.URLs())
func (w *WebDAV) Serve() error {
w.server.Serve()
fs.Logf(w.f, "WebDav Server started on %s", w.server.URLs())
w.server.Wait()
return nil
}
// Addr returns the first address of the server
func (w *WebDAV) Addr() net.Addr {
return w.server.Addr()
}
// Shutdown the server
func (w *WebDAV) Shutdown() error {
return w.server.Shutdown()
}
// logRequest is called by the webdav module on every request
func (w *WebDAV) logRequest(r *http.Request, err error) {
fs.Infof(r.URL.Path, "%s from %s", r.Method, r.RemoteAddr)
@ -515,16 +550,16 @@ func (h Handle) DeadProps() (map[xml.Name]webdav.Property, error) {
property webdav.Property
properties = make(map[xml.Name]webdav.Property)
)
if h.w.opt.HashType != hash.None {
if h.w.etagHashType != hash.None {
entry := h.Handle.Node().DirEntry()
if o, ok := entry.(fs.Object); ok {
hash, err := o.Hash(h.ctx, h.w.opt.HashType)
hash, err := o.Hash(h.ctx, h.w.etagHashType)
if err == nil {
xmlName.Space = "http://owncloud.org/ns"
xmlName.Local = "checksums"
property.XMLName = xmlName
property.InnerXML = append(property.InnerXML, "<checksum xmlns=\"http://owncloud.org/ns\">"...)
property.InnerXML = append(property.InnerXML, strings.ToUpper(h.w.opt.HashType.String())...)
property.InnerXML = append(property.InnerXML, strings.ToUpper(h.w.etagHashType.String())...)
property.InnerXML = append(property.InnerXML, ':')
property.InnerXML = append(property.InnerXML, hash...)
property.InnerXML = append(property.InnerXML, "</checksum>"...)
@ -577,7 +612,7 @@ type FileInfo struct {
// ETag returns an ETag for the FileInfo
func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
// defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
if fi.w.opt.HashType == hash.None {
if fi.w.etagHashType == hash.None {
return "", webdav.ErrNotImplemented
}
node, ok := (fi.FileInfo).(vfs.Node)
@ -590,7 +625,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
if !ok {
return "", webdav.ErrNotImplemented
}
hash, err := o.Hash(ctx, fi.w.opt.HashType)
hash, err := o.Hash(ctx, fi.w.etagHashType)
if err != nil || hash == "" {
return "", webdav.ErrNotImplemented
}

View File

@ -18,12 +18,14 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/webdav"
@ -48,31 +50,32 @@ var (
func TestWebDav(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := DefaultOpt
opt := Opt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.HTTP.BaseURL = "/prefix"
opt.Auth.BasicUser = testUser
opt.Auth.BasicPass = testPass
opt.Template.Path = testTemplate
opt.HashType = hash.MD5
opt.EtagHash = "MD5"
// Start the server
w, err := newWebDAV(context.Background(), f, &opt)
w, err := newWebDAV(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
require.NoError(t, err)
require.NoError(t, w.serve())
go func() {
require.NoError(t, w.Serve())
}()
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "webdav",
"vendor": "rclone",
"url": w.Server.URLs()[0],
"url": w.server.URLs()[0],
"user": testUser,
"pass": obscure.MustObscure(testPass),
}
return config, func() {
assert.NoError(t, w.Shutdown())
w.Wait()
}
}
@ -98,19 +101,20 @@ func TestHTTPFunction(t *testing.T) {
f, err := fs.NewFs(context.Background(), "../http/testdata/files")
assert.NoError(t, err)
opt := DefaultOpt
opt := Opt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.Template.Path = testTemplate
// Start the server
w, err := newWebDAV(context.Background(), f, &opt)
w, err := newWebDAV(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
assert.NoError(t, err)
require.NoError(t, w.serve())
go func() {
require.NoError(t, w.Serve())
}()
defer func() {
assert.NoError(t, w.Shutdown())
w.Wait()
}()
testURL := w.Server.URLs()[0]
testURL := w.server.URLs()[0]
pause := time.Millisecond
i := 0
for ; i < 10; i++ {
@ -260,3 +264,10 @@ func HelpTestGET(t *testing.T, testURL string) {
checkGolden(t, test.Golden, body)
}
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "webdav",
"vfs_cache_mode": "off",
})
}

View File

@ -948,3 +948,14 @@ put them back in again.` >}}
* Dave Vasilevsky <djvasi@gmail.com> <dave@vasilevsky.ca>
* luzpaz <luzpaz@users.noreply.github.com>
* jack <9480542+jackusm@users.noreply.github.com>
* Jörn Friedrich Dreyer <jfd@butonic.de>
* alingse <alingse@foxmail.com>
* Fernando Fernández <ferferga@hotmail.com>
* eccoisle <167755281+eccoisle@users.noreply.github.com>
* Klaas Freitag <kraft@freisturz.de>
* Danny Garside <dannygarside@outlook.com>
* Samantha Bowen <sam@bbowen.net>
* simonmcnair <101189766+simonmcnair@users.noreply.github.com>
* huanghaojun <jasen.huang@ugreen.com>
* Enduriel <endur1el@protonmail.com>
* Markus Gerstel <markus.gerstel@osirium.com>

View File

@ -87,7 +87,7 @@ machine with no Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Box. This only runs from the moment it opens
your browser to the moment you get back the verification code. This
is on `http://127.0.0.1:53682/` and this it may require you to unblock
is on `http://127.0.0.1:53682/` and this may require you to unblock
it temporarily if you are running a host firewall.
Once configured you can then use `rclone` like this,

View File

@ -206,6 +206,28 @@ Properties:
- Type: Duration
- Default: 0s
#### --cloudinary-adjust-media-files-extensions
Cloudinary handles media formats as a file attribute and strips them from the name, unlike most other file systems.
Properties:
- Config: adjust_media_files_extensions
- Env Var: RCLONE_CLOUDINARY_ADJUST_MEDIA_FILES_EXTENSIONS
- Type: bool
- Default: true
#### --cloudinary-media-extensions
Cloudinary supported media extensions
Properties:
- Config: media_extensions
- Env Var: RCLONE_CLOUDINARY_MEDIA_EXTENSIONS
- Type: stringArray
- Default: [3ds 3g2 3gp ai arw avi avif bmp bw cr2 cr3 djvu dng eps3 fbx flif flv gif glb gltf hdp heic heif ico indd jp2 jpe jpeg jpg jxl jxr m2ts mov mp4 mpeg mts mxf obj ogv pdf ply png psd svg tga tif tiff ts u3ma usdz wdp webm webp wmv]
#### --cloudinary-description
Description of the remote.

View File

@ -45,7 +45,7 @@ on the host.
The _FUSE_ driver is a prerequisite for rclone mounting and should be
installed on host:
```
sudo apt-get -y install fuse
sudo apt-get -y install fuse3
```
Create two directories required by rclone docker plugin:

View File

@ -1448,6 +1448,19 @@ backends and the VFS. There are individual flags for just enabling it
for the VFS `--vfs-links` and the local backend `--local-links` if
required.
### --list-cutoff N {#list-cutoff}
When syncing rclone needs to sort directory entries before comparing
them. Below this threshold (1,000,000 by default) rclone will store
the directory entries in memory. 1,000,000 entries will take approx
1GB of RAM to store. Above this threshold rclone will store directory
entries on disk and sort them without using a lot of memory.
Doing this is slightly less efficient than sorting them in memory and
will only work well for the bucket-based backends (e.g. s3, b2,
azureblob, swift), but these are the only backends likely to have
millions of entries in a directory.
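For example, to make a sync spill directory listings to disk sooner than the default (the remote names are placeholders):
```
rclone sync --list-cutoff 100000 s3:src-bucket s3:dst-bucket
```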
### --log-file=FILE ###
Log all of rclone's output to FILE. This is not active by default.

View File

@ -233,12 +233,18 @@ value, say `export GOGC=20`. This will make the garbage collector
work harder, reducing memory size at the expense of CPU usage.
The most common cause of rclone using lots of memory is a single
directory with millions of files in. Rclone has to load this entirely
into memory as rclone objects. Each rclone object takes 0.5k-1k of
memory. There is
directory with millions of files in.
Before rclone v1.70, rclone had to load this entirely into memory as
rclone objects. Each rclone object takes 0.5k-1k of memory. There is
[a workaround for this](https://github.com/rclone/rclone/wiki/Big-syncs-with-millions-of-files)
which involves a bit of scripting.
However, with rclone v1.70 and later, rclone will automatically save
directory entries to disk when a directory with more than
[`--list-cutoff`](/docs/#list-cutoff) (1,000,000 by default) entries
is detected.
From v1.70 rclone also has the [--max-buffer-memory](/docs/#max-buffer-memory)
flag which helps particularly when multi-thread transfers are using
too much memory.

View File

@ -432,7 +432,7 @@ format. Each block describes a single option.
| Field | Type | Optional | Description |
|-------|------|----------|-------------|
| Name | string | N | name of the option in snake_case |
| FieldName | string | N | name of the field used in the rc - if blank use Name |
| FieldName | string | N | name of the field used in the rc - if blank use Name. May contain "." for nested fields. |
| Help | string | N | help, started with a single sentence on a single line |
| Groups | string | Y | groups this option belongs to - comma separated string for options classification |
| Provider | string | Y | set to filter on provider |

View File

@ -771,7 +771,8 @@ tries to access data from the glacier storage class you will see an error like b
2017/09/11 19:07:43 Failed to sync: failed to open source object: Object in GLACIER, restore first: path/to/file
In this case you need to [restore](http://docs.aws.amazon.com/AmazonS3/latest/user-guide/restore-archived-objects.html)
the object(s) in question before using rclone.
the object(s) in question before accessing object contents.
The [restore](#restore) section below shows how to do this with rclone.
Note that rclone only speaks the S3 API it does not speak the Glacier
Vault API, so rclone cannot directly access Glacier Vaults.

View File

@ -28,6 +28,14 @@
<a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview-banner.svg"></a><br />
</div>
</div>
<div class="card">
<div class="card-header" style="padding: 5px 15px;">
Silver Sponsor
</div>
<div class="card-body">
<a href="https://rcloneui.com" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
</div>
</div>
{{end}}
<div class="card">

View File

@ -19,6 +19,7 @@ type RcloneCollector struct {
deletes *prometheus.Desc
deletedDirs *prometheus.Desc
renames *prometheus.Desc
listed *prometheus.Desc
fatalError *prometheus.Desc
retryError *prometheus.Desc
}
@ -59,6 +60,10 @@ func NewRcloneCollector(ctx context.Context) *RcloneCollector {
"Total number of files renamed",
nil, nil,
),
listed: prometheus.NewDesc(namespace+"entries_listed_total",
"Total number of entries listed",
nil, nil,
),
fatalError: prometheus.NewDesc(namespace+"fatal_error",
"Whether a fatal error has occurred",
nil, nil,
@ -80,6 +85,7 @@ func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- c.deletes
ch <- c.deletedDirs
ch <- c.renames
ch <- c.listed
ch <- c.fatalError
ch <- c.retryError
}
@ -97,6 +103,7 @@ func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(c.deletes, prometheus.CounterValue, float64(s.deletes))
ch <- prometheus.MustNewConstMetric(c.deletedDirs, prometheus.CounterValue, float64(s.deletedDirs))
ch <- prometheus.MustNewConstMetric(c.renames, prometheus.CounterValue, float64(s.renames))
ch <- prometheus.MustNewConstMetric(c.listed, prometheus.CounterValue, float64(s.listed))
ch <- prometheus.MustNewConstMetric(c.fatalError, prometheus.GaugeValue, bool2Float(s.fatalError))
ch <- prometheus.MustNewConstMetric(c.retryError, prometheus.GaugeValue, bool2Float(s.retryError))
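A minimal sketch of exposing these metrics, assuming the standard client_golang registration API; the collector constructor is the one shown above, which lives in fs/accounting:

```
package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rclone/rclone/fs/accounting"
)

func main() {
	reg := prometheus.NewRegistry()
	// Includes the new entries_listed_total counter described above.
	reg.MustRegister(accounting.NewRcloneCollector(context.Background()))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9090", nil)
}
```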

View File

@ -46,6 +46,7 @@ type StatsInfo struct {
transferring *transferMap
transferQueue int
transferQueueSize int64
listed int64
renames int64
renameQueue int
renameQueueSize int64
@ -117,6 +118,7 @@ func (s *StatsInfo) RemoteStats(short bool) (out rc.Params, err error) {
out["deletes"] = s.deletes
out["deletedDirs"] = s.deletedDirs
out["renames"] = s.renames
out["listed"] = s.listed
out["elapsedTime"] = time.Since(s.startTime).Seconds()
out["serverSideCopies"] = s.serverSideCopies
out["serverSideCopyBytes"] = s.serverSideCopyBytes
@ -500,9 +502,9 @@ func (s *StatsInfo) String() string {
_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
s.errors, errorDetails)
}
if s.checks != 0 || ts.totalChecks != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
s.checks, ts.totalChecks, percent(s.checks, ts.totalChecks))
if s.checks != 0 || ts.totalChecks != 0 || s.listed != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s, Listed %d\n",
s.checks, ts.totalChecks, percent(s.checks, ts.totalChecks), s.listed)
}
if s.deletes != 0 || s.deletedDirs != 0 {
_, _ = fmt.Fprintf(buf, "Deleted: %10d (files), %d (dirs), %s (freed)\n", s.deletes, s.deletedDirs, fs.SizeSuffix(s.deletesSize).ByteUnit())
@ -718,7 +720,15 @@ func (s *StatsInfo) Renames(renames int64) int64 {
return s.renames
}
// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames) to 0 and resets lastError, fatalError and retryError
// Listed updates the stats for listed objects
func (s *StatsInfo) Listed(listed int64) int64 {
s.mu.Lock()
defer s.mu.Unlock()
s.listed += listed
return s.listed
}
// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames, listed) to 0 and resets lastError, fatalError and retryError
func (s *StatsInfo) ResetCounters() {
s.mu.Lock()
defer s.mu.Unlock()
@ -734,6 +744,7 @@ func (s *StatsInfo) ResetCounters() {
s.deletesSize = 0
s.deletedDirs = 0
s.renames = 0
s.listed = 0
s.startedTransfers = nil
s.oldDuration = 0

View File

@ -96,6 +96,7 @@ Returns the following values:
"fatalError": boolean whether there has been at least one fatal error,
"lastError": last error string,
"renames" : number of files renamed,
"listed" : number of directory entries listed,
"retryError": boolean showing whether there has been at least one non-NoRetryError,
"serverSideCopies": number of server side copies done,
"serverSideCopyBytes": number bytes server side copied,
@ -383,6 +384,7 @@ func (sg *statsGroups) sum(ctx context.Context) *StatsInfo {
sum.transfers += stats.transfers
sum.transferring.merge(stats.transferring)
sum.transferQueueSize += stats.transferQueueSize
sum.listed += stats.listed
sum.renames += stats.renames
sum.renameQueue += stats.renameQueue
sum.renameQueueSize += stats.renameQueueSize

View File

@ -277,6 +277,11 @@ var ConfigOptionsInfo = Options{{
Default: false,
Help: "Use recursive list if available; uses more memory but fewer transactions",
Groups: "Listing",
}, {
Name: "list_cutoff",
Default: 1_000_000,
Help: "To save memory, sort directory listings on disk above this threshold",
Groups: "Sync",
}, {
Name: "tpslimit",
Default: 0.0,
@ -616,6 +621,7 @@ type ConfigInfo struct {
Suffix string `config:"suffix"`
SuffixKeepExtension bool `config:"suffix_keep_extension"`
UseListR bool `config:"fast_list"`
ListCutoff int `config:"list_cutoff"`
BufferSize SizeSuffix `config:"buffer_size"`
BwLimit BwTimetable `config:"bwlimit"`
BwLimitFile BwTimetable `config:"bwlimit_file"`

View File

@ -86,6 +86,62 @@ func StringToInterface(def any, in string) (newValue any, err error) {
return newValue, nil
}
// InterfaceToString turns in into a string
//
// This supports a subset of builtin types, string, integer types,
// bool, time.Duration and []string.
//
// Builtin types are expected to be encoded as their natural
// stringifications as produced by fmt.Sprint, except for []string which
// is expected to be encoded as a CSV with the empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
func InterfaceToString(in any) (strValue string, err error) {
switch x := in.(type) {
case string:
// return strings unmodified
strValue = x
case int, int8, int16, int32, int64,
uint, uint8, uint16, uint32, uint64, uintptr,
float32, float64:
strValue = fmt.Sprint(in)
case bool:
strValue = fmt.Sprint(in)
case time.Duration:
strValue = fmt.Sprint(in)
case []string:
// CSV encode arrays of strings - ideally we would use
// fs.CommaSepList here but we can't as it would cause
// a circular import.
if len(x) == 0 {
strValue = ""
} else if len(x) == 1 && len(x[0]) == 0 {
strValue = `""`
} else {
var buf strings.Builder
w := csv.NewWriter(&buf)
err := w.Write(x)
if err != nil {
return "", err
}
w.Flush()
strValue = strings.TrimSpace(buf.String())
}
default:
// Try using a String method
if do, ok := in.(fmt.Stringer); ok {
strValue = do.String()
} else {
err = errors.New("don't know how to convert this")
}
}
if err != nil {
return "", fmt.Errorf("interpreting %T as string failed: %w", in, err)
}
return strValue, nil
}
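A quick sketch of the CSV convention this implements, with the expected outputs in comments (they match the test table below):

```
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configstruct"
)

func main() {
	// Fields containing commas are quoted, plain fields are not.
	s, _ := configstruct.InterfaceToString([]string{"hello, world", "goodbye"})
	fmt.Println(s) // "hello, world",goodbye
	// An empty slice encodes as the empty string...
	s, _ = configstruct.InterfaceToString([]string{})
	fmt.Printf("%q\n", s) // ""
	// ...while a slice holding one empty string encodes as "".
	s, _ = configstruct.InterfaceToString([]string{""})
	fmt.Println(s) // ""
}
```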
// Item describes a single entry in the options structure
type Item struct {
Name string // snake_case
@ -139,6 +195,7 @@ func Items(opt any) (items []Item, err error) {
if hasTag {
newItem.Name = configName + "_" + newItem.Name
}
newItem.Field = fieldName + "." + newItem.Field
items = append(items, newItem)
}
} else {
@ -156,6 +213,22 @@ func Items(opt any) (items []Item, err error) {
return items, nil
}
// setValue sets newValue to configValue returning an updated newValue
func setValue(newValue any, configValue string) (any, error) {
newNewValue, err := StringToInterface(newValue, configValue)
if err != nil {
// Mask errors if setting an empty string as
// it isn't valid for all types. This makes
// empty string be the equivalent of unset.
if configValue != "" {
return nil, err
}
} else {
newValue = newNewValue
}
return newValue, nil
}
// Set interprets the field names in defaults and looks up config
// values in the config passed in. Any values found in config will be
// set in the opt structure.
@ -177,17 +250,60 @@ func Set(config configmap.Getter, opt any) (err error) {
for _, defaultItem := range defaultItems {
newValue := defaultItem.Value
if configValue, ok := config.Get(defaultItem.Name); ok {
var newNewValue any
newNewValue, err = StringToInterface(newValue, configValue)
newValue, err = setValue(newValue, configValue)
if err != nil {
// Mask errors if setting an empty string as
// it isn't valid for all types. This makes
// empty string be the equivalent of unset.
if configValue != "" {
return fmt.Errorf("couldn't parse config item %q = %q as %T: %w", defaultItem.Name, configValue, defaultItem.Value, err)
}
} else {
newValue = newNewValue
return fmt.Errorf("couldn't parse config item %q = %q as %T: %w", defaultItem.Name, configValue, defaultItem.Value, err)
}
}
defaultItem.Set(newValue)
}
return nil
}
// setIfSameType set aPtr with b if they are the same type or returns false.
func setIfSameType(aPtr interface{}, b interface{}) bool {
aVal := reflect.ValueOf(aPtr).Elem()
bVal := reflect.ValueOf(b)
if aVal.Type() != bVal.Type() {
return false
}
aVal.Set(bVal)
return true
}
// SetAny interprets the field names in defaults and looks up config
// values in the config passed in. Any values found in config will be
// set in the opt structure.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields. The field names are converted from CamelCase to
// snake_case and looked up in the config supplied or a
// `config:"field_name"` is looked up.
//
// If items are found then they are set directly if the correct type,
// otherwise they are converted to string and then converted from
// string to native types and set in opt.
//
// All the field types in the struct must implement fmt.Scanner.
func SetAny(config map[string]any, opt any) (err error) {
defaultItems, err := Items(opt)
if err != nil {
return err
}
for _, defaultItem := range defaultItems {
newValue := defaultItem.Value
if configValue, ok := config[defaultItem.Name]; ok {
if !setIfSameType(&newValue, configValue) {
// Convert the config value to be a string
stringConfigValue, err := InterfaceToString(configValue)
if err != nil {
return err
}
newValue, err = setValue(newValue, stringConfigValue)
if err != nil {
return fmt.Errorf("couldn't parse config item %q = %q as %T: %w", defaultItem.Name, stringConfigValue, defaultItem.Value, err)
}
}
}
defaultItem.Set(newValue)
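The serve rc handlers earlier in this commit use exactly this overlay pattern: copy the defaults, then let SetAny apply whatever arrived in the rc parameters. A minimal sketch with a hypothetical options struct:

```
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configstruct"
)

// opts is a hypothetical options struct for illustration only.
type opts struct {
	Name  string `config:"name"`
	Count int    `config:"count"`
}

func main() {
	o := opts{Name: "default", Count: 1} // start from the defaults
	in := map[string]any{
		"name":  "rc", // values may arrive typed...
		"count": "42", // ...or as strings; both are converted
	}
	if err := configstruct.SetAny(in, &o); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", o) // {Name:rc Count:42}
}
```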

View File

@ -100,17 +100,17 @@ func TestItemsNested(t *testing.T) {
got, err := configstruct.Items(&in)
require.NoError(t, err)
want := []configstruct.Item{
{Name: "a", Field: "A", Value: string("1")},
{Name: "b", Field: "B", Value: string("2")},
{Name: "sub_a", Field: "A", Value: string("3")},
{Name: "sub_b", Field: "B", Value: string("4")},
{Name: "spud_pie", Field: "PotatoPie", Value: string("yum")},
{Name: "bean_stew", Field: "BeanStew", Value: true},
{Name: "raisin_roll", Field: "RaisinRoll", Value: int(42)},
{Name: "sausage_on_stick", Field: "SausageOnStick", Value: int64(101)},
{Name: "forbidden_fruit", Field: "ForbiddenFruit", Value: uint(6)},
{Name: "cooking_time", Field: "CookingTime", Value: fs.Duration(42 * time.Second)},
{Name: "total_weight", Field: "TotalWeight", Value: fs.SizeSuffix(17 << 20)},
{Name: "a", Field: "Conf.A", Value: string("1")},
{Name: "b", Field: "Conf.B", Value: string("2")},
{Name: "sub_a", Field: "Sub1.A", Value: string("3")},
{Name: "sub_b", Field: "Sub1.B", Value: string("4")},
{Name: "spud_pie", Field: "Sub2.PotatoPie", Value: string("yum")},
{Name: "bean_stew", Field: "Sub2.BeanStew", Value: true},
{Name: "raisin_roll", Field: "Sub2.RaisinRoll", Value: int(42)},
{Name: "sausage_on_stick", Field: "Sub2.SausageOnStick", Value: int64(101)},
{Name: "forbidden_fruit", Field: "Sub2.ForbiddenFruit", Value: uint(6)},
{Name: "cooking_time", Field: "Sub2.CookingTime", Value: fs.Duration(42 * time.Second)},
{Name: "total_weight", Field: "Sub2.TotalWeight", Value: fs.SizeSuffix(17 << 20)},
{Name: "c", Field: "C", Value: string("normal")},
{Name: "d", Field: "D", Value: fs.Tristate{Value: true, Valid: true}},
}
@ -176,6 +176,39 @@ func TestSetFull(t *testing.T) {
assert.Equal(t, want, in)
}
func TestSetAnyFull(t *testing.T) {
in := &Conf2{
PotatoPie: "yum",
BeanStew: true,
RaisinRoll: 42,
SausageOnStick: 101,
ForbiddenFruit: 6,
CookingTime: fs.Duration(42 * time.Second),
TotalWeight: fs.SizeSuffix(17 << 20),
}
m := map[string]any{
"spud_pie": "YUM",
"bean_stew": false,
"raisin_roll": "43 ",
"sausage_on_stick": " 102 ",
"forbidden_fruit": "0x7",
"cooking_time": 43 * time.Second,
"total_weight": "18M",
}
want := &Conf2{
PotatoPie: "YUM",
BeanStew: false,
RaisinRoll: 43,
SausageOnStick: 102,
ForbiddenFruit: 7,
CookingTime: fs.Duration(43 * time.Second),
TotalWeight: fs.SizeSuffix(18 << 20),
}
err := configstruct.SetAny(m, in)
require.NoError(t, err)
assert.Equal(t, want, in)
}
func TestStringToInterface(t *testing.T) {
item := struct{ A int }{2}
for _, test := range []struct {
@ -227,3 +260,47 @@ func TestStringToInterface(t *testing.T) {
}
}
}
func TestInterfaceToString(t *testing.T) {
item := struct{ A int }{2}
for _, test := range []struct {
in any
want string
err string
}{
{nil, "", "interpreting <nil> as string failed: don't know how to convert this"},
{"", "", ""},
{" string ", " string ", ""},
{int(123), "123", ""},
{int(0x123), "291", ""},
{int(-123), "-123", ""},
{false, "false", ""},
{true, "true", ""},
{uint(123), "123", ""},
{int64(123), "123", ""},
{item, "", "interpreting struct { A int } as string failed: don't know how to convert this"},
{fs.Duration(time.Second), "1s", ""},
{fs.Duration(61 * time.Second), "1m1s", ""},
{[]string{}, ``, ""},
{[]string{""}, `""`, ""},
{[]string{"", ""}, `,`, ""},
{[]string{"hello"}, `hello`, ""},
{[]string{"hello", "world"}, `hello,world`, ""},
{[]string{"hello", "", "world"}, `hello,,world`, ""},
{[]string{`hello, world`, `goodbye, world!`}, `"hello, world","goodbye, world!"`, ""},
{time.Second, "1s", ""},
{61 * time.Second, "1m1s", ""},
{fs.Mebi, "1Mi", ""},
{fs.Gibi, "1Gi", ""},
} {
what := fmt.Sprintf("interpret %#v as string", test.in)
got, err := configstruct.InterfaceToString(test.in)
if test.err == "" {
require.NoError(t, err, what)
assert.Equal(t, test.want, got, what)
} else {
assert.Equal(t, "", got)
assert.EqualError(t, err, test.err, what)
}
}
}

View File

@ -7,6 +7,7 @@ import (
"context"
"errors"
"fmt"
"io"
"os"
"slices"
"sort"
@ -28,7 +29,7 @@ import (
var ReadLine = func() string {
buf := bufio.NewReader(os.Stdin)
line, err := buf.ReadString('\n')
if err != nil {
if err != nil && (line == "" || err != io.EOF) {
fs.Fatalf(nil, "Failed to read line: %v", err)
}
return strings.TrimSpace(line)

View File

@ -159,6 +159,21 @@ type Features struct {
// of listing recursively that doing a directory traversal.
ListR ListRFn
// ListP lists the objects and directories of the Fs starting
// from dir non recursively to out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
ListP func(ctx context.Context, dir string, callback ListRCallback) error
// About gets quota information from the Fs
About func(ctx context.Context) (*Usage, error)
@ -327,6 +342,9 @@ func (ft *Features) Fill(ctx context.Context, f Fs) *Features {
if do, ok := f.(ListRer); ok {
ft.ListR = do.ListR
}
if do, ok := f.(ListPer); ok {
ft.ListP = do.ListP
}
if do, ok := f.(Abouter); ok {
ft.About = do.About
}
@ -435,6 +453,9 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
if mask.ListR == nil {
ft.ListR = nil
}
if mask.ListP == nil {
ft.ListP = nil
}
if mask.About == nil {
ft.About = nil
}
@ -663,6 +684,24 @@ type ListRer interface {
ListR(ctx context.Context, dir string, callback ListRCallback) error
}
// ListPer is an optional interfaces for Fs
type ListPer interface {
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
ListP(ctx context.Context, dir string, callback ListRCallback) error
}
// RangeSeeker is the interface that wraps the RangeSeek method.
//
// Some of the returns from Object.Open() may optionally implement

View File

@ -5,6 +5,7 @@ import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/hex"
"errors"
@ -87,6 +88,9 @@ var (
// SHA256 indicates SHA-256 support
SHA256 Type
// SHA512 indicates SHA-512 support
SHA512 Type
)
func init() {
@ -95,6 +99,7 @@ func init() {
Whirlpool = RegisterHash("whirlpool", "Whirlpool", 128, whirlpool.New)
CRC32 = RegisterHash("crc32", "CRC-32", 8, func() hash.Hash { return crc32.NewIEEE() })
SHA256 = RegisterHash("sha256", "SHA-256", 64, sha256.New)
SHA512 = RegisterHash("sha512", "SHA-512", 128, sha512.New)
}
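Once registered, SHA-512 plugs into the existing multihasher machinery with no further changes; a small sketch using the same API calls as the serve s3 changes above:

```
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	// Build a multihasher computing only SHA-512.
	h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.SHA512))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(h, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println(h.Sums()[hash.SHA512]) // 128 hex characters
}
```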
// Supported returns a set of all the supported hashes by

View File

@ -77,6 +77,7 @@ var hashTestSet = []hashTest{
hash.Whirlpool: "eddf52133d4566d763f716e853d6e4efbabd29e2c2e63f56747b1596172851d34c2df9944beb6640dbdbe3d9b4eb61180720a79e3d15baff31c91e43d63869a4",
hash.CRC32: "a6041d7e",
hash.SHA256: "c839e57675862af5c21bd0a15413c3ec579e0d5522dab600bc6c3489b05b8f54",
hash.SHA512: "008e7e9b5d94d37bf5e07c955890f730f137a41b8b0db16cb535a9b4cb5632c2bccff31685ec470130fe10e2258a0ab50ab587472258f3132ccf7d7d59fb91db",
},
},
// Empty data set
@ -88,6 +89,7 @@ var hashTestSet = []hashTest{
hash.Whirlpool: "19fa61d75522a4669b44e39c1d2e1726c530232130d407f89afee0964997f7a73e83be698b288febcf88e3e03c4f0757ea8964e59b63d93708b138cc42a66eb3",
hash.CRC32: "00000000",
hash.SHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
hash.SHA512: "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e",
},
},
}

61 fs/list/helpers.go Normal file
View File

@ -0,0 +1,61 @@
package list
import (
"context"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
)
// Listing helpers used by backends
// Helper is used in the implementation of ListR to accumulate DirEntries
type Helper struct {
callback fs.ListRCallback
entries fs.DirEntries
}
// NewHelper should be called from ListR with the callback passed in
func NewHelper(callback fs.ListRCallback) *Helper {
return &Helper{
callback: callback,
}
}
// send sends the stored entries to the callback if there are >= max
// entries.
func (lh *Helper) send(max int) (err error) {
if len(lh.entries) >= max {
err = lh.callback(lh.entries)
lh.entries = lh.entries[:0]
}
return err
}
// Add an entry to the stored entries, sending them to the callback
// once 100 or more have accumulated
func (lh *Helper) Add(entry fs.DirEntry) error {
if entry == nil {
return nil
}
lh.entries = append(lh.entries, entry)
return lh.send(100)
}
// Flush the stored entries (if any) sending them to the callback
func (lh *Helper) Flush() error {
return lh.send(1)
}
// WithListP implements the List interface with ListP
//
// It should be used in backends which support ListP to implement
// List.
func WithListP(ctx context.Context, dir string, list fs.ListPer) (entries fs.DirEntries, err error) {
err = list.ListP(ctx, dir, func(newEntries fs.DirEntries) error {
accounting.Stats(ctx).Listed(int64(len(newEntries)))
entries = append(entries, newEntries...)
return nil
})
return entries, err
}
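With this helper a backend which implements ListP gets List almost for free. A minimal sketch, assuming f already satisfies fs.ListPer:

func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

This also keeps the accounting in one place, since WithListP calls accounting.Stats(ctx).Listed for every tranche received.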

fs/list/helpers_test.go Normal file
View File

@ -0,0 +1,145 @@
package list
import (
"context"
"errors"
"fmt"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Mock callback to collect the entries
func mockCallback(entries fs.DirEntries) error {
// Do nothing or log for testing purposes
return nil
}
func TestNewListRHelper(t *testing.T) {
callback := mockCallback
helper := NewHelper(callback)
assert.NotNil(t, helper)
assert.Equal(t, fmt.Sprintf("%p", callback), fmt.Sprintf("%p", helper.callback))
assert.Empty(t, helper.entries)
}
func TestListRHelperAdd(t *testing.T) {
callbackInvoked := false
callback := func(entries fs.DirEntries) error {
callbackInvoked = true
return nil
}
helper := NewHelper(callback)
entry := mockobject.Object("A")
require.NoError(t, helper.Add(entry))
assert.Len(t, helper.entries, 1)
assert.False(t, callbackInvoked, "Callback should not be invoked before reaching 100 entries")
// Check adding a nil entry doesn't change anything
require.NoError(t, helper.Add(nil))
assert.Len(t, helper.entries, 1)
assert.False(t, callbackInvoked, "Callback should not be invoked before reaching 100 entries")
}
func TestListRHelperSend(t *testing.T) {
entry := mockobject.Object("A")
callbackInvoked := false
callback := func(entries fs.DirEntries) error {
callbackInvoked = true
assert.Equal(t, 100, len(entries))
for _, obj := range entries {
assert.Equal(t, entry, obj)
}
return nil
}
helper := NewHelper(callback)
// Add 100 entries to force the callback to be invoked
for i := 0; i < 100; i++ {
require.NoError(t, helper.Add(entry))
}
assert.Len(t, helper.entries, 0)
assert.True(t, callbackInvoked, "Callback should be invoked after 100 entries")
}
func TestListRHelperFlush(t *testing.T) {
entry := mockobject.Object("A")
callbackInvoked := false
callback := func(entries fs.DirEntries) error {
callbackInvoked = true
assert.Equal(t, 1, len(entries))
for _, obj := range entries {
assert.Equal(t, entry, obj)
}
return nil
}
helper := NewHelper(callback)
require.NoError(t, helper.Add(entry))
assert.False(t, callbackInvoked, "Callback should not have been invoked yet")
require.NoError(t, helper.Flush())
assert.True(t, callbackInvoked, "Callback should be invoked on flush")
assert.Len(t, helper.entries, 0, "Entries should be cleared after flush")
}
type mockListPfs struct {
t *testing.T
entries fs.DirEntries
err error
errorAfter int
}
func (f *mockListPfs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
assert.Equal(f.t, "dir", dir)
count := 0
for entries := f.entries; len(entries) > 0; entries = entries[2:] {
err = callback(entries[:2])
if err != nil {
return err
}
count += 2
if f.err != nil && count >= f.errorAfter {
return f.err
}
}
return nil
}
// check interface
var _ fs.ListPer = (*mockListPfs)(nil)
func TestListWithListP(t *testing.T) {
ctx := context.Background()
var entries fs.DirEntries
for i := 0; i < 26; i++ {
entries = append(entries, mockobject.New(fmt.Sprintf("%c", 'A'+i)))
}
t.Run("NoError", func(t *testing.T) {
f := &mockListPfs{
t: t,
entries: entries,
}
gotEntries, err := WithListP(ctx, "dir", f)
require.NoError(t, err)
assert.Equal(t, entries, gotEntries)
})
t.Run("Error", func(t *testing.T) {
f := &mockListPfs{t: t,
entries: entries,
err: errors.New("BOOM"),
errorAfter: 10,
}
gotEntries, err := WithListP(ctx, "dir", f)
assert.Equal(t, f.err, err)
assert.Equal(t, entries[:10], gotEntries)
})
}

View File

@ -8,6 +8,7 @@ import (
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/lib/bucket"
)
@ -23,6 +24,7 @@ import (
func DirSorted(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
// Get unfiltered entries from the fs
entries, err = f.List(ctx, dir)
accounting.Stats(ctx).Listed(int64(len(entries)))
if err != nil {
return nil, err
}
@ -37,8 +39,64 @@ func DirSorted(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entri
return filterAndSortDir(ctx, entries, includeAll, dir, fi.IncludeObject, fi.IncludeDirectory(ctx, f))
}
// filter (if required) and check the entries, then sort them
func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string,
// listP lists dir on any backend, using ListP where available and falling back to List otherwise
func listP(ctx context.Context, f fs.Fs, dir string, callback fs.ListRCallback) error {
if doListP := f.Features().ListP; doListP != nil {
return doListP(ctx, dir, callback)
}
// Fallback to List
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
return callback(entries)
}
// DirSortedFn reads Object and *Dir into entries for the given Fs.
//
// dir is the start directory, "" for root
//
// If includeAll is specified all files will be added, otherwise only
// files and directories passing the filter will be added.
//
// Files will be returned through callback in sorted order
func DirSortedFn(ctx context.Context, f fs.Fs, includeAll bool, dir string, callback fs.ListRCallback, keyFn KeyFn) (err error) {
stats := accounting.Stats(ctx)
fi := filter.GetConfig(ctx)
// Sort the entries, in or out of memory
sorter, err := NewSorter(ctx, f, callback, keyFn)
if err != nil {
return fmt.Errorf("failed to create directory sorter: %w", err)
}
defer sorter.CleanUp()
// Get unfiltered entries from the fs
err = listP(ctx, f, dir, func(entries fs.DirEntries) error {
stats.Listed(int64(len(entries)))
// This should happen only if an exclude file lives in the
// starting directory, otherwise DirSortedFn should not have
// been called.
if !includeAll && fi.ListContainsExcludeFile(entries) {
fs.Debugf(dir, "Excluded")
return nil
}
entries, err := filterDir(ctx, entries, includeAll, dir, fi.IncludeObject, fi.IncludeDirectory(ctx, f))
if err != nil {
return err
}
return sorter.Add(entries)
})
if err != nil {
return err
}
return sorter.Send()
}
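A minimal usage sketch, with ctx and f assumed; the callback sees entries already filtered and delivered in sorted order:

err := list.DirSortedFn(ctx, f, false, "some/dir", func(entries fs.DirEntries) error {
	for _, entry := range entries {
		fmt.Println(entry.Remote()) // fmt assumed imported in the sketch
	}
	return nil
}, nil) // nil KeyFn sorts by Remote()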
// Filter the entries passed in
func filterDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string,
IncludeObject func(ctx context.Context, o fs.Object) bool,
IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
@ -94,7 +152,18 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
newEntries = append(newEntries, entry)
}
}
entries = newEntries
return newEntries, nil
}
// filter and sort the entries
func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string,
IncludeObject func(ctx context.Context, o fs.Object) bool,
IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
// Filter the directory entries (in place)
entries, err = filterDir(ctx, entries, includeAll, dir, IncludeObject, IncludeDirectory)
if err != nil {
return nil, err
}
// Sort the directory entries by Remote
//

fs/list/sorter.go Normal file
View File

@ -0,0 +1,342 @@
package list
import (
"cmp"
"context"
"errors"
"fmt"
"slices"
"strings"
"sync"
"time"
"github.com/lanrat/extsort"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
)
// NewObjecter describes the minimum facilities we need from the fs.Fs passed into NewSorter.
type NewObjecter interface {
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
NewObject(ctx context.Context, remote string) (fs.Object, error)
}
// Sorter implements an efficient mechanism for sorting list entries.
//
// If there are a large number of entries (above `--list-cutoff`),
// this may be done on disk instead of in memory.
//
// Supply entries with the Add method, call Send at the end to deliver
// the sorted entries and finalise with CleanUp regardless of whether
// you called Add or Send.
//
// Sorted entries are delivered to the callback supplied to NewSorter
// when the Send method is called.
type Sorter struct {
ctx context.Context // context for everything
ci *fs.ConfigInfo // config we are using
cancel func() // cancel all background operations
mu sync.Mutex // protect the below
f NewObjecter // fs that we are listing
callback fs.ListRCallback // where to send the sorted entries to
entries fs.DirEntries // accumulated entries
keyFn KeyFn // transform an entry into a sort key
cutoff int // number of entries above which we start extsort
extSort bool // true if we are ext sorting
inputChan chan string // for sending data to the ext sort
outputChan chan string // for receiving data from the ext sort
errChan chan error // for getting errors from the ext sort
sorter *extsort.StringSorter // external string sort
errs *errcount.ErrCount // accumulate errors
}
// KeyFn turns an entry into a sort key
type KeyFn func(entry fs.DirEntry) string
// identityKeyFn maps an entry to its Remote
func identityKeyFn(entry fs.DirEntry) string {
return entry.Remote()
}
// NewSorter creates a new Sorter with callback for sorted entries to
// be delivered to. keyFn is used to process each entry to get a key
// function, if nil then it will just use entry.Remote()
func NewSorter(ctx context.Context, f NewObjecter, callback fs.ListRCallback, keyFn KeyFn) (*Sorter, error) {
ci := fs.GetConfig(ctx)
ctx, cancel := context.WithCancel(ctx)
if keyFn == nil {
keyFn = identityKeyFn
}
return &Sorter{
ctx: ctx,
ci: ci,
cancel: cancel,
f: f,
callback: callback,
keyFn: keyFn,
cutoff: ci.ListCutoff,
errs: errcount.New(),
}, nil
}
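The intended lifecycle is: Add in batches, Send once, CleanUp always. A minimal sketch, assuming f is the Fs being listed, callback receives the sorted output and batches holds the fs.DirEntries read from the backend:

sorter, err := list.NewSorter(ctx, f, callback, nil) // nil keyFn sorts by Remote()
if err != nil {
	return err
}
defer sorter.CleanUp() // safe whether or not Send was called
for _, batch := range batches {
	if err := sorter.Add(batch); err != nil { // spills to disk above --list-cutoff
		return err
	}
}
return sorter.Send() // delivers the sorted entries to callback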
// Turn a directory entry into a combined key and data for extsort
func (ls *Sorter) entryToKey(entry fs.DirEntry) string {
// To start with we just use the Remote to recover the object
// To make this more efficient we would serialize the object here
remote := entry.Remote()
remote = strings.TrimRight(remote, "/")
if _, isDir := entry.(fs.Directory); isDir {
remote += "/"
}
key := ls.keyFn(entry) + "\x00" + remote
return key
}
// Turn an extsort key back into a directory entry
func (ls *Sorter) keyToEntry(ctx context.Context, key string) (entry fs.DirEntry, err error) {
null := strings.IndexRune(key, '\x00')
if null < 0 {
return nil, errors.New("sorter: failed to deserialize: missing null")
}
remote := key[null+1:]
if remote, isDir := strings.CutSuffix(remote, "/"); isDir {
// Is a directory
//
// Note this creates a very minimal directory entry which should be fine for the
// bucket based remotes this code will be run on.
entry = fs.NewDir(remote, time.Time{})
} else {
obj, err := ls.f.NewObject(ctx, remote)
if err != nil {
fs.Errorf(ls.f, "sorter: failed to re-create object %q: %v", remote, err)
return nil, fmt.Errorf("sorter: failed to re-create object: %w", err)
}
entry = obj
}
return entry, nil
}
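For example (illustrative, not from the diff): with the case insensitive key function used in the tests below, a directory entry "Sub/Dir" serializes to the key "sub/dir\x00Sub/Dir/". Everything before the NUL is what extsort compares; everything after it recovers the entry, the trailing slash marking a directory and anything else being re-fetched with NewObject.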
func (ls *Sorter) sendEntriesToExtSort(entries fs.DirEntries) (err error) {
for _, entry := range entries {
select {
case ls.inputChan <- ls.entryToKey(entry):
case err = <-ls.errChan:
if err != nil {
return err
}
}
}
select {
case err = <-ls.errChan:
default:
}
return err
}
func (ls *Sorter) startExtSort() (err error) {
fs.Logf(ls.f, "Switching to on disk sorting as more than %d entries in one directory detected", ls.cutoff)
ls.inputChan = make(chan string, 100)
// Options to control the extsort
opt := extsort.Config{
NumWorkers: 8, // small effect
ChanBuffSize: 1024, // small effect
SortedChanBuffSize: 1024, // makes a lot of difference
ChunkSize: 32 * 1024, // tuned for 50 char records (UUID sized)
// Defaults
// ChunkSize: int(1e6), // amount of records to store in each chunk which will be written to disk
// NumWorkers: 2, // maximum number of workers to use for parallel sorting
// ChanBuffSize: 1, // buffer size for merging chunks
// SortedChanBuffSize: 10, // buffer size for passing records to output
// TempFilesDir: "", // empty for use OS default ex: /tmp
}
ls.sorter, ls.outputChan, ls.errChan = extsort.Strings(ls.inputChan, &opt)
go ls.sorter.Sort(ls.ctx)
// Show we are extsorting now
ls.extSort = true
// Send the accumulated entries to the sorter
fs.Debugf(ls.f, "Sending accumulated directory entries to disk")
err = ls.sendEntriesToExtSort(ls.entries)
fs.Debugf(ls.f, "Done sending accumulated directory entries to disk")
clear(ls.entries)
ls.entries = nil
return err
}
// Add entries to the list sorter.
//
// Does not call the callback.
//
// Safe to call from concurrent go routines
func (ls *Sorter) Add(entries fs.DirEntries) error {
ls.mu.Lock()
defer ls.mu.Unlock()
if ls.extSort {
err := ls.sendEntriesToExtSort(entries)
if err != nil {
return err
}
} else {
ls.entries = append(ls.entries, entries...)
if len(ls.entries) >= ls.cutoff {
err := ls.startExtSort()
if err != nil {
return err
}
}
}
return nil
}
// Number of entries to batch in list helper
const listHelperBatchSize = 100
// listHelper is used to turn keys into entries concurrently
type listHelper struct {
ls *Sorter // parent
keys []string // keys being built up
entries fs.DirEntries // entries processed concurrently as a batch
errs []error // errors processed concurrently
}
// newListHelper creates a listHelper which sends batches of entries to the Sorter's callback
func (ls *Sorter) newListHelper() *listHelper {
return &listHelper{
ls: ls,
entries: make(fs.DirEntries, listHelperBatchSize),
errs: make([]error, listHelperBatchSize),
}
}
// send converts the stored keys back into entries and sends them
// to the callback if there are >= max keys.
func (lh *listHelper) send(max int) (err error) {
if len(lh.keys) < max {
return nil
}
// Turn this batch into objects in parallel
g, gCtx := errgroup.WithContext(lh.ls.ctx)
g.SetLimit(lh.ls.ci.Checkers)
for i, key := range lh.keys {
i, key := i, key // can remove when go1.22 is minimum version
g.Go(func() error {
lh.entries[i], lh.errs[i] = lh.ls.keyToEntry(gCtx, key)
return nil
})
}
err = g.Wait()
if err != nil {
return err
}
// Account errors and collect OK entries
toSend := lh.entries[:0]
for i := range lh.keys {
entry, err := lh.entries[i], lh.errs[i]
if err != nil {
lh.ls.errs.Add(err)
} else if entry != nil {
toSend = append(toSend, entry)
}
}
// fmt.Println(lh.keys)
// fmt.Println(toSend)
err = lh.ls.callback(toSend)
clear(lh.entries)
clear(lh.errs)
lh.keys = lh.keys[:0]
return err
}
// Add a key to the stored keys and send them if there are more
// than a certain amount
func (lh *listHelper) Add(key string) error {
lh.keys = append(lh.keys, key)
return lh.send(100)
}
// Flush the stored entries (if any) sending them to the callback
func (lh *listHelper) Flush() error {
return lh.send(1)
}
// Send the sorted entries to the callback.
func (ls *Sorter) Send() (err error) {
ls.mu.Lock()
defer ls.mu.Unlock()
if ls.extSort {
close(ls.inputChan)
list := ls.newListHelper()
outer:
for {
select {
case key, ok := <-ls.outputChan:
if !ok {
break outer
}
err := list.Add(key)
if err != nil {
return err
}
case err := <-ls.errChan:
if err != nil {
return err
}
}
}
err = list.Flush()
if err != nil {
return err
}
return ls.errs.Err("sorter")
}
// Sort the directory entries by Remote
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
slices.SortStableFunc(ls.entries, func(a, b fs.DirEntry) int {
return cmp.Compare(ls.keyFn(a), ls.keyFn(b))
})
return ls.callback(ls.entries)
}
// CleanUp the Sorter, cleaning up any memory / files.
//
// It is safe and encouraged to call this regardless of whether you
// called Send or not.
//
// This does not call the callback
func (ls *Sorter) CleanUp() {
ls.mu.Lock()
defer ls.mu.Unlock()
ls.cancel()
clear(ls.entries)
ls.entries = nil
ls.extSort = false
}
// SortToChan makes a callback for the Sorter which sends the output
// to the channel provided.
func SortToChan(out chan<- fs.DirEntry) fs.ListRCallback {
return func(entries fs.DirEntries) error {
for _, entry := range entries {
out <- entry
}
return nil
}
}
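SortToChan adapts the Sorter for consumers which prefer a channel, as the march changes below do. A minimal sketch, assuming entries is an fs.DirEntries already collected and f implements NewObjecter:

out := make(chan fs.DirEntry)
ls, err := list.NewSorter(ctx, f, list.SortToChan(out), nil)
if err != nil {
	return err
}
go func() {
	defer close(out)
	defer ls.CleanUp()
	if err := ls.Add(entries); err == nil {
		_ = ls.Send() // entries now arrive on out in sorted order
	}
}()
for entry := range out {
	fmt.Println(entry.Remote())
}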

fs/list/sorter_test.go Normal file
View File

@ -0,0 +1,314 @@
package list
import (
"cmp"
"context"
"fmt"
"slices"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSorter(t *testing.T) {
ctx := context.Background()
da := mockdir.New("a")
oA := mockobject.Object("A")
callback := func(entries fs.DirEntries) error {
require.Equal(t, fs.DirEntries{oA, da}, entries)
return nil
}
ls, err := NewSorter(ctx, nil, callback, nil)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%p", callback), fmt.Sprintf("%p", ls.callback))
assert.Equal(t, fmt.Sprintf("%p", identityKeyFn), fmt.Sprintf("%p", ls.keyFn))
assert.Equal(t, fs.DirEntries(nil), ls.entries)
// Test Add
err = ls.Add(fs.DirEntries{da})
require.NoError(t, err)
assert.Equal(t, fs.DirEntries{da}, ls.entries)
err = ls.Add(fs.DirEntries{oA})
require.NoError(t, err)
assert.Equal(t, fs.DirEntries{da, oA}, ls.entries)
// Test Send
err = ls.Send()
require.NoError(t, err)
// Test Cleanup
ls.CleanUp()
assert.Equal(t, fs.DirEntries(nil), ls.entries)
}
func TestSorterIdentity(t *testing.T) {
ctx := context.Background()
cmpFn := func(a, b fs.DirEntry) int {
return cmp.Compare(a.Remote(), b.Remote())
}
callback := func(entries fs.DirEntries) error {
assert.True(t, slices.IsSortedFunc(entries, cmpFn))
assert.Equal(t, "a", entries[0].Remote())
return nil
}
ls, err := NewSorter(ctx, nil, callback, nil)
require.NoError(t, err)
defer ls.CleanUp()
// Add things in reverse alphabetical order
for i := 'z'; i >= 'a'; i-- {
err = ls.Add(fs.DirEntries{mockobject.Object(string(i))})
require.NoError(t, err)
}
assert.Equal(t, "z", ls.entries[0].Remote())
assert.False(t, slices.IsSortedFunc(ls.entries, cmpFn))
// Check they get sorted
err = ls.Send()
require.NoError(t, err)
}
func TestSorterKeyFn(t *testing.T) {
ctx := context.Background()
keyFn := func(entry fs.DirEntry) string {
s := entry.Remote()
return string('z' - s[0])
}
cmpFn := func(a, b fs.DirEntry) int {
return cmp.Compare(keyFn(a), keyFn(b))
}
callback := func(entries fs.DirEntries) error {
assert.True(t, slices.IsSortedFunc(entries, cmpFn))
assert.Equal(t, "z", entries[0].Remote())
return nil
}
ls, err := NewSorter(ctx, nil, callback, keyFn)
require.NoError(t, err)
defer ls.CleanUp()
// Add things in reverse sorted order
for i := 'a'; i <= 'z'; i++ {
err = ls.Add(fs.DirEntries{mockobject.Object(string(i))})
require.NoError(t, err)
}
assert.Equal(t, "a", ls.entries[0].Remote())
assert.False(t, slices.IsSortedFunc(ls.entries, cmpFn))
// Check they get sorted
err = ls.Send()
require.NoError(t, err)
}
// testFs implements enough of the fs.Fs interface for Sorter
type testFs struct {
t *testing.T
entriesMap map[string]fs.DirEntry
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *testFs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entry, ok := f.entriesMap[remote]
assert.True(f.t, ok, "entry not found")
if !ok {
return nil, fs.ErrorObjectNotFound
}
obj, ok := entry.(fs.Object)
assert.True(f.t, ok, "expected entry to be object: %#v", entry)
if !ok {
return nil, fs.ErrorObjectNotFound
}
return obj, nil
}
// String outputs info about the Fs
func (f *testFs) String() string {
return "testFs"
}
// used to sort the entries case insensitively
func keyCaseInsensitive(entry fs.DirEntry) string {
return strings.ToLower(entry.Remote())
}
// Test the external sorting
func testSorterExt(t *testing.T, cutoff, N int, wantExtSort bool, keyFn KeyFn) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
ci.ListCutoff = cutoff
// Make the directory entries
entriesMap := make(map[string]fs.DirEntry, N)
for i := 0; i < N; i++ {
remote := fmt.Sprintf("%010d", i)
prefix := "a"
if i%3 == 0 {
prefix = "A"
}
remote = prefix + remote
if i%2 == 0 {
entriesMap[remote] = mockobject.New(remote)
} else {
entriesMap[remote] = mockdir.New(remote)
}
}
assert.Equal(t, N, len(entriesMap))
f := &testFs{t: t, entriesMap: entriesMap}
// In the callback delete entries from the map when they are
// found
prevKey := ""
callback := func(entries fs.DirEntries) error {
for _, gotEntry := range entries {
remote := gotEntry.Remote()
key := remote
if keyFn != nil {
key = keyFn(gotEntry)
}
require.Less(t, prevKey, key, "Not sorted")
prevKey = key
wantEntry, ok := entriesMap[remote]
assert.True(t, ok, "Entry not found %q", remote)
_, wantDir := wantEntry.(fs.Directory)
_, gotDir := gotEntry.(fs.Directory)
_, wantObj := wantEntry.(fs.Object)
_, gotObj := gotEntry.(fs.Object)
require.True(t, (wantDir && gotDir) || (wantObj && gotObj), "Wrong types %#v, %#v", wantEntry, gotEntry)
delete(entriesMap, remote)
}
return nil
}
ls, err := NewSorter(ctx, f, callback, keyFn)
require.NoError(t, err)
// Send the entries in random (map) order
for _, entry := range entriesMap {
err = ls.Add(fs.DirEntries{entry})
require.NoError(t, err)
}
// Check we are extsorting if required
assert.Equal(t, wantExtSort, ls.extSort)
// Test Send
err = ls.Send()
require.NoError(t, err)
// All the entries should have been seen
assert.Equal(t, 0, len(entriesMap))
// Test Cleanup
ls.CleanUp()
assert.Equal(t, fs.DirEntries(nil), ls.entries)
}
// Test the external sorting
func TestSorterExt(t *testing.T) {
for _, test := range []struct {
cutoff int
N int
wantExtSort bool
keyFn KeyFn
}{
{cutoff: 1000, N: 100, wantExtSort: false},
{cutoff: 100, N: 1000, wantExtSort: true},
{cutoff: 1000, N: 100, wantExtSort: false, keyFn: keyCaseInsensitive},
{cutoff: 100, N: 1000, wantExtSort: true, keyFn: keyCaseInsensitive},
{cutoff: 100001, N: 100000, wantExtSort: false},
{cutoff: 100000, N: 100001, wantExtSort: true},
// {cutoff: 100_000, N: 1_000_000, wantExtSort: true},
// {cutoff: 100_000, N: 10_000_000, wantExtSort: true},
} {
t.Run(fmt.Sprintf("cutoff=%d,N=%d,wantExtSort=%v,keyFn=%v", test.cutoff, test.N, test.wantExtSort, test.keyFn != nil), func(t *testing.T) {
testSorterExt(t, test.cutoff, test.N, test.wantExtSort, test.keyFn)
})
}
}
// benchFs implements enough of the fs.Fs interface for Sorter
type benchFs struct{}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (benchFs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Recreate the mock objects
return mockobject.New(remote), nil
}
// String outputs info about the Fs
func (benchFs) String() string {
return "benchFs"
}
func BenchmarkSorterExt(t *testing.B) {
const cutoff = 1000
const N = 10_000_000
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
ci.ListCutoff = cutoff
keyFn := keyCaseInsensitive
// In the callback check entries are in order
prevKey := ""
entriesReceived := 0
callback := func(entries fs.DirEntries) error {
for _, gotEntry := range entries {
remote := gotEntry.Remote()
key := remote
if keyFn != nil {
key = keyFn(gotEntry)
}
require.Less(t, prevKey, key, "Not sorted")
prevKey = key
entriesReceived++
}
return nil
}
f := benchFs{}
ls, err := NewSorter(ctx, f, callback, keyFn)
require.NoError(t, err)
// Send the entries in reverse order in batches of 1000 like the backends do
var entries = make(fs.DirEntries, 0, 1000)
for i := N - 1; i >= 0; i-- {
remote := fmt.Sprintf("%050d", i) // UUID length plus a bit
prefix := "a"
if i%3 == 0 {
prefix = "A"
}
remote = prefix + remote
if i%2 == 0 {
entries = append(entries, mockobject.New(remote))
} else {
entries = append(entries, mockdir.New(remote))
}
if len(entries) > 1000 {
err = ls.Add(entries)
require.NoError(t, err)
entries = entries[:0]
}
}
err = ls.Add(entries)
require.NoError(t, err)
// Check we are extsorting
assert.True(t, ls.extSort)
// Test Send
err = ls.Send()
require.NoError(t, err)
// All the entries should have been seen
assert.Equal(t, N, entriesReceived)
// Cleanup
ls.CleanUp()
}

View File

@ -2,10 +2,11 @@
package march
import (
"cmp"
"context"
"fmt"
"path"
"sort"
"slices"
"strings"
"sync"
@ -14,9 +15,17 @@ import (
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
"golang.org/x/text/unicode/norm"
)
// matchTransformFn converts a name into a form which is used for
// comparison in matchListings.
type matchTransformFn func(name string) string
// list a directory into callback returning err
type listDirFn func(dir string, callback fs.ListRCallback) (err error)
// March holds the data used to traverse two Fs simultaneously,
// calling Callback for each match
type March struct {
@ -35,7 +44,6 @@ type March struct {
srcListDir listDirFn // function to call to list a directory in the src
dstListDir listDirFn // function to call to list a directory in the dst
transforms []matchTransformFn
limiter chan struct{} // make sure we don't do too many operations at once
}
// Marcher is called on each match
@ -70,12 +78,19 @@ func (m *March) init(ctx context.Context) {
if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync {
m.transforms = append(m.transforms, strings.ToLower)
}
// Limit parallelism for operations
m.limiter = make(chan struct{}, ci.Checkers)
}
// list a directory into entries, err
type listDirFn func(dir string) (entries fs.DirEntries, err error)
// key turns a directory entry into a sort key using the defined transforms.
func (m *March) key(entry fs.DirEntry) string {
if entry == nil {
return ""
}
name := path.Base(entry.Remote())
for _, transform := range m.transforms {
name = transform(name)
}
return name
}
// makeListDir constructs a listing function for the given fs
// and includeAll flags for marching through the file system.
@ -85,9 +100,9 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
fi := filter.GetConfig(ctx)
if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
!(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse)
return func(dir string) (entries fs.DirEntries, err error) {
return func(dir string, callback fs.ListRCallback) (err error) {
dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
return list.DirSorted(dirCtx, f, includeAll, dir)
return list.DirSortedFn(dirCtx, f, includeAll, dir, callback, m.key)
}
}
@ -99,7 +114,7 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
dirs dirtree.DirTree
dirsErr error
)
return func(dir string) (entries fs.DirEntries, err error) {
return func(dir string, callback fs.ListRCallback) (err error) {
mu.Lock()
defer mu.Unlock()
if !started {
@ -108,15 +123,23 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
started = true
}
if dirsErr != nil {
return nil, dirsErr
return dirsErr
}
entries, ok := dirs[dir]
if !ok {
err = fs.ErrorDirNotFound
} else {
delete(dirs, dir)
return fs.ErrorDirNotFound
}
return entries, err
delete(dirs, dir)
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
return cmp.Compare(m.key(a), m.key(b))
})
return callback(entries)
}
}
@ -233,148 +256,95 @@ func (m *March) aborting() bool {
return false
}
// matchEntry is an entry plus transformed name
type matchEntry struct {
entry fs.DirEntry
leaf string
name string
}
// matchEntries contains many matchEntry~s
type matchEntries []matchEntry
// Len is part of sort.Interface.
func (es matchEntries) Len() int { return len(es) }
// Swap is part of sort.Interface.
func (es matchEntries) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
// Less is part of sort.Interface.
//
// Compare in order (name, leaf, remote)
func (es matchEntries) Less(i, j int) bool {
ei, ej := &es[i], &es[j]
if ei.name == ej.name {
if ei.leaf == ej.leaf {
return fs.CompareDirEntries(ei.entry, ej.entry) < 0
}
return ei.leaf < ej.leaf
}
return ei.name < ej.name
}
// Sort the directory entries by (name, leaf, remote)
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
func (es matchEntries) sort() {
sort.Stable(es)
}
// make a matchEntries from a newMatch entries
func newMatchEntries(entries fs.DirEntries, transforms []matchTransformFn) matchEntries {
es := make(matchEntries, len(entries))
for i := range es {
es[i].entry = entries[i]
name := path.Base(entries[i].Remote())
es[i].leaf = name
for _, transform := range transforms {
name = transform(name)
}
es[i].name = name
}
es.sort()
return es
}
// matchPair is a matched pair of direntries returned by matchListings
type matchPair struct {
src, dst fs.DirEntry
}
// matchTransformFn converts a name into a form which is used for
// comparison in matchListings.
type matchTransformFn func(name string) string
// Process the two listings, matching up the items in the two slices
// using the transform function on each name first.
//
// Into srcOnly go Entries which only exist in the srcList
// Into dstOnly go Entries which only exist in the dstList
// Into matches go matchPair's of src and dst which have the same name
// Into match go pairs of src and dst entries which have the same name
//
// This checks for duplicates and checks the list is sorted.
func matchListings(srcListEntries, dstListEntries fs.DirEntries, transforms []matchTransformFn) (srcOnly fs.DirEntries, dstOnly fs.DirEntries, matches []matchPair) {
srcList := newMatchEntries(srcListEntries, transforms)
dstList := newMatchEntries(dstListEntries, transforms)
for iSrc, iDst := 0, 0; ; iSrc, iDst = iSrc+1, iDst+1 {
var src, dst fs.DirEntry
var srcName, dstName string
if iSrc < len(srcList) {
src = srcList[iSrc].entry
srcName = srcList[iSrc].name
func (m *March) matchListings(srcChan, dstChan <-chan fs.DirEntry, srcOnly, dstOnly func(fs.DirEntry), match func(dst, src fs.DirEntry)) error {
var (
srcPrev, dstPrev fs.DirEntry
srcPrevName, dstPrevName string
src, dst fs.DirEntry
srcName, dstName string
)
srcDone := func() {
srcPrevName = srcName
srcPrev = src
src = nil
srcName = ""
}
dstDone := func() {
dstPrevName = dstName
dstPrev = dst
dst = nil
dstName = ""
}
for {
if m.aborting() {
return m.Ctx.Err()
}
if iDst < len(dstList) {
dst = dstList[iDst].entry
dstName = dstList[iDst].name
// Reload src and dst if needed - we set them to nil if used
if src == nil {
src = <-srcChan
srcName = m.key(src)
}
if dst == nil {
dst = <-dstChan
dstName = m.key(dst)
}
if src == nil && dst == nil {
break
}
if src != nil && iSrc > 0 {
prev := srcList[iSrc-1].entry
prevName := srcList[iSrc-1].name
if srcName == prevName && fs.DirEntryType(prev) == fs.DirEntryType(src) {
if src != nil && srcPrev != nil {
if srcName == srcPrevName && fs.DirEntryType(srcPrev) == fs.DirEntryType(src) {
fs.Logf(src, "Duplicate %s found in source - ignoring", fs.DirEntryType(src))
iDst-- // ignore the src and retry the dst
srcDone() // skip the src and retry the dst
continue
} else if srcName < prevName {
} else if srcName < srcPrevName {
// this should never happen since we sort the listings
panic("Out of order listing in source")
}
}
if dst != nil && iDst > 0 {
prev := dstList[iDst-1].entry
prevName := dstList[iDst-1].name
if dstName == prevName && fs.DirEntryType(dst) == fs.DirEntryType(prev) {
if dst != nil && dstPrev != nil {
if dstName == dstPrevName && fs.DirEntryType(dst) == fs.DirEntryType(dstPrev) {
fs.Logf(dst, "Duplicate %s found in destination - ignoring", fs.DirEntryType(dst))
iSrc-- // ignore the dst and retry the src
dstDone() // skip the dst and retry the src
continue
} else if dstName < prevName {
} else if dstName < dstPrevName {
// this should never happen since we sort the listings
panic("Out of order listing in destination")
}
}
if src != nil && dst != nil {
switch {
case src != nil && dst != nil:
// we can't use CompareDirEntries because srcName, dstName could
// be different then src.Remote() or dst.Remote()
// be different from src.Remote() or dst.Remote()
srcType := fs.DirEntryType(src)
dstType := fs.DirEntryType(dst)
if srcName > dstName || (srcName == dstName && srcType > dstType) {
src = nil
iSrc--
dstOnly(dst)
dstDone()
} else if srcName < dstName || (srcName == dstName && srcType < dstType) {
dst = nil
iDst--
srcOnly(src)
srcDone()
} else {
match(dst, src)
dstDone()
srcDone()
}
}
// Debugf(nil, "src = %v, dst = %v", src, dst)
switch {
case src == nil && dst == nil:
// do nothing
case src == nil:
dstOnly = append(dstOnly, dst)
dstOnly(dst)
dstDone()
case dst == nil:
srcOnly = append(srcOnly, src)
default:
matches = append(matches, matchPair{src: src, dst: dst})
srcOnly(src)
srcDone()
}
}
return
return nil
}
// processJob processes a listDirJob listing the source and
@ -385,27 +355,125 @@ func matchListings(srcListEntries, dstListEntries fs.DirEntries, transforms []ma
func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
var (
jobs []listDirJob
srcList, dstList fs.DirEntries
srcChan = make(chan fs.DirEntry, 100)
dstChan = make(chan fs.DirEntry, 100)
srcListErr, dstListErr error
wg sync.WaitGroup
mu sync.Mutex
ci = fs.GetConfig(m.Ctx)
)
// List the src and dst directories
if !job.noSrc {
srcChan := srcChan // duplicate this as we may override it later
wg.Add(1)
go func() {
defer wg.Done()
srcList, srcListErr = m.srcListDir(job.srcRemote)
srcListErr = m.srcListDir(job.srcRemote, func(entries fs.DirEntries) error {
for _, entry := range entries {
srcChan <- entry
}
return nil
})
close(srcChan)
}()
} else {
close(srcChan)
}
startedDst := false
if !m.NoTraverse && !job.noDst {
startedDst = true
wg.Add(1)
go func() {
defer wg.Done()
dstListErr = m.dstListDir(job.dstRemote, func(entries fs.DirEntries) error {
for _, entry := range entries {
dstChan <- entry
}
return nil
})
close(dstChan)
}()
}
if !m.NoTraverse && !job.noDst {
// If NoTraverse is set, then try to find a matching object
// for each item in the src listing by heading the dst object
if m.NoTraverse && !m.NoCheckDest {
originalSrcChan := srcChan
srcChan = make(chan fs.DirEntry, 100)
ls, err := list.NewSorter(m.Ctx, m.Fdst, list.SortToChan(dstChan), m.key)
if err != nil {
return nil, err
}
startedDst = true
wg.Add(1)
go func() {
defer wg.Done()
dstList, dstListErr = m.dstListDir(job.dstRemote)
defer ls.CleanUp()
g, gCtx := errgroup.WithContext(m.Ctx)
g.SetLimit(ci.Checkers)
for src := range originalSrcChan {
srcChan <- src
if srcObj, ok := src.(fs.Object); ok {
g.Go(func() error {
leaf := path.Base(srcObj.Remote())
dstObj, err := m.Fdst.NewObject(gCtx, path.Join(job.dstRemote, leaf))
if err == nil {
_ = ls.Add(fs.DirEntries{dstObj}) // ignore errors
}
return nil // ignore errors
})
}
}
dstListErr = g.Wait()
sendErr := ls.Send()
if dstListErr == nil {
dstListErr = sendErr
}
close(srcChan)
close(dstChan)
}()
}
if !startedDst {
close(dstChan)
}
// Work out what to do and do it
err := m.matchListings(srcChan, dstChan, func(src fs.DirEntry) {
recurse := m.Callback.SrcOnly(src)
if recurse && job.srcDepth > 0 {
jobs = append(jobs, listDirJob{
srcRemote: src.Remote(),
dstRemote: src.Remote(),
srcDepth: job.srcDepth - 1,
noDst: true,
})
}
}, func(dst fs.DirEntry) {
recurse := m.Callback.DstOnly(dst)
if recurse && job.dstDepth > 0 {
jobs = append(jobs, listDirJob{
srcRemote: dst.Remote(),
dstRemote: dst.Remote(),
dstDepth: job.dstDepth - 1,
noSrc: true,
})
}
}, func(dst, src fs.DirEntry) {
recurse := m.Callback.Match(m.Ctx, dst, src)
if recurse && job.srcDepth > 0 && job.dstDepth > 0 {
jobs = append(jobs, listDirJob{
srcRemote: src.Remote(),
dstRemote: dst.Remote(),
srcDepth: job.srcDepth - 1,
dstDepth: job.dstDepth - 1,
})
}
})
if err != nil {
return nil, err
}
// Wait for listings to complete and report errors
wg.Wait()
@ -430,73 +498,5 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
return nil, dstListErr
}
// If NoTraverse is set, then try to find a matching object
// for each item in the srcList to head dst object
if m.NoTraverse && !m.NoCheckDest {
for _, src := range srcList {
wg.Add(1)
m.limiter <- struct{}{}
go func(src fs.DirEntry) {
defer wg.Done()
if srcObj, ok := src.(fs.Object); ok {
leaf := path.Base(srcObj.Remote())
dstObj, err := m.Fdst.NewObject(m.Ctx, path.Join(job.dstRemote, leaf))
if err == nil {
mu.Lock()
dstList = append(dstList, dstObj)
mu.Unlock()
}
}
<-m.limiter
}(src)
}
wg.Wait()
}
// Work out what to do and do it
srcOnly, dstOnly, matches := matchListings(srcList, dstList, m.transforms)
for _, src := range srcOnly {
if m.aborting() {
return nil, m.Ctx.Err()
}
recurse := m.Callback.SrcOnly(src)
if recurse && job.srcDepth > 0 {
jobs = append(jobs, listDirJob{
srcRemote: src.Remote(),
dstRemote: src.Remote(),
srcDepth: job.srcDepth - 1,
noDst: true,
})
}
}
for _, dst := range dstOnly {
if m.aborting() {
return nil, m.Ctx.Err()
}
recurse := m.Callback.DstOnly(dst)
if recurse && job.dstDepth > 0 {
jobs = append(jobs, listDirJob{
srcRemote: dst.Remote(),
dstRemote: dst.Remote(),
dstDepth: job.dstDepth - 1,
noSrc: true,
})
}
}
for _, match := range matches {
if m.aborting() {
return nil, m.Ctx.Err()
}
recurse := m.Callback.Match(m.Ctx, match.dst, match.src)
if recurse && job.srcDepth > 0 && job.dstDepth > 0 {
jobs = append(jobs, listDirJob{
srcRemote: match.src.Remote(),
dstRemote: match.dst.Remote(),
srcDepth: job.srcDepth - 1,
dstDepth: job.dstDepth - 1,
})
}
}
return jobs, nil
}

View File

@ -14,6 +14,8 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockobject"
@ -147,6 +149,8 @@ func TestMarch(t *testing.T) {
dirDstOnly []string
fileMatch []string
dirMatch []string
noTraverse bool
fastList bool
}{
{
what: "source only",
@ -167,6 +171,45 @@ func TestMarch(t *testing.T) {
fileDstOnly: []string{"dstOnly", "dstOnlyDir/sub"},
dirDstOnly: []string{"dstOnlyDir"},
},
{
what: "no traverse source only",
fileSrcOnly: []string{"test", "test2", "test3", "sub dir/test4"},
dirSrcOnly: []string{"sub dir"},
noTraverse: true,
},
{
what: "no traverse identical",
fileMatch: []string{"test", "test2", "sub dir/test3", "sub dir/sub sub dir/test4"},
noTraverse: true,
},
{
what: "no traverse typical sync",
fileSrcOnly: []string{"srcOnly", "srcOnlyDir/sub"},
fileMatch: []string{"match", "matchDir/match file"},
noTraverse: true,
},
{
what: "fast list source only",
fileSrcOnly: []string{"test", "test2", "test3", "sub dir/test4"},
dirSrcOnly: []string{"sub dir"},
fastList: true,
},
{
what: "fast list identical",
fileMatch: []string{"test", "test2", "sub dir/test3", "sub dir/sub sub dir/test4"},
dirMatch: []string{"sub dir", "sub dir/sub sub dir"},
fastList: true,
},
{
what: "fast list typical sync",
fileSrcOnly: []string{"srcOnly", "srcOnlyDir/sub"},
dirSrcOnly: []string{"srcOnlyDir"},
fileMatch: []string{"match", "matchDir/match file"},
dirMatch: []string{"matchDir"},
fileDstOnly: []string{"dstOnly", "dstOnlyDir/sub"},
dirDstOnly: []string{"dstOnlyDir"},
fastList: true,
},
} {
t.Run(fmt.Sprintf("TestMarch-%s", test.what), func(t *testing.T) {
r := fstest.NewRun(t)
@ -187,18 +230,33 @@ func TestMarch(t *testing.T) {
match = append(match, r.WriteBoth(ctx, f, "hello world", t1))
}
ctx, ci := fs.AddConfig(ctx)
ci.UseListR = test.fastList
fi := filter.GetConfig(ctx)
// Local backend doesn't implement ListR, so monkey patch it for this test
if test.fastList && r.Flocal.Features().ListR == nil {
r.Flocal.Features().ListR = func(ctx context.Context, dir string, callback fs.ListRCallback) error {
r.Flocal.Features().ListR = nil // disable ListR to avoid infinite recursion
return walk.ListR(ctx, r.Flocal, dir, true, -1, walk.ListAll, callback)
}
defer func() {
r.Flocal.Features().ListR = nil
}()
}
mt := &marchTester{
ctx: ctx,
cancel: cancel,
noTraverse: false,
noTraverse: test.noTraverse,
}
fi := filter.GetConfig(ctx)
m := &March{
Ctx: ctx,
Fdst: r.Fremote,
Fsrc: r.Flocal,
Dir: "",
NoTraverse: mt.noTraverse,
NoTraverse: test.noTraverse,
Callback: mt,
DstIncludeAll: fi.Opt.DeleteExcluded,
}
@ -216,95 +274,9 @@ func TestMarch(t *testing.T) {
}
}
func TestMarchNoTraverse(t *testing.T) {
for _, test := range []struct {
what string
fileSrcOnly []string
dirSrcOnly []string
fileMatch []string
dirMatch []string
}{
{
what: "source only",
fileSrcOnly: []string{"test", "test2", "test3", "sub dir/test4"},
dirSrcOnly: []string{"sub dir"},
},
{
what: "identical",
fileMatch: []string{"test", "test2", "sub dir/test3", "sub dir/sub sub dir/test4"},
},
{
what: "typical sync",
fileSrcOnly: []string{"srcOnly", "srcOnlyDir/sub"},
fileMatch: []string{"match", "matchDir/match file"},
},
} {
t.Run(fmt.Sprintf("TestMarch-%s", test.what), func(t *testing.T) {
r := fstest.NewRun(t)
var srcOnly []fstest.Item
var match []fstest.Item
ctx, cancel := context.WithCancel(context.Background())
for _, f := range test.fileSrcOnly {
srcOnly = append(srcOnly, r.WriteFile(f, "hello world", t1))
}
for _, f := range test.fileMatch {
match = append(match, r.WriteBoth(ctx, f, "hello world", t1))
}
mt := &marchTester{
ctx: ctx,
cancel: cancel,
noTraverse: true,
}
fi := filter.GetConfig(ctx)
m := &March{
Ctx: ctx,
Fdst: r.Fremote,
Fsrc: r.Flocal,
Dir: "",
NoTraverse: mt.noTraverse,
Callback: mt,
DstIncludeAll: fi.Opt.DeleteExcluded,
}
mt.processError(m.Run(ctx))
mt.cancel()
err := mt.currentError()
require.NoError(t, err)
precision := fs.GetModifyWindow(ctx, r.Fremote, r.Flocal)
fstest.CompareItems(t, mt.srcOnly, srcOnly, test.dirSrcOnly, precision, "srcOnly")
fstest.CompareItems(t, mt.match, match, test.dirMatch, precision, "match")
})
}
}
func TestNewMatchEntries(t *testing.T) {
var (
a = mockobject.Object("path/a")
A = mockobject.Object("path/A")
B = mockobject.Object("path/B")
c = mockobject.Object("path/c")
)
es := newMatchEntries(fs.DirEntries{a, A, B, c}, nil)
assert.Equal(t, es, matchEntries{
{name: "A", leaf: "A", entry: A},
{name: "B", leaf: "B", entry: B},
{name: "a", leaf: "a", entry: a},
{name: "c", leaf: "c", entry: c},
})
es = newMatchEntries(fs.DirEntries{a, A, B, c}, []matchTransformFn{strings.ToLower})
assert.Equal(t, es, matchEntries{
{name: "a", leaf: "A", entry: A},
{name: "a", leaf: "a", entry: a},
{name: "b", leaf: "B", entry: B},
{name: "c", leaf: "c", entry: c},
})
// matchPair is a matched pair of direntries returned by matchListings
type matchPair struct {
src, dst fs.DirEntry
}
func TestMatchListings(t *testing.T) {
@ -414,11 +386,11 @@ func TestMatchListings(t *testing.T) {
{
what: "Case insensitive duplicate - transform to lower case",
input: fs.DirEntries{
a, a,
A, A,
a, A,
A, a,
},
matches: []matchPair{
{A, A},
{a, A}, // the first duplicate will be returned with a stable sort
},
transforms: []matchTransformFn{strings.ToLower},
},
@ -507,22 +479,61 @@ func TestMatchListings(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("TestMatchListings-%s", test.what), func(t *testing.T) {
var srcList, dstList fs.DirEntries
for i := 0; i < len(test.input); i += 2 {
src, dst := test.input[i], test.input[i+1]
if src != nil {
srcList = append(srcList, src)
}
if dst != nil {
dstList = append(dstList, dst)
}
ctx := context.Background()
var wg sync.WaitGroup
// Skeleton March for testing
m := March{
Ctx: context.Background(),
transforms: test.transforms,
}
srcOnly, dstOnly, matches := matchListings(srcList, dstList, test.transforms)
// Make a channel to send the source (0) or dest (1) using a list.Sorter
makeChan := func(offset int) <-chan fs.DirEntry {
out := make(chan fs.DirEntry)
ls, err := list.NewSorter(ctx, nil, list.SortToChan(out), m.key)
require.NoError(t, err)
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < len(test.input); i += 2 {
entry := test.input[i+offset]
if entry != nil {
require.NoError(t, ls.Add(fs.DirEntries{entry}))
}
}
require.NoError(t, ls.Send())
ls.CleanUp()
close(out)
}()
return out
}
var srcOnly fs.DirEntries
srcOnlyFn := func(entry fs.DirEntry) {
srcOnly = append(srcOnly, entry)
}
var dstOnly fs.DirEntries
dstOnlyFn := func(entry fs.DirEntry) {
dstOnly = append(dstOnly, entry)
}
var matches []matchPair
matchFn := func(dst, src fs.DirEntry) {
matches = append(matches, matchPair{dst: dst, src: src})
}
err := m.matchListings(makeChan(0), makeChan(1), srcOnlyFn, dstOnlyFn, matchFn)
require.NoError(t, err)
wg.Wait()
assert.Equal(t, test.srcOnly, srcOnly, test.what, "srcOnly differ")
assert.Equal(t, test.dstOnly, dstOnly, test.what, "dstOnly differ")
assert.Equal(t, test.matches, matches, test.what, "matches differ")
// now swap src and dst
dstOnly, srcOnly, matches = matchListings(dstList, srcList, test.transforms)
srcOnly, dstOnly, matches = nil, nil, nil
err = m.matchListings(makeChan(0), makeChan(1), srcOnlyFn, dstOnlyFn, matchFn)
require.NoError(t, err)
wg.Wait()
assert.Equal(t, test.srcOnly, srcOnly, test.what, "srcOnly differ")
assert.Equal(t, test.dstOnly, dstOnly, test.what, "dstOnly differ")
assert.Equal(t, test.matches, matches, test.what, "matches differ")

View File

@ -12,9 +12,9 @@ import (
"github.com/stretchr/testify/require"
)
// TestListDirSorted is integration testing code in fs/list/list.go
// testListDirSorted is integration testing code in fs/list/list.go
// which can't be tested there due to import loops.
func TestListDirSorted(t *testing.T) {
func testListDirSorted(t *testing.T, listFn func(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)) {
r := fstest.NewRun(t)
ctx := context.Background()
@ -52,20 +52,20 @@ func TestListDirSorted(t *testing.T) {
return name
}
items, err = list.DirSorted(context.Background(), r.Fremote, true, "")
items, err = listFn(context.Background(), r.Fremote, true, "")
require.NoError(t, err)
require.Len(t, items, 3)
assert.Equal(t, "a.txt", str(0))
assert.Equal(t, "sub dir/", str(1))
assert.Equal(t, "zend.txt", str(2))
items, err = list.DirSorted(context.Background(), r.Fremote, false, "")
items, err = listFn(context.Background(), r.Fremote, false, "")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/", str(0))
assert.Equal(t, "zend.txt", str(1))
items, err = list.DirSorted(context.Background(), r.Fremote, true, "sub dir")
items, err = listFn(context.Background(), r.Fremote, true, "sub dir")
require.NoError(t, err)
require.Len(t, items, 4)
assert.Equal(t, "sub dir/hello world", str(0))
@ -73,7 +73,7 @@ func TestListDirSorted(t *testing.T) {
assert.Equal(t, "sub dir/ignore dir/", str(2))
assert.Equal(t, "sub dir/sub sub dir/", str(3))
items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir")
items, err = listFn(context.Background(), r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/", str(0))
@ -82,25 +82,45 @@ func TestListDirSorted(t *testing.T) {
// testing ignore file
fi.Opt.ExcludeFile = []string{".ignore"}
items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir")
items, err = listFn(context.Background(), r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 1)
assert.Equal(t, "sub dir/sub sub dir/", str(0))
items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir/ignore dir")
items, err = listFn(context.Background(), r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 0)
items, err = list.DirSorted(context.Background(), r.Fremote, true, "sub dir/ignore dir")
items, err = listFn(context.Background(), r.Fremote, true, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
fi.Opt.ExcludeFile = nil
items, err = list.DirSorted(context.Background(), r.Fremote, false, "sub dir/ignore dir")
items, err = listFn(context.Background(), r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
}
// TestListDirSorted is integration testing code in fs/list/list.go
// which can't be tested there due to import loops.
func TestListDirSorted(t *testing.T) {
testListDirSorted(t, list.DirSorted)
}
// TestListDirSortedFn is integration testing code in fs/list/list.go
// which can't be tested there due to import loops.
func TestListDirSortedFn(t *testing.T) {
listFn := func(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
callback := func(newEntries fs.DirEntries) error {
entries = append(entries, newEntries...)
return nil
}
err = list.DirSortedFn(ctx, f, includeAll, dir, callback, nil)
return entries, err
}
testListDirSorted(t, listFn)
}

View File

@ -10,6 +10,7 @@ import (
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
)
@ -283,7 +284,8 @@ func StatJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt)
return nil, nil
}
// Check the root directory exists
_, err := fsrc.List(ctx, "")
entries, err := fsrc.List(ctx, "")
accounting.Stats(ctx).Listed(int64(len(entries)))
if err != nil {
return nil, err
}
@ -322,6 +324,7 @@ func StatJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt)
parent = ""
}
entries, err := fsrc.List(ctx, parent)
accounting.Stats(ctx).Listed(int64(len(entries)))
if err == fs.ErrorDirNotFound {
return nil, nil
} else if err != nil {

View File

@ -311,6 +311,7 @@ func DirectoryOptionalInterfaces(d Directory) (supported, unsupported []string)
type ListRCallback func(entries DirEntries) error
// ListRFn defines the call used to recursively list a directory
// with ListR or page through a directory with ListP
type ListRFn func(ctx context.Context, dir string, callback ListRCallback) error
// Flagger describes the interface rclone config types flags must satisfy

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/list"
@ -273,7 +274,7 @@ func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) {
sort.Strings(dirs)
// Now convert to bulkier Dir in batches and send
now := time.Now()
list := NewListRHelper(fn)
list := list.NewHelper(fn)
for _, dir := range dirs {
err = list.Add(fs.NewDir(dir, now))
if err != nil {
@ -296,6 +297,7 @@ func listR(ctx context.Context, f fs.Fs, path string, includeAll bool, listType
}
var mu sync.Mutex
err := doListR(ctx, path, func(entries fs.DirEntries) (err error) {
accounting.Stats(ctx).Listed(int64(len(entries)))
if synthesizeDirs {
err = dm.addEntries(entries)
if err != nil {
@ -465,6 +467,7 @@ func walkRDirTree(ctx context.Context, f fs.Fs, startPath string, includeAll boo
includeDirectory := fi.IncludeDirectory(ctx, f)
var mu sync.Mutex
err := listR(ctx, startPath, func(entries fs.DirEntries) error {
accounting.Stats(ctx).Listed(int64(len(entries)))
mu.Lock()
defer mu.Unlock()
for _, entry := range entries {
@ -641,41 +644,3 @@ func GetAll(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel
})
return
}
// ListRHelper is used in the implementation of ListR to accumulate DirEntries
type ListRHelper struct {
callback fs.ListRCallback
entries fs.DirEntries
}
// NewListRHelper should be called from ListR with the callback passed in
func NewListRHelper(callback fs.ListRCallback) *ListRHelper {
return &ListRHelper{
callback: callback,
}
}
// send sends the stored entries to the callback if there are >= max
// entries.
func (lh *ListRHelper) send(max int) (err error) {
if len(lh.entries) >= max {
err = lh.callback(lh.entries)
lh.entries = lh.entries[:0]
}
return err
}
// Add an entry to the stored entries and send them if there are more
// than a certain amount
func (lh *ListRHelper) Add(entry fs.DirEntry) error {
if entry == nil {
return nil
}
lh.entries = append(lh.entries, entry)
return lh.send(100)
}
// Flush the stored entries (if any) sending them to the callback
func (lh *ListRHelper) Flush() error {
return lh.send(1)
}

go.mod
View File

@ -47,6 +47,7 @@ require (
github.com/klauspost/compress v1.18.0
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6
github.com/lanrat/extsort v1.0.2
github.com/mattn/go-colorable v0.1.14
github.com/mattn/go-runewidth v0.0.16
github.com/minio/minio-go/v7 v7.0.87
