mirror of https://github.com/rclone/rclone.git (synced 2025-04-19 01:59:00 +08:00)

add helper and fix types

This commit is contained in:
parent e8fe6ba8f1
commit 99f683b7ec
@@ -16,7 +16,6 @@ import (
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/dircache"
@@ -28,11 +27,13 @@ import (
)

const (
    minSleep = 10 * time.Millisecond
    maxSleep = 2 * time.Second

    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential

    defaultChunkSize = int64(524288)
    maxPartNum       = 10000

    rootURL  = "https://openapi.alipan.com"
    authURL  = "https://openapi.alipan.com/oauth/authorize"
    tokenURL = "https://openapi.alipan.com/oauth/access_token"
@@ -108,12 +109,13 @@ type Fs struct {
    root string // the path we are working on
    opt Options // parsed options
    features *fs.Features // optional features
    srv *rest.Client // the connection to the server
    client *Client // Aliyun Drive client
    dirCache *dircache.DirCache // Map of directory path to directory id
    pacer *fs.Pacer // pacer for API calls
    tokenRenewer *oauthutil.Renew // renew the token on expiry
    m configmap.Mapper
    driveID string
    rootID string // the id of the root folder
}

// Object describes a adrive object
@@ -126,8 +128,8 @@ type Object struct {
    size int64 // size of the object
    modTime time.Time // modification time of the object
    id string // ID of the object
    mimeType string // The object MIME type
    parent string // ID of the parent directory
    parentID string // ID of the parent directory
    sha1 string // SHA-1 of the object content
}

// Options defines the configuration for this backend
@@ -143,112 +145,41 @@ type Options struct {
    ExpiresAt string `config:"expires_at"`
}

// parsePath parses a box 'url'
func parsePath(path string) (root string) {
    root = strings.Trim(path, "/")
    return
}

var retryErrorCodes = []int{
    403,
    404,
    429, // Too Many Requests
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

func errorHandler(resp *http.Response) error {
    // Decode error response
    errResponse := new(api.Error)
    err := rest.DecodeJSON(resp, &errResponse)
    if err != nil {
        fs.Debugf(nil, "Couldn't decode error response: %v", err)
    }
    if errResponse.Message == "" {
        errResponse.Message = resp.Status
    }
    if errResponse.Code == "" {
        errResponse.Code = strconv.Itoa(resp.StatusCode)
    }
    return errResponse
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, directoryID string, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, directoryID string) (items []api.Item, err error) {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/adrive/v1.0/openFile/list",
    }

    request := api.ListRequest{
    request := api.FileListReq{
        DriveID:      f.driveID,
        ParentFileID: directoryID,
    }

OUTER:
    for {
        var result api.List
        var resp *http.Response
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
            return shouldRetry(ctx, resp, err)
        })
        if err != nil {
            return found, fmt.Errorf("couldn't list files: %w", err)
        }
        if len(result.Items) == 0 {
            break
        }
        for i := range result.Items {
            item := &result.Items[i]

            item.Name = f.opt.Enc.ToStandardName(item.Name)
            if fn(item) {
                found = true
                break OUTER
            }
        }
    }
    var result api.FileListResp
    _, err = f.client.CallJSON(ctx, &opts, &request, &result)
    if err != nil {
        return nil, err
    }
    return

    return result.Items, nil
}
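
For illustration only (not part of this commit): the refactored listAll now returns the whole slice of items instead of driving a callback, so callers fetch once and filter the result themselves. A minimal caller-side sketch, using a hypothetical helper name findByName and the api.Item fields shown in this diff:

// findByName is a hypothetical illustration of the new calling pattern:
// list the directory once, then scan the slice for a matching name.
func (f *Fs) findByName(ctx context.Context, directoryID, name string) (*api.Item, error) {
    items, err := f.listAll(ctx, directoryID)
    if err != nil {
        return nil, err
    }
    for i := range items {
        if strings.EqualFold(items[i].Name, name) {
            return &items[i], nil
        }
    }
    return nil, fs.ErrorObjectNotFound
}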

// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
    path := " /adrive/v1.0/openFile"
    path := "/adrive/v1.0/openFile"
    if f.opt.RemoveWay == RemoveWayTrash {
        path += "/recyclebin/trash"
    }

    if f.opt.RemoveWay == RemoveWayDelete {
    } else if f.opt.RemoveWay == RemoveWayDelete {
        path += "/delete"
    }

    opts := rest.Opts{
        Method:     "DELETE",
        Path:       path,
        RootURL:    rootURL,
        NoResponse: true,
    }

@@ -257,10 +188,8 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
        FileID: id,
    }

    return f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, req, nil)
        return shouldRetry(ctx, resp, err)
    })
    _, err := f.client.CallJSON(ctx, &opts, &req, nil)
    return err
}

// Creates from the parameters passed in a half finished Object which
@@ -277,11 +206,11 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
    }
    // Temporary Object under construction
    o = &Object{
        fs:      f,
        remote:  remote,
        parent:  dirID,
        size:    size,
        modTime: modTime,
        fs:       f,
        remote:   remote,
        parentID: dirID,
        size:     size,
        modTime:  modTime,
    }
    return o, leaf, dirID, nil
}
@@ -327,32 +256,6 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
    return o, nil
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
    leaf, dirID, err := f.dirCache.FindPath(ctx, path, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound
        }
        return nil, err
    }

    found, err := f.listAll(ctx, dirID, func(item *api.Item) bool {
        if item.Name == leaf {
            info = item
            return true
        }
        return false
    })
    if err != nil {
        return nil, err
    }
    if !found {
        return nil, fs.ErrorObjectNotFound
    }
    return info, nil
}

// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
    opts := rest.Opts{
@@ -361,7 +264,7 @@ func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = f.client.CallJSON(ctx, &opts, nil, &info)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
@@ -380,14 +283,14 @@ func (f *Fs) getDriveInfo(ctx context.Context) error {
    var info *api.DriveInfo
    var err error
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = f.client.CallJSON(ctx, &opts, nil, &info)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return fmt.Errorf("failed to get driveinfo: %w", err)
    }

    f.driveID = info.DefaultDriveID
    f.client.driveID = info.DefaultDriveID

    return nil
}
@@ -400,7 +303,7 @@ func (f *Fs) getSpaceInfo(ctx context.Context) (info *api.SpaceInfo, err error)
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = f.client.CallJSON(ctx, &opts, nil, &info)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
@@ -417,7 +320,7 @@ func (f *Fs) getVipInfo(ctx context.Context) (info *api.VipInfo, err error) {
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
        resp, err = f.client.CallJSON(ctx, &opts, nil, &info)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
@@ -442,7 +345,7 @@ func (f *Fs) move(ctx context.Context, id, leaf, directoryID string) (info *api.

    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
        resp, err = f.client.CallJSON(ctx, &opts, &move, &info)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
@@ -452,16 +355,6 @@ func (f *Fs) move(ctx context.Context, id, leaf, directoryID string) (info *api.
    return info, nil
}

func (f *Fs) createFile(ctx context.Context) error {
    // TODO
    return nil
}

func (f *Fs) getUploadUrl(ctx context.Context) error {
    // TODO
    return nil
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
    if info.Type == ItemTypeFolder {
@@ -474,6 +367,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
    o.size = int64(info.Size)
    o.modTime = info.CreatedAt
    o.id = info.FileID
    o.parentID = info.ParentFileID
    return nil
}

@@ -484,16 +378,28 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
    if o.hasMetaData {
        return nil
    }
    info, err := o.fs.readMetaDataForPath(ctx, o.remote)

    leaf, dirID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return fs.ErrorObjectNotFound
        }
        return err
    }

    list, err := o.fs.listAll(ctx, dirID)
    if err != nil {
        return err
    }
    return o.setMetaData(info)
}
    var info api.Item
    for _, item := range list {
        if item.Type == ItemTypeFile && strings.EqualFold(item.Name, leaf) {
            info = item
            break
        }
    }

func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    // TODO
    return nil
    return o.setMetaData(&info)
}

// ------------------------------------------------------------
@@ -552,7 +458,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    var resp *http.Response
    var copyResp api.FileCopyResp
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &request, &copyResp)
        resp, err = f.client.CallJSON(ctx, &opts, &request, &copyResp)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
@@ -593,7 +499,7 @@ func (f *Fs) Precision() time.Duration {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
    return hash.Set(hash.SHA1)
}

// Features returns the optional features of this Fs
@@ -619,7 +525,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID string, leaf string) (newID s
        CheckNameMode: "refuse",
    }
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(ctx, &opts, &req, &info)
        resp, err = f.client.CallJSON(ctx, &opts, &req, &info)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
@@ -631,13 +537,18 @@ func (f *Fs) CreateDir(ctx context.Context, pathID string, leaf string) (newID s
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
    // Find the leaf in pathID
    found, err = f.listAll(ctx, pathID, func(item *api.Item) bool {
    items, err := f.listAll(ctx, pathID)
    if err != nil {
        return "", false, err
    }
    for _, item := range items {
        if strings.EqualFold(item.Name, leaf) {
            pathIDOut = item.FileID
            return true
            found = true
            break
        }
        return false
    })
    }

    return pathIDOut, found, err
}

@@ -649,26 +560,21 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    }
    var iErr error

    _, err = f.listAll(ctx, directoryID, func(info *api.Item) bool {
    list, err := f.listAll(ctx, directoryID)
    for _, info := range list {
        remote := path.Join(dir, info.Name)
        switch info.Type {
        case ItemTypeFolder:
            // cache the directory ID for later lookups
        if info.Type == ItemTypeFolder {
            f.dirCache.Put(remote, info.FileID)
            d := fs.NewDir(remote, info.UpdatedAt).SetID(info.FileID).SetParentID(dir)
            // FIXME more info from dir?
            entries = append(entries, d)
        case ItemTypeFile:
            var o fs.Object
            o, err = f.newObjectWithInfo(ctx, remote, info)
            if err != nil {
                iErr = err
                return true
        } else {
            o, err := f.newObjectWithInfo(ctx, remote, &info)
            if err == nil {
                entries = append(entries, o)
            }
            entries = append(entries, o)
        }
        return false
    })
    }

    if err != nil {
        return nil, err
    }
@@ -686,7 +592,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Put implements fs.Fs.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    // TODO
    panic("unimplemented")
    o := &Object{}
    return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
@@ -817,7 +724,10 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    return "", nil
    if t != hash.SHA1 {
        return "", hash.ErrUnsupported
    }
    return o.sha1, nil
}

// Storable returns a boolean showing whether this object storable
@@ -831,14 +741,48 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
}

// Open implements fs.Object.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    // TODO
    panic("unimplemented")
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/adrive/v1.0/openFile/getDownloadUrl",
    }

    req := api.DownloadReq{
        DriveID: o.fs.driveID,
        FileID:  o.id,
    }

    var resp *http.Response
    var download api.DownloadResp
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.client.CallJSON(ctx, &opts, &req, &download)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
    }

    fs.FixRangeOption(options, o.size)

    opts = rest.Opts{
        Method:  download.Method,
        RootURL: download.Url,
        Options: options,
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.client.Call(ctx, &opts)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return nil, err
    }
    return resp.Body, err
}
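
For illustration only (not part of this commit): the new Open is a two-step flow, first POSTing getDownloadUrl to obtain a short-lived URL and HTTP method, then fetching the body with any range options applied. A hypothetical caller snippet reading the first KiB of an object, assuming the object is at least that large:

    // Illustration: partial read through the new Open implementation.
    rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
    if err != nil {
        // handle the error
    }
    defer func() { _ = rc.Close() }()
    firstKiB, err := io.ReadAll(rc) // requires "io"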

// Update implements fs.Object.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    return o.upload(ctx, in, src, options...)
    // return o.upload(ctx, in, src, options...)
    return nil
}

// Remove implements fs.Object.
@@ -848,94 +792,102 @@ func (o *Object) Remove(ctx context.Context) error {

// ParentID implements fs.ParentIDer.
func (o *Object) ParentID() string {
    return o.parent
    return o.parentID
}

func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    root = parsePath(root)

    // Create HTTP client
    client := fshttp.NewClient(ctx)
    var ts *oauthutil.TokenSource

    if opt.AccessToken == "" {
        client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
        if err != nil {
            return nil, fmt.Errorf("failed to configure Box: %w", err)
            return nil, fmt.Errorf("failed to configure Aliyun Drive: %w", err)
        }
    }

    // Create filesystem
    f := &Fs{
        name: name,
        root: root,
        opt:  *opt,
        srv:  rest.NewClient(client).SetRoot(rootURL),
        m:    m,
        pacer: fs.NewPacer(ctx, pacer.NewDefault(
            pacer.MinSleep(minSleep),
            pacer.MaxSleep(maxSleep),
            pacer.DecayConstant(decayConstant),
        )),
        name:   name,
        root:   root,
        opt:    *opt,
        client: NewClient(client, rootURL),
        m:      m,
        pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }

    f.features = (&fs.Features{
        CaseInsensitive:         true,
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)
    f.srv.SetErrorHandler(errorHandler)

    // Set up authentication
    if f.opt.AccessToken != "" {
        f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
        f.client.c.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
    }

    if ts != nil {
        f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
            _, err = f.readMetaDataForPath(ctx, "")
            return err
        })
    }

    rootID := f.opt.RootFolderID
    f.dirCache = dircache.New(root, rootID, f)
    // Set the root folder ID
    if f.opt.RootFolderID != "" {
        f.rootID = f.opt.RootFolderID
    } else {
        f.rootID = "root"
    }
    f.dirCache = dircache.New(root, f.rootID, f)

    // Find the current root
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
        tempF.dirCache = dircache.New(newRoot, f.rootID, &tempF)
        tempF.root = newRoot
        // Make new Fs which is the parent
        err = tempF.dirCache.FindRoot(ctx, false)
        if err != nil {
            // No root so return old f
            return f, nil
        }
        _, err = tempF.newObjectWithInfo(ctx, remote, nil)
        if err != nil {
            if err == fs.ErrorObjectNotFound {
                // File doesn't exist so return old f
                return f, nil
            }
            return nil, err
        }
        f.features.Fill(ctx, &tempF)
        // XXX: update the old f here instead of returning tempF, since
        // `features` were already filled with functions having *f as a receiver.
        // See https://github.com/rclone/rclone/issues/2182
        f.dirCache = tempF.dirCache
        f.root = tempF.root
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
    }

    f.getDriveInfo(ctx)
    // Get drive info
    err = f.getDriveInfo(ctx)
    if err != nil {
        return nil, fmt.Errorf("failed to get drive info: %w", err)
    }

    return f, nil
}

// Check the interfaces are satisfied
// Check interfaces
var (
    _ fs.Abouter = (*Fs)(nil)
    _ fs.Copier  = (*Fs)(nil)
@@ -31,13 +31,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {
}

type Error struct {
    Code string `json:"code"`
    Code int `json:"code"`
    Message string `json:"message"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
    out := fmt.Sprintf("Error %q (%s)", e.Message, e.Code)
    out := fmt.Sprintf("Error %q (%v)", e.Message, e.Code)
    if e.Message != "" {
        out += ": " + e.Message
    }
@@ -72,86 +72,523 @@ func (e *Token) Expiry() (t time.Time) {
    return
}

type User struct {
    ID string `json:"id"`
    Name string `json:"name"`
    Avatar string `json:"avatar"`
    Phone *string `json:"phone"`
// UserInfo represents the user information returned by the /oauth/users/info endpoint
type UserInfo struct {
    ID string `json:"id"` // User ID, unique identifier
    Name string `json:"name"` // Nickname, returns default nickname if not set
    Avatar string `json:"avatar"` // Avatar URL, empty if not set
    Phone string `json:"phone"` // Phone number, requires user:phone permission
}

// DriveInfo represents the user and drive information returned by the /adrive/v1.0/user/getDriveInfo endpoint
type DriveInfo struct {
    UserID string `json:"user_id"`
    Name string `json:"name"`
    Avatar string `json:"avatar"`
    DefaultDriveID string `json:"default_drive_id" default:"drive"`
    ResourceDriveID *string `json:"resource_drive_id,omitempty"`
    BackupDriveID *string `json:"backup_drive_id,omitempty"`
    FolderID *string `json:"folder_id,omitempty"`
    UserID string `json:"user_id"` // User ID, unique identifier
    Name string `json:"name"` // Nickname
    Avatar string `json:"avatar"` // Avatar URL
    DefaultDriveID string `json:"default_drive_id"` // Default drive ID
    ResourceDriveID string `json:"resource_drive_id"` // Resource library drive ID, only returned if authorized
    BackupDriveID string `json:"backup_drive_id"` // Backup drive ID, only returned if authorized
    FolderID string `json:"folder_id"` // Folder ID, only returned if authorized
}

// PersonalSpaceInfo represents the user's personal space usage information
type PersonalSpaceInfo struct {
    UsedSize int64 `json:"used_size"` // Used space in bytes
    TotalSize int64 `json:"total_size"` // Total space in bytes
}

// SpaceInfo represents the response from /adrive/v1.0/user/getSpaceInfo endpoint
type SpaceInfo struct {
    UsedSize int64 `json:"used_size"`
    TotalSize int64 `json:"total_size"`
    PersonalSpaceInfo PersonalSpaceInfo `json:"personal_space_info"` // Personal space usage information
}

// VipIdentity represents the VIP status of a user
type VipIdentity string

const (
    VipIdentityMember VipIdentity = "member" // Regular member
    VipIdentityVIP VipIdentity = "vip" // VIP member
    VipIdentitySVIP VipIdentity = "svip" // Super VIP member
)

// VipInfo represents the response from /business/v1.0/user/getVipInfo endpoint
type VipInfo struct {
    Identity string `json:"identity"`
    Level *string `json:"level,omitempty"`
    Expire time.Time `json:"expire"`
    ThirdPartyVip bool `json:"third_party_vip"`
    ThirdPartyVipExpire *string `json:"third_party_vip_expire,omitempty"`
    Identity VipIdentity `json:"identity"` // VIP status: member, vip, or svip
    Level string `json:"level,omitempty"` // Storage level (e.g., "20TB", "8TB")
    Expire int64 `json:"expire"` // Expiration timestamp in seconds
    ThirdPartyVip bool `json:"thirdPartyVip"` // Whether third-party VIP benefits are active
    ThirdPartyVipExpire int64 `json:"thirdPartyVipExpire"` // Third-party VIP benefits expiration timestamp
}

type Item struct {
    DriveID string `json:"drive_id"`
    FileID string `json:"file_id"`
    ParentFileID string `json:"parent_file_id"`
    Name string `json:"name"`
    Size int64 `json:"size"`
    FileExtension string `json:"file_extension"`
    ContentHash string `json:"content_hash"`
    Category string `json:"category"`
    Type string `json:"type"`
    Thumbnail *string `json:"thumbnail,omitempty"`
    URL *string `json:"url,omitempty"`
    CreatedAt time.Time `json:"created_at"`
    UpdatedAt time.Time `json:"updated_at"`
// Scope represents a single permission scope
type Scope struct {
    Scope string `json:"scope"` // Permission scope identifier
}

type List struct {
    Items []Item `json:"items"`
    NextMarker *string `json:"next_marker"`
// UserScopes represents the response from /oauth/users/scopes endpoint
type UserScopes struct {
    ID string `json:"id"` // User ID
    Scopes []Scope `json:"scopes"` // List of permission scopes
}

type ListRequest struct {
    DriveID string `json:"drive_id"`
    ParentFileID string `json:"parent_file_id"`
// TrialStatus represents the trial status of a VIP feature
type TrialStatus string

const (
    TrialStatusNoTrial TrialStatus = "noTrial" // Trial not allowed
    TrialStatusOnTrial TrialStatus = "onTrial" // Trial in progress
    TrialStatusEndTrial TrialStatus = "endTrial" // Trial ended
    TrialStatusAllowTrial TrialStatus = "allowTrial" // Trial allowed but not started
)

// FeatureCode represents the available VIP features
type FeatureCode string

const (
    FeatureCode1080p FeatureCode = "hd.1080p" // 1080p HD feature
    FeatureCode1080pPlus FeatureCode = "hd.1080p.plus" // 1440p HD feature
)

// VipFeature represents a single VIP feature with its trial status
type VipFeature struct {
    Code FeatureCode `json:"code"` // Feature identifier
    Intercept bool `json:"intercept"` // Whether the feature is intercepted
    TrialStatus TrialStatus `json:"trialStatus"` // Current trial status
    TrialDuration int64 `json:"trialDuration"` // Trial duration in minutes
    TrialStartTime int64 `json:"trialStartTime"` // Trial start timestamp
}

type DeleteFile struct {
    DriveID string `json:"drive_id"`
    FileID string `json:"file_id"`
// VipFeatureList represents the response from /business/v1.0/vip/feature/list endpoint
type VipFeatureList struct {
    Result []VipFeature `json:"result"` // List of VIP features
}

type FileMoveCopy struct {
    DriveID string `json:"drive_id"`
    FileID string `json:"file_id"`
    ToParentFileID string `json:"to_parent_file_id"`
    CheckNameMode *string `json:"check_name_mode"`
    NewName *string `json:"new_name"`
// VipFeatureTrialRequest represents the request body for /business/v1.0/vip/feature/trial endpoint
type VipFeatureTrialRequest struct {
    FeatureCode FeatureCode `json:"featureCode"` // Feature code to start trial for
}

type FileCopyResp struct {
    DriveID string `json:"drive_id"`
    FileID string `json:"file_id"`
    AsyncTaskID string `json:"async_task_id"`
    Exist bool `json:"exist"`
// VipFeatureTrialResponse represents the response from /business/v1.0/vip/feature/trial endpoint
type VipFeatureTrialResponse struct {
    TrialStatus TrialStatus `json:"trialStatus"` // Current trial status
    TrialDuration int64 `json:"trialDuration"` // Trial duration in minutes
    TrialStartTime int64 `json:"trialStartTime"` // Trial start timestamp
}

type CreateFolder struct {
    DriveID string `json:"drive_id"`
    ParentFileID string `json:"parent_file_id"`
    Name string `json:"name"`
    Type string `json:"type"`
    CheckNameMode string `json:"check_name_mode"`
// DriveFile represents a file in a specific drive
type DriveFile struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
}

// CreateFastTransferRequest represents the request body for /adrive/v1.0/openFile/createFastTransfer endpoint
type CreateFastTransferRequest struct {
    DriveFileList []DriveFile `json:"drive_file_list"` // List of drive files to share [1,100]
}

// CreateFastTransferResponse represents the response from /adrive/v1.0/openFile/createFastTransfer endpoint
type CreateFastTransferResponse struct {
    Expiration Time `json:"expiration"` // Fast transfer expiration time
    CreatorID string `json:"creator_id"` // ID of the fast transfer creator
    ShareID string `json:"share_id"` // Share ID
    ShareURL string `json:"share_url"` // Share URL
    DriveFileList []DriveFile `json:"drive_file_list"` // List of shared drive files
}

// FileType represents the type of a file
type FileType string

const (
    FileTypeFile FileType = "file" // Regular file
    FileTypeFolder FileType = "folder" // Directory/folder
)

// FileCategory represents the category of a file
type FileCategory string

const (
    FileCategoryVideo FileCategory = "video" // Video files
    FileCategoryDoc FileCategory = "doc" // Document files
    FileCategoryAudio FileCategory = "audio" // Audio files
    FileCategoryZip FileCategory = "zip" // Archive files
    FileCategoryOthers FileCategory = "others" // Other files
    FileCategoryImage FileCategory = "image" // Image files
)

// OrderDirection represents the sort order direction
type OrderDirection string

const (
    OrderDirectionASC OrderDirection = "ASC" // Ascending order
    OrderDirectionDESC OrderDirection = "DESC" // Descending order
)

// OrderBy represents the field to sort by
type OrderBy string

const (
    OrderByCreatedAt OrderBy = "created_at" // Sort by creation time
    OrderByUpdatedAt OrderBy = "updated_at" // Sort by update time
    OrderByName OrderBy = "name" // Sort by name
    OrderBySize OrderBy = "size" // Sort by size
    OrderByNameEnhanced OrderBy = "name_enhanced" // Sort by name with enhanced number handling
)

// VideoMediaMetadata represents video file metadata
type VideoMediaMetadata struct {
    // Add video metadata fields as needed
}

// VideoPreviewMetadata represents video preview metadata
type VideoPreviewMetadata struct {
    // Add video preview metadata fields as needed
}

// FileItem represents a file or folder item in the drive
type FileItem struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    ParentFileID string `json:"parent_file_id"` // Parent folder ID
    Name string `json:"name"` // File name
    Size int64 `json:"size"` // File size in bytes
    FileExtension string `json:"file_extension"` // File extension
    ContentHash string `json:"content_hash"` // File content hash
    Category FileCategory `json:"category"` // File category
    Type FileType `json:"type"` // File type (file/folder)
    Thumbnail string `json:"thumbnail,omitempty"` // Thumbnail URL
    URL string `json:"url,omitempty"` // Preview/download URL for files under 5MB
    CreatedAt Time `json:"created_at"` // Creation time
    UpdatedAt Time `json:"updated_at"` // Last update time
    PlayCursor string `json:"play_cursor,omitempty"` // Playback progress
    VideoMediaMetadata *VideoMediaMetadata `json:"video_media_metadata,omitempty"` // Video metadata
    VideoPreviewMetadata *VideoPreviewMetadata `json:"video_preview_metadata,omitempty"` // Video preview metadata
}

// ListFileRequest represents the request body for /adrive/v1.0/openFile/list endpoint
type ListFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    Limit int `json:"limit,omitempty"` // Max items to return (default 50, max 100)
    Marker string `json:"marker,omitempty"` // Pagination marker
    OrderBy OrderBy `json:"order_by,omitempty"` // Sort field
    OrderDirection OrderDirection `json:"order_direction,omitempty"` // Sort direction
    ParentFileID string `json:"parent_file_id"` // Parent folder ID (root for root folder)
    Category string `json:"category,omitempty"` // File categories (comma-separated)
    Type FileType `json:"type,omitempty"` // Filter by type
    VideoThumbnailTime int64 `json:"video_thumbnail_time,omitempty"` // Video thumbnail timestamp (ms)
    VideoThumbnailWidth int `json:"video_thumbnail_width,omitempty"` // Video thumbnail width
    ImageThumbnailWidth int `json:"image_thumbnail_width,omitempty"` // Image thumbnail width
    Fields string `json:"fields,omitempty"` // Fields to return
}
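
For illustration only (not part of this commit): a minimal sketch of how a paginated listing request could be built from these types, assuming the package is imported as api, that "root" addresses the root folder as documented above, and that "example-drive-id" stands in for a real drive ID:

    req := api.ListFileRequest{
        DriveID:        "example-drive-id", // hypothetical value
        ParentFileID:   "root",
        Limit:          100,
        OrderBy:        api.OrderByName,
        OrderDirection: api.OrderDirectionASC,
    }
    body, err := json.Marshal(&req) // requires "encoding/json"
    if err != nil {
        // handle the error
    }
    _ = body // POST to /adrive/v1.0/openFile/list; repeat with Marker = NextMarker until it is empty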

// ListFileResponse represents the response from file listing endpoints
type ListFileResponse struct {
    Items []FileItem `json:"items"` // List of files/folders
    NextMarker string `json:"next_marker,omitempty"` // Next page marker
}

// SearchFileRequest represents the request body for /adrive/v1.0/openFile/search endpoint
type SearchFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    Limit int `json:"limit,omitempty"` // Max items to return (default 100, max 100)
    Marker string `json:"marker,omitempty"` // Pagination marker
    Query string `json:"query"` // Search query
    OrderBy string `json:"order_by,omitempty"` // Sort order
    VideoThumbnailTime int64 `json:"video_thumbnail_time,omitempty"` // Video thumbnail timestamp (ms)
    VideoThumbnailWidth int `json:"video_thumbnail_width,omitempty"` // Video thumbnail width
    ImageThumbnailWidth int `json:"image_thumbnail_width,omitempty"` // Image thumbnail width
    ReturnTotalCount bool `json:"return_total_count,omitempty"` // Whether to return total count
}

// SearchFileResponse represents the response from the search endpoint
type SearchFileResponse struct {
    Items []FileItem `json:"items"` // Search results
    NextMarker string `json:"next_marker,omitempty"` // Next page marker
    TotalCount int64 `json:"total_count,omitempty"` // Total number of matches
}

// StarredFileRequest represents the request body for /adrive/v1.0/openFile/starredList endpoint
type StarredFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    Limit int `json:"limit,omitempty"` // Max items to return (default 100, max 100)
    Marker string `json:"marker,omitempty"` // Pagination marker
    OrderBy OrderBy `json:"order_by,omitempty"` // Sort field
    OrderDirection OrderDirection `json:"order_direction,omitempty"` // Sort direction
    Type FileType `json:"type,omitempty"` // Filter by type
    VideoThumbnailTime int64 `json:"video_thumbnail_time,omitempty"` // Video thumbnail timestamp (ms)
    VideoThumbnailWidth int `json:"video_thumbnail_width,omitempty"` // Video thumbnail width
    ImageThumbnailWidth int `json:"image_thumbnail_width,omitempty"` // Image thumbnail width
}

// GetFileRequest represents the request body for /adrive/v1.0/openFile/get endpoint
type GetFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    VideoThumbnailTime int64 `json:"video_thumbnail_time,omitempty"` // Video thumbnail timestamp (ms)
    VideoThumbnailWidth int `json:"video_thumbnail_width,omitempty"` // Video thumbnail width
    ImageThumbnailWidth int `json:"image_thumbnail_width,omitempty"` // Image thumbnail width
    Fields string `json:"fields,omitempty"` // Specific fields to return (comma-separated)
}

// GetFileByPathRequest represents the request body for /adrive/v1.0/openFile/get_by_path endpoint
type GetFileByPathRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FilePath string `json:"file_path"` // File path (e.g., /folder/file.txt)
}

// BatchGetFileRequest represents the request body for /adrive/v1.0/openFile/batch/get endpoint
type BatchGetFileRequest struct {
    FileList []DriveFile `json:"file_list"` // List of files to get details for
    VideoThumbnailTime int64 `json:"video_thumbnail_time,omitempty"` // Video thumbnail timestamp (ms)
    VideoThumbnailWidth int `json:"video_thumbnail_width,omitempty"` // Video thumbnail width
    ImageThumbnailWidth int `json:"image_thumbnail_width,omitempty"` // Image thumbnail width
}

// BatchGetFileResponse represents the response from the batch get endpoint
type BatchGetFileResponse struct {
    Items []FileItem `json:"items"` // List of file details
}

// FileDetailExtended represents a file with additional path information
type FileDetailExtended struct {
    FileItem
    IDPath string `json:"id_path,omitempty"` // Path using IDs (e.g., root:/64de0fb2...)
    NamePath string `json:"name_path,omitempty"` // Path using names (e.g., root:/folder/file.txt)
}

// GetDownloadURLRequest represents the request body for /adrive/v1.0/openFile/getDownloadUrl endpoint
type GetDownloadURLRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    ExpireSec int64 `json:"expire_sec,omitempty"` // URL expiration time in seconds (default 900, max 14400 for premium apps)
}

// GetDownloadURLResponse represents the response from the download URL endpoint
type GetDownloadURLResponse struct {
    URL string `json:"url"` // Download URL
    Expiration Time `json:"expiration"` // URL expiration time
    Method string `json:"method"` // Download method
    Description string `json:"description"` // Additional information about download speed and privileges
}

// CheckNameMode represents how to handle naming conflicts
type CheckNameMode string

const (
    CheckNameModeAutoRename CheckNameMode = "auto_rename" // Automatically rename if file exists
    CheckNameModeRefuse CheckNameMode = "refuse" // Don't create if file exists
    CheckNameModeIgnore CheckNameMode = "ignore" // Create even if file exists
)

// PartInfo represents information about a file part for multipart upload
type PartInfo struct {
    PartNumber int `json:"part_number"` // Part sequence number (1-based)
    UploadURL string `json:"upload_url,omitempty"` // Upload URL for this part
    PartSize int64 `json:"part_size,omitempty"` // Size of this part
    Etag string `json:"etag,omitempty"` // ETag returned after part upload
}

// StreamInfo represents stream information for special file formats (e.g., livp)
type StreamInfo struct {
    ContentHash string `json:"content_hash,omitempty"` // Content hash
    ContentHashName string `json:"content_hash_name,omitempty"` // Hash algorithm name
    ProofVersion string `json:"proof_version,omitempty"` // Proof version
    ProofCode string `json:"proof_code,omitempty"` // Proof code
    ContentMD5 string `json:"content_md5,omitempty"` // Content MD5
    PreHash string `json:"pre_hash,omitempty"` // Pre-hash for large files
    Size int64 `json:"size,omitempty"` // Stream size
    PartInfoList []PartInfo `json:"part_info_list,omitempty"` // Part information list
}

// CreateFileRequest represents the request body for /adrive/v1.0/openFile/create endpoint
type CreateFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    ParentFileID string `json:"parent_file_id"` // Parent folder ID (root for root directory)
    Name string `json:"name"` // File name (UTF-8, max 1024 bytes)
    Type FileType `json:"type"` // File type (file/folder)
    CheckNameMode CheckNameMode `json:"check_name_mode"` // How to handle name conflicts
    PartInfoList []PartInfo `json:"part_info_list,omitempty"` // Part information for multipart upload (max 10000)
    StreamsInfo []StreamInfo `json:"streams_info,omitempty"` // Stream information (for special formats)
    PreHash string `json:"pre_hash,omitempty"` // First 1KB SHA1 for quick duplicate check
    Size int64 `json:"size,omitempty"` // File size in bytes
    ContentHash string `json:"content_hash,omitempty"` // Full file content hash
    ContentHashName string `json:"content_hash_name,omitempty"` // Hash algorithm (default: sha1)
    ProofCode string `json:"proof_code,omitempty"` // Proof code for duplicate check
    ProofVersion string `json:"proof_version,omitempty"` // Proof version (fixed: v1)
    LocalCreatedAt *Time `json:"local_created_at,omitempty"` // Local creation time
    LocalModifiedAt *Time `json:"local_modified_at,omitempty"` // Local modification time
}

// CreateFileResponse represents the response from the file creation endpoint
type CreateFileResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    FileName string `json:"file_name"` // File name
    ParentFileID string `json:"parent_file_id"` // Parent folder ID
    Status string `json:"status"` // Status
    UploadID string `json:"upload_id"` // Upload ID (empty for folders)
    Available bool `json:"available"` // Whether the file is available
    Exist bool `json:"exist"` // Whether a file with same name exists
    RapidUpload bool `json:"rapid_upload"` // Whether rapid upload was used
    PartInfoList []PartInfo `json:"part_info_list"` // Part information list
}

// GetUploadURLRequest represents the request body for /adrive/v1.0/openFile/getUploadUrl endpoint
type GetUploadURLRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    UploadID string `json:"upload_id"` // Upload ID from file creation
    PartInfoList []PartInfo `json:"part_info_list"` // Part information list
}

// GetUploadURLResponse represents the response from the upload URL endpoint
type GetUploadURLResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    UploadID string `json:"upload_id"` // Upload ID
    CreatedAt Time `json:"created_at"` // Creation time
    PartInfoList []PartInfo `json:"part_info_list"` // Part information with URLs
}

// ListUploadedPartsRequest represents the request body for /adrive/v1.0/openFile/listUploadedParts endpoint
type ListUploadedPartsRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    UploadID string `json:"upload_id"` // Upload ID
    PartNumberMarker string `json:"part_number_marker,omitempty"` // Marker for pagination
}

// ListUploadedPartsResponse represents the response from the list uploaded parts endpoint
type ListUploadedPartsResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    UploadID string `json:"upload_id"` // Upload ID
    ParallelUpload bool `json:"parallelUpload"` // Whether parallel upload is enabled
    UploadedParts []PartInfo `json:"uploaded_parts"` // List of uploaded parts
    NextPartNumberMarker string `json:"next_part_number_marker"` // Marker for next page
}

// CompleteUploadRequest represents the request body for /adrive/v1.0/openFile/complete endpoint
type CompleteUploadRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    UploadID string `json:"upload_id"` // Upload ID
}

// CompleteUploadResponse represents the response from the complete upload endpoint
type CompleteUploadResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    Name string `json:"name"` // File name
    Size int64 `json:"size"` // File size
    FileExtension string `json:"file_extension"` // File extension
    ContentHash string `json:"content_hash"` // Content hash
    Category FileCategory `json:"category"` // File category
    Type FileType `json:"type"` // File type
    Thumbnail string `json:"thumbnail,omitempty"` // Thumbnail URL
    URL string `json:"url,omitempty"` // Preview URL
    DownloadURL string `json:"download_url,omitempty"` // Download URL
    CreatedAt Time `json:"created_at"` // Creation time
    UpdatedAt Time `json:"updated_at"` // Last update time
}

// UpdateFileRequest represents the request body for /adrive/v1.0/openFile/update endpoint
type UpdateFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    Name string `json:"name,omitempty"` // New file name
    CheckNameMode CheckNameMode `json:"check_name_mode,omitempty"` // How to handle name conflicts
    Starred *bool `json:"starred,omitempty"` // Whether to star/unstar the file
}

// UpdateFileResponse represents the response from the file update endpoint
type UpdateFileResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    Name string `json:"name"` // File name
    Size int64 `json:"size"` // File size
    FileExtension string `json:"file_extension"` // File extension
    ContentHash string `json:"content_hash"` // Content hash
    Category FileCategory `json:"category"` // File category
    Type FileType `json:"type"` // File type (file/folder)
    CreatedAt Time `json:"created_at"` // Creation time
    UpdatedAt Time `json:"updated_at"` // Last update time
}

// MoveFileRequest represents the request body for /adrive/v1.0/openFile/move endpoint
type MoveFileRequest struct {
    DriveID string `json:"drive_id"` // Current drive ID
    FileID string `json:"file_id"` // File ID to move
    ToDriveID string `json:"to_drive_id,omitempty"` // Target drive ID (defaults to current drive_id)
    ToParentFileID string `json:"to_parent_file_id"` // Target parent folder ID (root for root directory)
    CheckNameMode CheckNameMode `json:"check_name_mode,omitempty"` // How to handle name conflicts (default: refuse)
    NewName string `json:"new_name,omitempty"` // New name to use if there's a conflict
}

// MoveFileResponse represents the response from the file move endpoint
type MoveFileResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    AsyncTaskID string `json:"async_task_id,omitempty"` // Async task ID for folder moves
    Exist bool `json:"exist"` // Whether file already exists in target
}

// CopyFileRequest represents the request body for /adrive/v1.0/openFile/copy endpoint
type CopyFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID to copy
    ToDriveID string `json:"to_drive_id,omitempty"` // Target drive ID (defaults to current drive_id)
    ToParentFileID string `json:"to_parent_file_id"` // Target parent folder ID (root for root directory)
    AutoRename bool `json:"auto_rename,omitempty"` // Whether to auto rename on conflict (default: false)
}

// CopyFileResponse represents the response from the file copy endpoint
type CopyFileResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    AsyncTaskID string `json:"async_task_id,omitempty"` // Async task ID for folder copies
}

// TaskState represents the state of an async task
type TaskState string

const (
    TaskStateSucceed TaskState = "Succeed" // Task completed successfully
    TaskStateRunning TaskState = "Running" // Task is still running
    TaskStateFailed TaskState = "Failed" // Task failed
)

// TrashFileRequest represents the request body for /adrive/v1.0/openFile/recyclebin/trash endpoint
type TrashFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID to move to trash
}

// TrashFileResponse represents the response from the trash file endpoint
type TrashFileResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    AsyncTaskID string `json:"async_task_id,omitempty"` // Async task ID for folder operations
}

// DeleteFileRequest represents the request body for /adrive/v1.0/openFile/delete endpoint
type DeleteFileRequest struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID to delete
}

// DeleteFileResponse represents the response from the delete file endpoint
type DeleteFileResponse struct {
    DriveID string `json:"drive_id"` // Drive ID
    FileID string `json:"file_id"` // File ID
    AsyncTaskID string `json:"async_task_id,omitempty"` // Async task ID for folder operations
}

// GetAsyncTaskRequest represents the request body for /adrive/v1.0/openFile/async_task/get endpoint
type GetAsyncTaskRequest struct {
    AsyncTaskID string `json:"async_task_id"` // Async task ID to query
}

// GetAsyncTaskResponse represents the response from the get async task endpoint
type GetAsyncTaskResponse struct {
    State TaskState `json:"state"` // Task state (Succeed/Running/Failed)
    AsyncTaskID string `json:"async_task_id"` // Async task ID
}
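
For illustration only (not part of this commit): folder copies, moves, and deletions may return an async_task_id, which a caller would poll against /adrive/v1.0/openFile/async_task/get until the state leaves Running. A minimal sketch, using a hypothetical helper name pollAsyncTask and the AdriveClient added in helper.go below:

// pollAsyncTask is a hypothetical helper showing the intended polling loop.
func pollAsyncTask(ctx context.Context, c *AdriveClient, taskID string) error {
    opts := rest.Opts{Method: "POST", Path: "/adrive/v1.0/openFile/async_task/get"}
    req := api.GetAsyncTaskRequest{AsyncTaskID: taskID}
    for {
        var res api.GetAsyncTaskResponse
        if _, err := c.CallJSON(ctx, &opts, &req, &res); err != nil {
            return err
        }
        switch res.State {
        case api.TaskStateSucceed:
            return nil
        case api.TaskStateFailed:
            return fmt.Errorf("async task %s failed", taskID)
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(time.Second): // naive fixed delay, for the sketch only
        }
    }
}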
backend/adrive/helper.go (new file, 147 lines)
@@ -0,0 +1,147 @@
package adrive

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "sync"

    "github.com/rclone/rclone/backend/adrive/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
)

// Client contains the info for the Aliyun Drive API
type AdriveClient struct {
    mu sync.RWMutex // Protecting read/writes
    c *rest.Client // The REST client
    rootURL string // API root URL
    errorHandler func(resp *http.Response) error
    pacer *fs.Pacer // To pace the API calls
}

// NewClient takes an http.Client and makes a new api instance
func NewAdriveClient(c *http.Client, rootURL string) *AdriveClient {
    client := &AdriveClient{
        c:       rest.NewClient(c),
        rootURL: rootURL,
    }
    client.c.SetErrorHandler(errorHandler)
    client.c.SetRoot(rootURL)

    // Create a pacer using rclone's default exponential backoff
    client.pacer = fs.NewPacer(
        context.Background(),
        pacer.NewDefault(
            pacer.MinSleep(minSleep),
            pacer.MaxSleep(maxSleep),
            pacer.DecayConstant(decayConstant),
        ),
    )

    return client
}

// Call makes a call to the API using the params passed in
func (c *AdriveClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
    return c.CallWithPacer(ctx, opts, c.pacer)
}

// CallWithPacer makes a call to the API using the params passed in using the pacer passed in
func (c *AdriveClient) CallWithPacer(ctx context.Context, opts *rest.Opts, pacer *fs.Pacer) (resp *http.Response, err error) {
    err = pacer.Call(func() (bool, error) {
        resp, err = c.c.Call(ctx, opts)
        return shouldRetry(ctx, resp, err)
    })
    return resp, err
}

// CallJSON makes an API call and decodes the JSON return packet into response
func (c *AdriveClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
    return c.CallJSONWithPacer(ctx, opts, c.pacer, request, response)
}

// CallJSONWithPacer makes an API call and decodes the JSON return packet into response using the pacer passed in
func (c *AdriveClient) CallJSONWithPacer(ctx context.Context, opts *rest.Opts, pacer *fs.Pacer, request interface{}, response interface{}) (resp *http.Response, err error) {
    err = pacer.Call(func() (bool, error) {
        resp, err = c.c.CallJSON(ctx, opts, request, response)
        return shouldRetry(ctx, resp, err)
    })
    return resp, err
}

var retryErrorCodes = []int{
    403,
    404,
    429, // Too Many Requests
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns true if err is nil, or if it's a retryable error
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    if err != nil {
        // Check for context cancellation first
        if ctx.Err() != nil {
            return false, ctx.Err()
        }
        // Retry network errors
        if fserrors.ShouldRetry(err) {
            return true, err
        }
        return false, err
    }

    if resp == nil {
        return false, nil
    }

    // Check if we have an API error
    if resp.StatusCode >= 400 {
        apiErr := new(api.Error)
        decodeErr := rest.DecodeJSON(resp, &apiErr)
        if decodeErr != nil {
            fs.Debugf(nil, "Failed to decode error response: %v", decodeErr)
            // If we can't decode the error, retry server errors
            return resp.StatusCode >= 500, fmt.Errorf("HTTP error %s", resp.Status)
        }
        return apiErr.ShouldRetry(1), apiErr
    }

    return false, nil
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
    // Decode error response
    apiErr := new(api.Error)
    err := rest.DecodeJSON(resp, &apiErr)
    if err != nil {
        fs.Debugf(nil, "Failed to decode error response: %v", err)
        // If we can't decode the error response, create a basic error
        apiErr.Code = resp.StatusCode
        apiErr.Message = resp.Status
        return apiErr
    }

    // Ensure we have an error code and message
    if apiErr.Code == 0 {
        apiErr.Code = resp.StatusCode
    }
    if apiErr.Message == "" {
        apiErr.Message = resp.Status
    }

    // Add response body as details if present
    if body, err := io.ReadAll(resp.Body); err == nil && len(body) > 0 {
        apiErr.Details = string(body)
    }

    return apiErr
}
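
For illustration only (not part of this commit): a minimal sketch of how the backend might wire up and use this helper. Note the diff itself is inconsistent about the name (adrive.go refers to Client/NewClient while this file declares AdriveClient/NewAdriveClient); the sketch uses the names from this file, and accessToken and driveID are hypothetical placeholders:

    hc := fshttp.NewClient(ctx)            // *http.Client honouring rclone's global HTTP options
    client := NewAdriveClient(hc, rootURL) // REST client with pacer and error handler attached
    client.c.SetHeader("Authorization", "Bearer "+accessToken)

    opts := rest.Opts{Method: "POST", Path: "/adrive/v1.0/openFile/list"}
    req := api.ListFileRequest{DriveID: driveID, ParentFileID: "root"}
    var res api.ListFileResponse
    if _, err := client.CallJSON(ctx, &opts, &req, &res); err != nil {
        // handle the error (the pacer has already retried retryable responses)
    }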