mirror of
https://github.com/AlistGo/alist.git
synced 2025-06-16 00:52:01 +08:00
feat(cloudreve_v4): add Cloudreve V4 driver (#8470 closes #8328 #8467)
Some checks failed
auto_lang / auto generate lang.json (1.21, ubuntu-latest) (push) Has been cancelled
build / Build (ubuntu-latest, android-arm64) (push) Has been cancelled
beta release / Beta Release Changelog (1.21, ubuntu-latest) (push) Has been cancelled
build / Build (ubuntu-latest, darwin-amd64) (push) Has been cancelled
build / Build (ubuntu-latest, darwin-arm64) (push) Has been cancelled
build / Build (ubuntu-latest, linux-amd64-musl) (push) Has been cancelled
build / Build (ubuntu-latest, linux-arm64-musl) (push) Has been cancelled
build / Build (ubuntu-latest, windows-amd64) (push) Has been cancelled
build / Build (ubuntu-latest, windows-arm64) (push) Has been cancelled
release_docker / Build Binaries for Docker Release (push) Has been cancelled
beta release / Beta Release (md5, !(*musl*|*windows-arm64*|*android*|*freebsd*)) (push) Has been cancelled
beta release / Beta Release (md5-android, android-*) (push) Has been cancelled
beta release / Beta Release (md5-freebsd, freebsd-*) (push) Has been cancelled
beta release / Beta Release (md5-linux-musl, linux-!(arm*)-musl*) (push) Has been cancelled
beta release / Beta Release (md5-linux-musl-arm, linux-arm*-musl*) (push) Has been cancelled
beta release / Beta Release (md5-windows-arm64, windows-arm64) (push) Has been cancelled
beta release / Beta Release Desktop (push) Has been cancelled
release_docker / Release Docker image (INSTALL_FFMPEG=true
INSTALL_ARIA2=true
, aio, suffix=-aio,onlatest=true) (push) Has been cancelled
release_docker / Release Docker image (, latest, ) (push) Has been cancelled
release_docker / Release Docker image (INSTALL_ARIA2=true, aria2, suffix=-aria2,onlatest=true) (push) Has been cancelled
release_docker / Release Docker image (INSTALL_FFMPEG=true, ffmpeg, suffix=-ffmpeg,onlatest=true) (push) Has been cancelled
Close need info / close-need-info (push) Has been cancelled
Close inactive / close-inactive (push) Has been cancelled
Some checks failed
auto_lang / auto generate lang.json (1.21, ubuntu-latest) (push) Has been cancelled
build / Build (ubuntu-latest, android-arm64) (push) Has been cancelled
beta release / Beta Release Changelog (1.21, ubuntu-latest) (push) Has been cancelled
build / Build (ubuntu-latest, darwin-amd64) (push) Has been cancelled
build / Build (ubuntu-latest, darwin-arm64) (push) Has been cancelled
build / Build (ubuntu-latest, linux-amd64-musl) (push) Has been cancelled
build / Build (ubuntu-latest, linux-arm64-musl) (push) Has been cancelled
build / Build (ubuntu-latest, windows-amd64) (push) Has been cancelled
build / Build (ubuntu-latest, windows-arm64) (push) Has been cancelled
release_docker / Build Binaries for Docker Release (push) Has been cancelled
beta release / Beta Release (md5, !(*musl*|*windows-arm64*|*android*|*freebsd*)) (push) Has been cancelled
beta release / Beta Release (md5-android, android-*) (push) Has been cancelled
beta release / Beta Release (md5-freebsd, freebsd-*) (push) Has been cancelled
beta release / Beta Release (md5-linux-musl, linux-!(arm*)-musl*) (push) Has been cancelled
beta release / Beta Release (md5-linux-musl-arm, linux-arm*-musl*) (push) Has been cancelled
beta release / Beta Release (md5-windows-arm64, windows-arm64) (push) Has been cancelled
beta release / Beta Release Desktop (push) Has been cancelled
release_docker / Release Docker image (INSTALL_FFMPEG=true
INSTALL_ARIA2=true
, aio, suffix=-aio,onlatest=true) (push) Has been cancelled
release_docker / Release Docker image (, latest, ) (push) Has been cancelled
release_docker / Release Docker image (INSTALL_ARIA2=true, aria2, suffix=-aria2,onlatest=true) (push) Has been cancelled
release_docker / Release Docker image (INSTALL_FFMPEG=true, ffmpeg, suffix=-ffmpeg,onlatest=true) (push) Has been cancelled
Close need info / close-need-info (push) Has been cancelled
Close inactive / close-inactive (push) Has been cancelled
* feat(cloudreve_v4): add Cloudreve V4 driver implementation * fix(cloudreve_v4): update request handling to prevent token refresh loop * feat(onedrive): implement retry logic for upload failures * feat(cloudreve): implement retry logic for upload failures * feat(cloudreve_v4): support cloud sorting * fix(cloudreve_v4): improve token handling in Init method * feat(cloudreve_v4): support share * feat(cloudreve): support reference * feat(cloudreve_v4): support version upload * fix(cloudreve_v4): add SetBody in upLocal * fix(cloudreve_v4): update URL structure in Link and FileUrlResp
This commit is contained in:
@ -22,6 +22,7 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
|
||||
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
|
||||
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
|
||||
_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
|
||||
_ "github.com/alist-org/alist/v3/drivers/crypt"
|
||||
_ "github.com/alist-org/alist/v3/drivers/doubao"
|
||||
_ "github.com/alist-org/alist/v3/drivers/doubao_share"
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
type Cloudreve struct {
|
||||
model.Storage
|
||||
Addition
|
||||
ref *Cloudreve
|
||||
}
|
||||
|
||||
func (d *Cloudreve) Config() driver.Config {
|
||||
@ -37,8 +38,18 @@ func (d *Cloudreve) Init(ctx context.Context) error {
|
||||
return d.login()
|
||||
}
|
||||
|
||||
func (d *Cloudreve) InitReference(storage driver.Driver) error {
|
||||
refStorage, ok := storage.(*Cloudreve)
|
||||
if ok {
|
||||
d.ref = refStorage
|
||||
return nil
|
||||
}
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *Cloudreve) Drop(ctx context.Context) error {
|
||||
d.Cookie = ""
|
||||
d.ref = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -4,12 +4,14 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
@ -19,7 +21,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/cookie"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
json "github.com/json-iterator/go"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
@ -35,6 +36,9 @@ func (d *Cloudreve) getUA() string {
|
||||
}
|
||||
|
||||
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
|
||||
if d.ref != nil {
|
||||
return d.ref.request(method, path, callback, out)
|
||||
}
|
||||
u := d.Address + "/api/v3" + path
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
@ -79,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
|
||||
}
|
||||
if out != nil && r.Data != nil {
|
||||
var marshal []byte
|
||||
marshal, err = json.Marshal(r.Data)
|
||||
marshal, err = jsoniter.Marshal(r.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(marshal, out)
|
||||
err = jsoniter.Unmarshal(marshal, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -187,12 +191,9 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@ -205,9 +206,26 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
||||
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
|
||||
req.SetHeader("User-Agent", d.getUA())
|
||||
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
req.AddRetryCondition(func(r *resty.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if r.IsError() {
|
||||
return true
|
||||
}
|
||||
var retryResp Resp
|
||||
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
|
||||
if jErr != nil {
|
||||
return true
|
||||
}
|
||||
if retryResp.Code != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
}, nil)
|
||||
if err != nil {
|
||||
break
|
||||
return err
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
@ -222,16 +240,15 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@ -248,14 +265,43 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
finish += byteSize
|
||||
err = func() error {
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var up Resp
|
||||
err = json.Unmarshal(body, &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if up.Code != 0 {
|
||||
return errors.New(up.Msg)
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err == nil {
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
} else {
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -264,16 +310,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@ -295,22 +340,31 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
_ = res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
}
|
||||
// 上传成功发送回调请求
|
||||
err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
||||
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
@ -318,16 +372,15 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@ -346,11 +399,27 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
etags = append(etags, res.Header.Get("ETag"))
|
||||
etag := res.Header.Get("ETag")
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case res.StatusCode != 200:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case etag == "":
|
||||
return errors.New("faild to get ETag from header")
|
||||
default:
|
||||
retryCount = 0
|
||||
etags = append(etags, etag)
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
}
|
||||
|
||||
// s3LikeFinishUpload
|
||||
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
|
||||
|
305
drivers/cloudreve_v4/driver.go
Normal file
305
drivers/cloudreve_v4/driver.go
Normal file
@ -0,0 +1,305 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
type CloudreveV4 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
ref *CloudreveV4
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Config() driver.Config {
|
||||
if d.ref != nil {
|
||||
return d.ref.Config()
|
||||
}
|
||||
if d.EnableVersionUpload {
|
||||
config.NoOverwriteUpload = false
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Init(ctx context.Context) error {
|
||||
// removing trailing slash
|
||||
d.Address = strings.TrimSuffix(d.Address, "/")
|
||||
op.MustSaveDriverStorage(d)
|
||||
if d.ref != nil {
|
||||
return nil
|
||||
}
|
||||
if d.AccessToken == "" && d.RefreshToken != "" {
|
||||
return d.refreshToken()
|
||||
}
|
||||
if d.Username != "" {
|
||||
return d.login()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
|
||||
refStorage, ok := storage.(*CloudreveV4)
|
||||
if ok {
|
||||
d.ref = refStorage
|
||||
return nil
|
||||
}
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Drop(ctx context.Context) error {
|
||||
d.ref = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
const pageSize int = 100
|
||||
var f []File
|
||||
var r FileResp
|
||||
params := map[string]string{
|
||||
"page_size": strconv.Itoa(pageSize),
|
||||
"uri": dir.GetPath(),
|
||||
"order_by": d.OrderBy,
|
||||
"order_direction": d.OrderDirection,
|
||||
"page": "0",
|
||||
}
|
||||
|
||||
for {
|
||||
err := d.request(http.MethodGet, "/file", func(req *resty.Request) {
|
||||
req.SetQueryParams(params)
|
||||
}, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f = append(f, r.Files...)
|
||||
if r.Pagination.NextToken == "" || len(r.Files) < pageSize {
|
||||
break
|
||||
}
|
||||
params["next_page_token"] = r.Pagination.NextToken
|
||||
}
|
||||
|
||||
return utils.SliceConvert(f, func(src File) (model.Obj, error) {
|
||||
if d.EnableFolderSize && src.Type == 1 {
|
||||
var ds FolderSummaryResp
|
||||
err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) {
|
||||
req.SetQueryParam("uri", src.Path)
|
||||
req.SetQueryParam("folder_summary", "true")
|
||||
}, &ds)
|
||||
if err == nil && ds.FolderSummary.Size > 0 {
|
||||
src.Size = ds.FolderSummary.Size
|
||||
}
|
||||
}
|
||||
var thumb model.Thumbnail
|
||||
if d.EnableThumb && src.Type == 0 {
|
||||
var t FileThumbResp
|
||||
err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) {
|
||||
req.SetQueryParam("uri", src.Path)
|
||||
}, &t)
|
||||
if err == nil && t.URL != "" {
|
||||
thumb = model.Thumbnail{
|
||||
Thumbnail: t.URL,
|
||||
}
|
||||
}
|
||||
}
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: src.ID,
|
||||
Path: src.Path,
|
||||
Name: src.Name,
|
||||
Size: src.Size,
|
||||
Modified: src.UpdatedAt,
|
||||
Ctime: src.CreatedAt,
|
||||
IsFolder: src.Type == 1,
|
||||
},
|
||||
Thumbnail: thumb,
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
var url FileUrlResp
|
||||
err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{file.GetPath()},
|
||||
"download": true,
|
||||
})
|
||||
}, &url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(url.Urls) == 0 {
|
||||
return nil, errors.New("server returns no url")
|
||||
}
|
||||
exp := time.Until(url.Expires)
|
||||
return &model.Link{
|
||||
URL: url.Urls[0].URL,
|
||||
Expiration: &exp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"type": "folder",
|
||||
"uri": parentDir.GetPath() + "/" + dirName,
|
||||
"error_on_conflict": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{srcObj.GetPath()},
|
||||
"dst": dstDir.GetPath(),
|
||||
"copy": false,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"new_name": newName,
|
||||
"uri": srcObj.GetPath(),
|
||||
})
|
||||
}, nil)
|
||||
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{srcObj.GetPath()},
|
||||
"dst": dstDir.GetPath(),
|
||||
"copy": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error {
|
||||
return d.request(http.MethodDelete, "/file", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{obj.GetPath()},
|
||||
"unlink": false,
|
||||
"skip_soft_delete": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
if file.GetSize() == 0 {
|
||||
// 空文件使用新建文件方法,避免上传卡锁
|
||||
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"type": "file",
|
||||
"uri": dstDir.GetPath() + "/" + file.GetName(),
|
||||
"error_on_conflict": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
var p StoragePolicy
|
||||
var r FileResp
|
||||
var u FileUploadResp
|
||||
var err error
|
||||
params := map[string]string{
|
||||
"page_size": "10",
|
||||
"uri": dstDir.GetPath(),
|
||||
"order_by": "created_at",
|
||||
"order_direction": "asc",
|
||||
"page": "0",
|
||||
}
|
||||
err = d.request(http.MethodGet, "/file", func(req *resty.Request) {
|
||||
req.SetQueryParams(params)
|
||||
}, &r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p = r.StoragePolicy
|
||||
body := base.Json{
|
||||
"uri": dstDir.GetPath() + "/" + file.GetName(),
|
||||
"size": file.GetSize(),
|
||||
"policy_id": p.ID,
|
||||
"last_modified": file.ModTime().UnixMilli(),
|
||||
"mime_type": "",
|
||||
}
|
||||
if d.EnableVersionUpload {
|
||||
body["entity_type"] = "version"
|
||||
}
|
||||
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
|
||||
req.SetBody(body)
|
||||
}, &u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u.StoragePolicy.Relay {
|
||||
err = d.upLocal(ctx, file, u, up)
|
||||
} else {
|
||||
switch u.StoragePolicy.Type {
|
||||
case "local":
|
||||
err = d.upLocal(ctx, file, u, up)
|
||||
case "remote":
|
||||
err = d.upRemote(ctx, file, u, up)
|
||||
case "onedrive":
|
||||
err = d.upOneDrive(ctx, file, u, up)
|
||||
case "s3":
|
||||
err = d.upS3(ctx, file, u, up)
|
||||
default:
|
||||
return errs.NotImplement
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
// 删除失败的会话
|
||||
_ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"id": u.SessionID,
|
||||
"uri": u.URI,
|
||||
})
|
||||
}, nil)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
||||
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
|
||||
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
|
||||
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
|
||||
// return errs.NotImplement to use an internal archive tool
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
||||
var _ driver.Driver = (*CloudreveV4)(nil)
|
44
drivers/cloudreve_v4/meta.go
Normal file
44
drivers/cloudreve_v4/meta.go
Normal file
@ -0,0 +1,44 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
// Usually one of two
|
||||
driver.RootPath
|
||||
// driver.RootID
|
||||
// define other
|
||||
Address string `json:"address" required:"true"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
CustomUA string `json:"custom_ua"`
|
||||
EnableFolderSize bool `json:"enable_folder_size"`
|
||||
EnableThumb bool `json:"enable_thumb"`
|
||||
EnableVersionUpload bool `json:"enable_version_upload"`
|
||||
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"`
|
||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "Cloudreve V4",
|
||||
LocalSort: false,
|
||||
OnlyLocal: false,
|
||||
OnlyProxy: false,
|
||||
NoCache: false,
|
||||
NoUpload: false,
|
||||
NeedMs: false,
|
||||
DefaultRoot: "cloudreve://my",
|
||||
CheckStatus: true,
|
||||
Alert: "",
|
||||
NoOverwriteUpload: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &CloudreveV4{}
|
||||
})
|
||||
}
|
164
drivers/cloudreve_v4/types.go
Normal file
164
drivers/cloudreve_v4/types.go
Normal file
@ -0,0 +1,164 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
)
|
||||
|
||||
type Object struct {
|
||||
model.Object
|
||||
StoragePolicy StoragePolicy
|
||||
}
|
||||
|
||||
type Resp struct {
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Data any `json:"data"`
|
||||
}
|
||||
|
||||
type BasicConfigResp struct {
|
||||
InstanceID string `json:"instance_id"`
|
||||
// Title string `json:"title"`
|
||||
// Themes string `json:"themes"`
|
||||
// DefaultTheme string `json:"default_theme"`
|
||||
User struct {
|
||||
ID string `json:"id"`
|
||||
// Nickname string `json:"nickname"`
|
||||
// CreatedAt time.Time `json:"created_at"`
|
||||
// Anonymous bool `json:"anonymous"`
|
||||
Group struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Permission string `json:"permission"`
|
||||
} `json:"group"`
|
||||
} `json:"user"`
|
||||
// Logo string `json:"logo"`
|
||||
// LogoLight string `json:"logo_light"`
|
||||
// CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"`
|
||||
CaptchaType string `json:"captcha_type"` // support 'normal' only
|
||||
// AppPromotion bool `json:"app_promotion"`
|
||||
}
|
||||
|
||||
type SiteLoginConfigResp struct {
|
||||
LoginCaptcha bool `json:"login_captcha"`
|
||||
Authn bool `json:"authn"`
|
||||
}
|
||||
|
||||
type PrepareLoginResp struct {
|
||||
WebauthnEnabled bool `json:"webauthn_enabled"`
|
||||
PasswordEnabled bool `json:"password_enabled"`
|
||||
}
|
||||
|
||||
type CaptchaResp struct {
|
||||
Image string `json:"image"`
|
||||
Ticket string `json:"ticket"`
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
AccessExpires time.Time `json:"access_expires"`
|
||||
RefreshExpires time.Time `json:"refresh_expires"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
User struct {
|
||||
ID string `json:"id"`
|
||||
// Email string `json:"email"`
|
||||
// Nickname string `json:"nickname"`
|
||||
Status string `json:"status"`
|
||||
// CreatedAt time.Time `json:"created_at"`
|
||||
Group struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Permission string `json:"permission"`
|
||||
// DirectLinkBatchSize int `json:"direct_link_batch_size"`
|
||||
// TrashRetention int `json:"trash_retention"`
|
||||
} `json:"group"`
|
||||
// Language string `json:"language"`
|
||||
} `json:"user"`
|
||||
Token Token `json:"token"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Type int `json:"type"` // 0: file, 1: folder
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
Size int64 `json:"size"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
Path string `json:"path"`
|
||||
Capability string `json:"capability"`
|
||||
Owned bool `json:"owned"`
|
||||
PrimaryEntity string `json:"primary_entity"`
|
||||
}
|
||||
|
||||
type StoragePolicy struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
MaxSize int64 `json:"max_size"`
|
||||
Relay bool `json:"relay,omitempty"`
|
||||
}
|
||||
|
||||
type Pagination struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"page_size"`
|
||||
IsCursor bool `json:"is_cursor"`
|
||||
NextToken string `json:"next_token,omitempty"`
|
||||
}
|
||||
|
||||
type Props struct {
|
||||
Capability string `json:"capability"`
|
||||
MaxPageSize int `json:"max_page_size"`
|
||||
OrderByOptions []string `json:"order_by_options"`
|
||||
OrderDirectionOptions []string `json:"order_direction_options"`
|
||||
}
|
||||
|
||||
type FileResp struct {
|
||||
Files []File `json:"files"`
|
||||
Parent File `json:"parent"`
|
||||
Pagination Pagination `json:"pagination"`
|
||||
Props Props `json:"props"`
|
||||
ContextHint string `json:"context_hint"`
|
||||
MixedType bool `json:"mixed_type"`
|
||||
StoragePolicy StoragePolicy `json:"storage_policy"`
|
||||
}
|
||||
|
||||
type FileUrlResp struct {
|
||||
Urls []struct {
|
||||
URL string `json:"url"`
|
||||
} `json:"urls"`
|
||||
Expires time.Time `json:"expires"`
|
||||
}
|
||||
|
||||
type FileUploadResp struct {
|
||||
// UploadID string `json:"upload_id"`
|
||||
SessionID string `json:"session_id"`
|
||||
ChunkSize int64 `json:"chunk_size"`
|
||||
Expires int64 `json:"expires"`
|
||||
StoragePolicy StoragePolicy `json:"storage_policy"`
|
||||
URI string `json:"uri"`
|
||||
CompleteURL string `json:"completeURL,omitempty"` // for S3-like
|
||||
CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive
|
||||
UploadUrls []string `json:"upload_urls,omitempty"` // for not-local
|
||||
Credential string `json:"credential,omitempty"` // for local
|
||||
}
|
||||
|
||||
// FileThumbResp is a temporary thumbnail URL, valid until Expires.
type FileThumbResp struct {
	URL     string    `json:"url"`
	Expires time.Time `json:"expires"`
}
|
||||
|
||||
// FolderSummaryResp extends File with recursively aggregated size and
// item counts for a folder. Completed reports whether the server-side
// calculation finished; CalculatedAt is when the summary was computed.
type FolderSummaryResp struct {
	File
	FolderSummary struct {
		Size         int64     `json:"size"`
		Files        int64     `json:"files"`
		Folders      int64     `json:"folders"`
		Completed    bool      `json:"completed"`
		CalculatedAt time.Time `json:"calculated_at"`
	} `json:"folder_summary"`
}
|
476
drivers/cloudreve_v4/util.go
Normal file
476
drivers/cloudreve_v4/util.go
Normal file
@ -0,0 +1,476 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// do others that not defined in Driver interface
|
||||
|
||||
func (d *CloudreveV4) getUA() string {
|
||||
if d.CustomUA != "" {
|
||||
return d.CustomUA
|
||||
}
|
||||
return base.UserAgent
|
||||
}
|
||||
|
||||
// request performs an authenticated call against the Cloudreve V4 API
// (d.Address + "/api/v4" + path), decodes the JSON envelope into Resp
// and, when both out and the envelope's Data are non-nil, re-marshals
// Data into out. On an application-level code 401 it refreshes the
// token once and replays the request. When this driver references
// another storage instance (d.ref), the call is delegated to it.
func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
	if d.ref != nil {
		return d.ref.request(method, path, callback, out)
	}
	u := d.Address + "/api/v4" + path
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"Accept":     "application/json, text/plain, */*",
		"User-Agent": d.getUA(),
	})
	if d.AccessToken != "" {
		req.SetHeader("Authorization", "Bearer "+d.AccessToken)
	}

	var r Resp
	req.SetResult(&r)

	// callback may attach body, query parameters or extra headers
	if callback != nil {
		callback(req)
	}

	resp, err := req.Execute(method, u)
	if err != nil {
		return err
	}
	// transport-level failure (non-2xx HTTP status): surface the raw body
	if !resp.IsSuccess() {
		return errors.New(resp.String())
	}

	if r.Code != 0 {
		// envelope code 401 means the access token expired; refresh and
		// replay once. The path guard prevents recursing into the refresh
		// endpoint itself.
		// NOTE(review): if the refreshed token is still rejected with 401
		// this recurses again — presumably bounded by the refresh failing;
		// verify there is no infinite-retry path.
		if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
			// try to refresh token
			err = d.refreshToken()
			if err != nil {
				return err
			}
			return d.request(method, path, callback, out)
		}
		return errors.New(r.Msg)
	}

	if out != nil && r.Data != nil {
		// r.Data was decoded as a generic value; round-trip through JSON
		// to fill the caller-provided concrete type
		var marshal []byte
		marshal, err = json.Marshal(r.Data)
		if err != nil {
			return err
		}
		err = json.Unmarshal(marshal, out)
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func (d *CloudreveV4) login() error {
|
||||
var siteConfig SiteLoginConfigResp
|
||||
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !siteConfig.Authn {
|
||||
return errors.New("authn not support")
|
||||
}
|
||||
var prepareLogin PrepareLoginResp
|
||||
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !prepareLogin.PasswordEnabled {
|
||||
return errors.New("password not enabled")
|
||||
}
|
||||
if prepareLogin.WebauthnEnabled {
|
||||
return errors.New("webauthn not support")
|
||||
}
|
||||
for range 5 {
|
||||
err = d.doLogin(siteConfig.LoginCaptcha)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if err.Error() != "CAPTCHA not match." {
|
||||
break
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// doLogin performs a single password-login attempt against
// /session/token. When needCaptcha is set it fetches the site captcha,
// solves it through the configured OCR service, and attaches the
// ticket and solution to the login body. On success it stores the
// returned token pair on the driver and persists the storage config.
func (d *CloudreveV4) doLogin(needCaptcha bool) error {
	var err error
	loginBody := base.Json{
		"email":    d.Username,
		"password": d.Password,
	}
	if needCaptcha {
		// only the "normal" (image) captcha type can be solved via OCR
		var config BasicConfigResp
		err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
		if err != nil {
			return err
		}
		if config.CaptchaType != "normal" {
			return fmt.Errorf("captcha type %s not support", config.CaptchaType)
		}
		var captcha CaptchaResp
		err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
		if err != nil {
			return err
		}
		// the captcha image arrives as a data URI; anything else means we
		// did not get a usable PNG
		if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") {
			return errors.New("can not get captcha")
		}
		loginBody["ticket"] = captcha.Ticket
		// strip the "data:image/png;base64," prefix and stream-decode the
		// remainder into the OCR request
		i := strings.Index(captcha.Image, ",")
		dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:]))
		vRes, err := base.RestyClient.R().SetMultipartField(
			"image", "validateCode.png", "image/png", dec).
			Post(setting.GetStr(conf.OcrApi))
		if err != nil {
			return err
		}
		if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
			return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString())
		}
		captchaCode := jsoniter.Get(vRes.Body(), "result").ToString()
		if captchaCode == "" {
			return errors.New("ocr error: empty result")
		}
		loginBody["captcha"] = captchaCode
	}
	var token TokenResponse
	err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
		req.SetBody(loginBody)
	}, &token)
	if err != nil {
		return err
	}
	// persist the new token pair so it survives a restart
	d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
	op.MustSaveDriverStorage(d)
	return nil
}
|
||||
|
||||
func (d *CloudreveV4) refreshToken() error {
|
||||
var token Token
|
||||
if token.RefreshToken == "" {
|
||||
if d.Username != "" {
|
||||
err := d.login()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"refresh_token": d.RefreshToken,
|
||||
})
|
||||
}, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
if DEFAULT == 0 {
|
||||
// support relay
|
||||
DEFAULT = file.GetSize()
|
||||
}
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
|
||||
req.SetHeader("Content-Type", "application/octet-stream")
|
||||
req.SetContentLength(true)
|
||||
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
|
||||
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
req.AddRetryCondition(func(r *resty.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if r.IsError() {
|
||||
return true
|
||||
}
|
||||
var retryResp Resp
|
||||
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
|
||||
if jErr != nil {
|
||||
return true
|
||||
}
|
||||
if retryResp.Code != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
credential := u.Credential
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
|
||||
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
err = func() error {
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var up Resp
|
||||
err = json.Unmarshal(body, &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if up.Code != 0 {
|
||||
return errors.New(up.Msg)
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err == nil {
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
chunk++
|
||||
} else {
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
}
|
||||
}
|
||||
// 上传成功发送回调请求
|
||||
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
|
||||
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
etag := res.Header.Get("ETag")
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case res.StatusCode != 200:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case etag == "":
|
||||
return errors.New("faild to get ETag from header")
|
||||
default:
|
||||
retryCount = 0
|
||||
etags = append(etags, etag)
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
}
|
||||
|
||||
// s3LikeFinishUpload
|
||||
bodyBuilder := &strings.Builder{}
|
||||
bodyBuilder.WriteString("<CompleteMultipartUpload>")
|
||||
for i, etag := range etags {
|
||||
bodyBuilder.WriteString(fmt.Sprintf(
|
||||
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
|
||||
i+1, // PartNumber 从 1 开始
|
||||
etag,
|
||||
))
|
||||
}
|
||||
bodyBuilder.WriteString("</CompleteMultipartUpload>")
|
||||
req, err := http.NewRequest(
|
||||
"POST",
|
||||
u.CompleteURL,
|
||||
strings.NewReader(bodyBuilder.String()),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/xml")
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
|
||||
}
|
||||
|
||||
// 上传成功发送回调请求
|
||||
return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
}
|
@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
stdpath "path"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
@ -17,7 +18,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var onedriveHostMap = map[string]Host{
|
||||
@ -204,19 +204,18 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
log.Debugf("upload: %d", finish)
|
||||
var byteSize int64 = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
log.Debug(err, n)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -228,19 +227,31 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Onedrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
}
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
stdpath "path"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
@ -17,7 +18,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var onedriveHostMap = map[string]Host{
|
||||
@ -154,19 +154,18 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
log.Debugf("upload: %d", finish)
|
||||
var byteSize int64 = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
log.Debug(err, n)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -178,19 +177,31 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[OnedriveAPP] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
}
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
Reference in New Issue
Block a user