Merge 0ef345ce33f8c838f19416f97c571ca6b3a529ef into 0b9671313b14ffe839ecbd7dd2ae5ac7f6f05db8

This commit is contained in:
arch 2025-04-13 16:11:50 -04:00 committed by GitHub
commit c4ee6b2396
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 1858 additions and 0 deletions

View File

@ -119,6 +119,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Terabox [:page_facing_up:](https://rclone.org/terabox/)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@ -58,6 +58,7 @@ import (
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/terabox"
_ "github.com/rclone/rclone/backend/ulozto"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"

540
backend/terabox/api.go Normal file
View File

@ -0,0 +1,540 @@
package terabox
import (
"bytes"
"context"
"crypto/md5"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
libPath "path"
"github.com/rclone/rclone/backend/terabox/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes lists the HTTP status codes after which apiExec retries
// the request (up to 2 extra attempts with a linear backoff).
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
// apiExec performs the API request described by opts and decodes the JSON
// response into res.
//
// On the very first call it configures the shared rest client (base URL,
// common headers, cookie auth). Common query parameters (app_id/channel/
// clienttype for cookie auth, access_tokens for token auth) are appended
// exactly once per call. Requests that fail with a status code listed in
// retryErrorCodes are retried up to 2 more times with a linear backoff.
// API-level errors (the "errno" field) are surfaced via api.ErrorInterface,
// except for api.ResponseUploadedChunk which carries no errno field.
func (f *Fs) apiExec(ctx context.Context, opts *rest.Opts, res any) error {
	if opts == nil {
		return fmt.Errorf("empty request")
	}
	opts.IgnoreStatus = true
	retry := 0
retry:
	if !f.notFirstRun {
		f.client.SetRoot(f.baseURL)
		f.client.SetHeader("Accept", "application/json, text/plain, */*")
		if f.accessToken == "" {
			f.client.SetHeader("Referer", baseURL)
			f.client.SetHeader("X-Requested-With", "XMLHttpRequest")
			f.client.SetHeader("Cookie", f.opt.Cookie)
		}
		f.notFirstRun = true
	}
	// Add the common parameters only on the first attempt — previously this
	// ran again after `goto retry`, appending duplicated query parameters
	// (e.g. app_id twice) to every retried request.
	if retry == 0 && opts.Parameters != nil {
		if f.accessToken == "" {
			opts.Parameters.Add("app_id", "250528")
			opts.Parameters.Add("channel", "dubox")
			opts.Parameters.Add("clienttype", "0")
		} else {
			opts.Parameters.Add("access_tokens", f.accessToken)
		}
	}
	if retry == 0 && opts.Method == http.MethodPost && opts.MultipartParams != nil {
		var overhead int64
		var err error
		opts.Body, opts.ContentType, overhead, err = rest.MultipartUpload(ctx, opts.Body, opts.MultipartParams, opts.MultipartContentName, opts.MultipartFileName)
		if err != nil {
			return err
		}
		if opts.ContentLength != nil {
			*opts.ContentLength += overhead
		}
	}
	resp, err := f.client.Call(ctx, opts)
	if err != nil {
		return err
	}
	debug(f.opt, 3, "Request: %+v", resp.Request)
	debug(f.opt, 2, "Response: %+v", resp)
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	debug(f.opt, 2, "Response body: %s", string(body))
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		err = fmt.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
		fs.Debug(nil, err.Error())
		if IsInSlice(resp.StatusCode, retryErrorCodes) {
			retry++
			if retry > 2 {
				return err
			}
			// NOTE(review): a retried request re-sends opts.Body, which the
			// previous attempt already consumed — multipart POST retries
			// likely send an empty body. TODO: rebuild the body on retry.
			time.Sleep(time.Duration(retry) * time.Second)
			goto retry
		}
		return err
	}
	if err := json.Unmarshal(body, res); err != nil {
		return err
	}
	// ResponseUploadedChunk has no errno field, so skip the API error check.
	if _, skip := res.(*api.ResponseUploadedChunk); !skip {
		if err, ok := res.(api.ErrorInterface); ok {
			if err.Err() != nil {
				return err
			}
		} else {
			return fmt.Errorf("response has no api error interface")
		}
	}
	return nil
}
// apiJsToken fetches the site's landing page and scrapes the jsToken value
// out of the embedded, URL-encoded bootstrap script, storing it on the Fs
// for subsequent upload calls.
func (f *Fs) apiJsToken(ctx context.Context) error {
	res, err := f.client.Call(ctx, &rest.Opts{Method: http.MethodGet})
	if err != nil {
		return err
	}
	defer res.Body.Close()
	raw, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}
	page := string(raw)
	token := getStrBetween(page, "`function%20fn%28a%29%7Bwindow.jsToken%20%3D%20a%7D%3Bfn%28%22", "%22%29`")
	if token == "" {
		debug(f.opt, 3, "jsToken not found, body: %s", page)
		return fmt.Errorf("jsToken not found")
	}
	f.jsToken = token
	return nil
}
// apiCheckLogin verifies that the configured credentials are still valid by
// calling the check/login endpoint; any API error is returned unchanged.
func (f *Fs) apiCheckLogin(ctx context.Context) error {
	var res api.ResponseDefault
	return f.apiExec(ctx, NewRequest(http.MethodGet, "/api/check/login"), &res)
}
// apiList returns all entries of the directory dir, transparently paging
// through the results 100 items at a time.
//
// dir is normalised to always start with "/". The returned items are the
// raw API objects; name decoding is left to the caller.
func (f *Fs) apiList(ctx context.Context, dir string) ([]*api.Item, error) {
	if !strings.HasPrefix(dir, "/") {
		dir = "/" + dir
	}
	const limit = 100
	page := 1
	opt := NewRequest(http.MethodGet, "/api/list")
	opt.Parameters.Add("dir", dir)
	// opt.Parameters.Add("web", "1") // If 1 is passed, the thumbnail field thumbs will be returned.
	// opt.Parameters.Add("order", ...) // Sorting field: time (modification time), name (file name), size (size; note that directories do not have a size)
	// if true {
	// 	opt.Parameters.Add("desc", "1") // 1: descending order; 0: ascending order
	// }
	list := make([]*api.Item, 0)
	for {
		opt.Parameters.Set("page", strconv.Itoa(page))
		opt.Parameters.Set("num", strconv.Itoa(limit))
		var res api.ResponseList
		if err := f.apiExec(ctx, opt, &res); err != nil {
			return nil, err
		}
		list = append(list, res.List...)
		// a short (or empty) page means we reached the end; the previous
		// `len == 0 ||` clause was redundant — it is implied by `< limit`
		if len(res.List) < limit {
			break
		}
		page++
	}
	return list, nil
}
// apiItemInfo requests metadata for a single path via the filemetas endpoint
// (the endpoint accepts several targets, but only one is ever passed here).
// When downloadLink is true the response also carries a download link.
//
// A per-file error inside the response takes precedence over the top-level
// one, e.g. {"errno":12,"info":[{"errno":-9}],"request_id":...}.
func (f *Fs) apiItemInfo(ctx context.Context, path string, downloadLink bool) (*api.Item, error) {
	opt := NewRequest(http.MethodGet, "/api/filemetas")
	opt.Parameters.Add("target", fmt.Sprintf(`["%s"]`, path))
	dlink := "0"
	if downloadLink {
		dlink = "1"
	}
	opt.Parameters.Add("dlink", dlink)
	var res api.ResponseItemInfo
	err := f.apiExec(ctx, opt, &res)
	if len(res.List) > 0 {
		if e := res.List[0].Err(); e != nil {
			return nil, e
		}
		return &res.List[0].Item, nil
	}
	if err != nil {
		return nil, err
	}
	return nil, fs.ErrorObjectNotFound
}
// apiMkDir creates the directory path on the remote. rtype=0 means "do not
// rename": if the path already exists the call fails with a conflict error.
// (naming policy values: 0 - fail on conflict; 1 - rename on any conflict;
// 2 - rename only when the block_list differs; 3 - overwrite)
func (f *Fs) apiMkDir(ctx context.Context, path string) error {
	opt := NewRequest(http.MethodPost, "/api/create")
	opt.MultipartParams = url.Values{
		"path":  {path},
		"isdir": {"1"},
		"rtype": {"0"},
	}
	var res api.ResponseDefault
	return f.apiExec(ctx, opt, &res)
}
// apiOperation runs a file-manager operation on the given items.
//
// operation is one of:
//
//	copy:   filelist: [{"path":"/hello/test.mp4","dest":"","newname":"test.mp4"}]
//	move:   filelist: [{"path":"/test.mp4","dest":"/test_dir","newname":"test.mp4"}]
//	rename: filelist: [{"path":"/hello/test.mp4","newname":"test_one.mp4"}]
//	delete: filelist: ["/test.mp4"]
//
// After a delete, the recycle bin is also cleared when DeletePermanently is set.
func (f *Fs) apiOperation(ctx context.Context, operation string, items []api.OperationalItem) error {
	opt := NewRequest(http.MethodPost, "/api/filemanager")
	opt.Parameters.Add("opera", operation)
	opt.Parameters.Add("async", "0") // 0: synchronous; 1: adaptive; 2: asynchronous - the returned structure depends on this choice
	// delete takes a bare list of paths, everything else the full item objects
	var payload any
	if operation == "delete" {
		paths := make([]string, len(items))
		for i, item := range items {
			paths[i] = item.Path
		}
		payload = paths
	} else {
		payload = items
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	opt.Body = bytes.NewBufferString("filelist=" + strings.ReplaceAll(url.QueryEscape(string(encoded)), "+", "%20"))
	var res api.ResponseOperational
	if err := f.apiExec(ctx, opt, &res); err != nil {
		return err
	}
	for _, info := range res.Info {
		if e := info.Err(); e != nil {
			return e
		}
	}
	if operation == "delete" && f.opt.DeletePermanently {
		return f.apiCleanRecycleBin(ctx)
	}
	return nil
}
// apiDownloadLink obtains the download link(s) for the file with the given
// ID. The download sign material is fetched only once per Fs lifetime (via
// sync.Once); on failure the Once is re-armed so a later call can retry.
// NOTE(review): re-assigning signsMX is not goroutine-safe if two downloads
// race through here concurrently — confirm callers serialise this path.
func (f *Fs) apiDownloadLink(ctx context.Context, fileID uint64) (*api.ResponseDownload, error) {
	var err error
	f.signsMX.Do(func() {
		err = f.apiSignPrepare(ctx)
	})
	if err != nil {
		f.signsMX = sync.Once{}
		return nil, err
	}
	opt := NewRequest(http.MethodGet, "/api/download")
	opt.Parameters.Add("type", "dlink")
	opt.Parameters.Add("vip", "2")
	opt.Parameters.Add("sign", sign(f.signs[0], f.signs[1]))
	opt.Parameters.Add("timestamp", fmt.Sprintf("%d", time.Now().Unix()))
	opt.Parameters.Add("need_speed", "1")
	opt.Parameters.Add("fidlist", fmt.Sprintf("[%d]", fileID))
	var res api.ResponseDownload
	if err := f.apiExec(ctx, opt, &res); err != nil {
		return nil, err
	}
	return &res, nil
}
// apiSignPrepare fetches the home info and stores the two signature parts
// (sign3 first, then sign1) used by apiDownloadLink to sign requests.
func (f *Fs) apiSignPrepare(ctx context.Context) error {
	var res api.ResponseHomeInfo
	if err := f.apiExec(ctx, NewRequest(http.MethodGet, "/api/home/info"), &res); err != nil {
		return err
	}
	f.signs = []string{res.Data.Sign3, res.Data.Sign1}
	return nil
}
// apiCleanRecycleBin permanently removes all entries from the Recycle Bin.
func (f *Fs) apiCleanRecycleBin(ctx context.Context) error {
	opt := NewRequest(http.MethodPost, "/api/recycle/clear")
	opt.Parameters.Add("async", "0") // 0: synchronous; 1: adaptive; 2: asynchronous - the returned structure depends on this choice
	var res api.ResponseDefault
	return f.apiExec(ctx, opt, &res)
}
// apiQuotaInfo reports the storage quota limits of the account.
func (f *Fs) apiQuotaInfo(ctx context.Context) (*api.ResponseQuota, error) {
	opt := NewRequest(http.MethodGet, "/api/quota")
	opt.Parameters.Add("checkexpire", "1")
	opt.Parameters.Add("checkfree", "1")
	res := &api.ResponseQuota{}
	if err := f.apiExec(ctx, opt, res); err != nil {
		return nil, err
	}
	return res, nil
}
// apiFileUpload uploads a file to path: it precreates the file, streams the
// content in chunkSize pieces to the upload host, then commits the file with
// the list of chunk MD5s via apiFileCreate.
//
// overwriteMode is the server-side naming policy (see apiFileCreate).
// Files larger than fileLimitSize are rejected up front (free plan limit).
func (f *Fs) apiFileUpload(ctx context.Context, path string, size int64, modTime time.Time, in io.Reader, options []fs.OpenOption, overwriteMode uint8) error {
	if size > fileLimitSize {
		return api.Num2Err(58)
	}
	// get host for upload (resolved only once per Fs lifetime)
	var err error
	f.uploadHostMX.Do(func() {
		err = f.apiFileLocateUpload(ctx)
	})
	if err != nil {
		// re-arm the Once so a later upload can retry the lookup
		f.uploadHostMX = sync.Once{}
		return err
	}
	// get JS token (required by the web upload API)
	if f.jsToken == "" {
		if err := f.apiJsToken(ctx); err != nil {
			return err
		}
	}
	// precreate file
	resPreCreate, err := f.apiFilePrecreate(ctx, path, size, modTime)
	if err != nil {
		return err
	}
	if resPreCreate.Type == 2 {
		// return_type 2: the file already exists on the server
		return api.Num2Err(-8)
	}
	// upload chunks
	chunkMD5s := make([]string, 0, size/chunkSize+1)
	chunkData := make([]byte, chunkSize)
	for chunkNo := 0; ; chunkNo++ {
		if err := ctx.Err(); err != nil {
			return err
		}
		// io.ReadFull (instead of a bare Read) guarantees we never send an
		// undersized chunk mid-stream because of a short read
		n, err := io.ReadFull(in, chunkData)
		if n == 0 {
			if err == nil || errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		// ErrUnexpectedEOF marks a legitimate short final chunk
		if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
			return err
		}
		// hash only the bytes actually read — previously the whole buffer
		// was hashed, so the final partial chunk included stale bytes from
		// the previous chunk and the local MD5 never matched the server's
		sum := fmt.Sprintf("%x", md5.Sum(chunkData[:n]))
		resUpload, err := f.apiFileUploadChunk(ctx, path, resPreCreate.UploadID, chunkNo, int64(n), chunkData[:n], options)
		if err != nil {
			return err
		}
		if sum != resUpload.MD5 {
			debug(f.opt, 1, "uploaded chunk have another md5 then our: %s, uploaded: %s", sum, resUpload.MD5)
			// trust the server's md5 for the final create call
			sum = resUpload.MD5
		}
		chunkMD5s = append(chunkMD5s, sum)
	}
	// create (commit) the file from the uploaded chunks
	return f.apiFileCreate(ctx, path, resPreCreate.UploadID, size, modTime, chunkMD5s, overwriteMode)
}
// apiFileLocateUpload asks the API which host should receive file uploads
// and caches it on the Fs for apiFileUploadChunk.
func (f *Fs) apiFileLocateUpload(ctx context.Context) error {
	opt := NewRequest(http.MethodGet, "https://d.terabox.com/rest/2.0/pcs/file?method=locateupload")
	opt.Parameters = nil // absolute URL carries its own query - suppress the common parameters
	var res api.ResponseFileLocateUpload
	if err := f.apiExec(ctx, opt, &res); err != nil {
		return err
	}
	f.uploadHost = res.Host
	return nil
}
// apiFilePrecreate registers the upcoming upload and returns the upload ID.
// The block_list values here are hard-coded (presumably placeholders — the
// real chunk MD5s are sent later by apiFileCreate). If the server reports an
// expired jsToken (4000023), the token is refreshed once and the request is
// repeated.
func (f *Fs) apiFilePrecreate(ctx context.Context, path string, size int64, modTime time.Time) (*api.ResponsePrecreate, error) {
	opt := NewRequest(http.MethodPost, "/api/precreate")
	opt.Parameters.Add("jsToken", f.jsToken)
	opt.MultipartParams = url.Values{}
	opt.MultipartParams.Add("path", path)
	opt.MultipartParams.Add("autoinit", "1")
	opt.MultipartParams.Add("local_mtime", fmt.Sprintf("%d", modTime.Unix()))
	opt.MultipartParams.Add("file_limit_switch_v34", "true")
	opt.MultipartParams.Add("size", fmt.Sprintf("%d", size))
	dirPath, _ := libPath.Split(path)
	opt.MultipartParams.Add("target_path", dirPath)
	if size > chunkSize {
		opt.MultipartParams.Add("block_list", `["5910a591dd8fc18c32a8f3df4fdc1761", "a5fc157d78e6ad1c7e114b056c92821e"]`)
	} else {
		opt.MultipartParams.Add("block_list", `["5910a591dd8fc18c32a8f3df4fdc1761"]`)
	}
	var res api.ResponsePrecreate
	tokenRefreshed := false
	for {
		err := f.apiExec(ctx, opt, &res)
		if err == nil {
			return &res, nil
		}
		if !api.ErrIsNum(err, 4000023) || tokenRefreshed {
			return nil, err
		}
		// jsToken expired - refresh it once and retry
		tokenRefreshed = true
		if err := f.apiJsToken(ctx); err != nil {
			return nil, err
		}
		opt.Parameters.Set("jsToken", f.jsToken)
	}
}
// apiFileUploadChunk sends one chunk (zero-based chunkNumber) of the upload
// identified by uploadID to the upload host as a multipart form, returning
// the server's record of the stored chunk (including its MD5).
func (f *Fs) apiFileUploadChunk(ctx context.Context, path, uploadID string, chunkNumber int, size int64, data []byte, options []fs.OpenOption) (*api.ResponseUploadedChunk, error) {
	opt := NewRequest(http.MethodPost, fmt.Sprintf("https://%s/rest/2.0/pcs/superfile2", f.uploadHost))
	opt.Parameters.Add("method", "upload")
	opt.Parameters.Add("path", path)
	opt.Parameters.Add("uploadid", uploadID)
	opt.Parameters.Add("partseq", strconv.Itoa(chunkNumber))
	opt.Parameters.Add("uploadsign", "0")
	opt.Options = options
	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, bytes.NewReader(data), opt.MultipartParams, "file", "blob")
	if err != nil {
		return nil, fmt.Errorf("failed to make multipart upload for file: %w", err)
	}
	contentLength := overhead + size
	opt.ContentLength = &contentLength
	opt.ContentType = contentType
	opt.Body = formReader
	var res api.ResponseUploadedChunk
	if err := f.apiExec(ctx, opt, &res); err != nil {
		return nil, err
	}
	return &res, nil
}
// apiFileCreate commits a chunked upload: it links the uploaded chunks
// (identified by uploadID and their MD5s in blockList) into the final file
// at path.
//
// overwriteMode selects the naming policy: 0 - fail on conflict; 1 - rename
// on any conflict; 2 - rename only when the block_list differs; 3 -
// overwrite. Out-of-range values fall back to 1.
//
// NB: for directory creation isdir/rtype go into the body (see apiMkDir),
// for file creation they go into the URL.
func (f *Fs) apiFileCreate(ctx context.Context, path, uploadID string, size int64, modTime time.Time, blockList []string, overwriteMode uint8) error {
	opt := NewRequest(http.MethodPost, "/api/create")
	opt.Parameters.Add("isdir", "0")
	rtype := fmt.Sprintf("%d", overwriteMode)
	if overwriteMode > 3 {
		rtype = "1"
	}
	opt.Parameters.Add("rtype", rtype)
	opt.MultipartParams = url.Values{}
	opt.MultipartParams.Add("path", path)
	opt.MultipartParams.Add("local_mtime", fmt.Sprintf("%d", modTime.Unix()))
	opt.MultipartParams.Add("uploadid", uploadID)
	opt.MultipartParams.Add("size", fmt.Sprintf("%d", size))
	dirPath, _ := libPath.Split(path)
	opt.MultipartParams.Add("target_path", dirPath)
	encodedBlockList, err := json.Marshal(blockList)
	if err != nil {
		return err
	}
	opt.MultipartParams.Add("block_list", string(encodedBlockList))
	debug(f.opt, 3, "%+v", opt.MultipartParams)
	var res api.ResponseDefault
	return f.apiExec(ctx, opt, &res)
}

View File

@ -0,0 +1,176 @@
package api
import (
"fmt"
)
var (
	// errorsDescription maps Terabox API error numbers (the "errno" field of
	// a response) to human readable messages. Entries were collected from the
	// official API documentation, the web client, and unofficial sources.
	errorsDescription = map[int]string{
		// from API
		2:      "Required parameters are missing.",
		105:    "External link error",
		100001: "The client_id or client_secret parameter is invalid.",
		100002: "The code is invalid (invalid or expired code).",
		200001: "Unsupported authorization type grant_type.",
		200002: "Invalid access_token.",
		200003: "The access_token has expired.",
		200004: "Invalid refresh_token.",
		200005: "The refresh_token has expired.",
		300001: "The frequency of exchanging the code for the access_token is too high.",
		400001: "The user has not yet completed the authorization operation for the device_code (the error code of the device code mode).",
		500001: "Internal service exception.",
		-7:     "Invalid file name",
		-8:     "The file already exists",
		-9:     "The file doesn't exist or request parameter spd is incorrect.",
		-12:    "Error in extraction code",
		// from web client
		-1: "User name or password verification failed",
		-2: "Back up",
		-4: "Unknown error",
		-5: "Unknown error",
		-6: "Failed to login, please try again later ",
		// -7: "Unable to access or the name is wrong",
		// -8: "The file already exists in this directory ",
		// -9: "The file doesn't exist or The request parameter spd is incorrect.",
		-10: "Your space is insufficient",
		-11: "The parent directory does not exist",
		// -12: "Error in extraction code",
		-14: "The account has been initialized",
		-13: "The device has been bonded",
		-19: "Please enter the verification code",
		-21: "Cannot operate preference files",
		-22: "Shared files cannot be renamed or moved",
		-23: "Failed to operate database, please contact the administrator",
		-24: "The files to cancel contain some that are not allowed to cancel",
		-25: "Not beta user",
		-26: "Invalid invitation code",
		-32: "Your space is insufficient",
		1:   "System Error",
		// 2: "Server Error, please try again later",
		3:  "No more than 100 files at a time",
		4:  "New file name error ",
		5:  "Illegal target directory",
		7:  "Illegal NS or no access",
		8:  "Illegal ID or no access",
		9:  "Failed to apply for the key",
		10: "Unsuccessful superfile",
		11: "Illegal user ID (or user name) or no access ",
		12: "Some files already exist in target directory",
		15: "Operation failed",
		// original upstream message: "Upload single file vip limit"
		58:    "Size of upload file more than allowed free plan limit (4GB)",
		102:   "Unable to access the directory",
		103:   "Incorrect password",
		104:   "Invalid cookie",
		111:   "You currently have unfinished tasks, please operate after completion",
		121:   "The number of files exceeds the limit, please delete your files until the number is below 5 million",
		132:   "Verify your identity to operate the files",
		141:   "Internal Error",
		142:   "You have been removed from this Shared Folder, thus you cannot continue",
		301:   "Other request error",
		501:   "Illegal format of the LIST",
		618:   "Failed request",
		619:   "PCS returns an error code",
		600:   "json error",
		601:   "Incorrect exception",
		617:   "Other error",
		211:   "Unable to access or being banned",
		407:   "Internal error",
		31080: "Server error, please try again later",
		31021: "Unable to access network, please check the network or try again later",
		31075: "No more than 999 files at a time, please reduce the number",
		31116: "Your space is insufficient",
		31401: "The selected file will be canceled from the shared folder after it is moved, and the members cannot view it. Are you sure to continue?",
		31034: "The frequency of operation is too soon, please try again later",
		36009: "Insufficient user space",
		36010: "The file doesn't exist",
		36012: "Operation timed out, please try again later",
		36013: "Cannot download, too many tasks are downloaded at the same time",
		36014: "The storage path has been used",
		36016: "Task deleted",
		36017: "Task completed",
		36019: "The task is being processed",
		36018: "Failed to resolve,the torrent file is corrupted",
		36020: "The task address doesn't exist",
		36021: "A normal user can download 1 task at a time! Y You can download more by subscribing the offline download package",
		36023: "A normal user can complete 5 offline downloading tasks only per month! You can download more by subscribing the offline download package",
		36022: "Cannot download, too many tasks are downloaded at the same time",
		36024: "The number of downloads in this month has reached the limit",
		36025: "Expired link",
		36026: "Link format error",
		36028: "Unable to access relevant information",
		36027: "Link format error",
		36031: "Network busy, please try again later",
		36001: "Network busy, please try again later",
		36032: "Cannot download the offline files because they contain illegal contents",
		9000:  "TeraBox is not yet available in vm area",
		36038: "Download is unavailable as requested by the copyright owner",
		9001:  "A Server Error has occurred, please try again later",
		9002:  "The frequency of complaint is too fast, please try again later",
		10019: "Format Error",
		10013: "You currently have unfinished tasks, please operate after completion",
		10014: "No importable credentials detected",
		// received together with an explanatory message in the response
		31208: "content_type is not exists",
		31299: "Size of superfile2 first part should not be smaller than 4MB",
		// third party [unofficial]
		4000023: "need verify (jsToken expired)",
	}
)
// Num2Err converts a Terabox error number into an error value; a number of
// 0 means success and yields nil.
func Num2Err(number int) error {
	if number == 0 {
		return nil
	}
	return ErrorAPI{number}
}

// ErrIsNum reports whether err is a Terabox API error with the given number.
func ErrIsNum(err error, number int) bool {
	if e, ok := err.(ErrorInterface); ok {
		return e.ErrorNumber() == number
	}
	return false
}

var _ ErrorInterface = ResponseDefault{}

// ErrorInterface is the universal error interface implemented by API
// responses (via the embedded ErrorAPI).
type ErrorInterface interface {
	ErrorNumber() int
	Error() string
	Err() error
}

// ErrorAPI - Terabox API error, decoded from the "errno" response field.
type ErrorAPI struct {
	ErrNumber int `json:"errno"`
}

// ErrorNumber returns the Terabox error number.
func (err ErrorAPI) ErrorNumber() int {
	return err.ErrNumber
}

// Error implements the error interface, mapping the error number to a human
// readable description where one is known.
func (err ErrorAPI) Error() string {
	// single map lookup instead of the previous check-then-read double lookup
	if desc, ok := errorsDescription[err.ErrNumber]; ok {
		return desc
	}
	return fmt.Sprintf("Unknown error %d", err.ErrNumber)
}

// Err returns the error itself if the number is non-zero, otherwise nil.
func (err ErrorAPI) Err() error {
	if err.ErrNumber == 0 {
		return nil
	}
	return err
}

View File

@ -0,0 +1,110 @@
// Package api provides types used by the Terabox API.
package api
// ResponseDefault - a response carrying nothing but the API error fields.
type ResponseDefault struct {
	ErrorAPI
}

// ResponseItemInfo - file or dir metadata from /api/filemetas. Each list
// entry carries its own error fields in addition to the top-level ones.
type ResponseItemInfo struct {
	ErrorAPI
	List []*struct {
		ErrorAPI
		Item
	} `json:"info"`
}

// ResponseList - one page of a directory listing.
type ResponseList struct {
	ErrorAPI
	List []*Item `json:"list"`
}

// Item describes a single file or directory.
type Item struct {
	ID                 uint64 `json:"fs_id"` // The file ID; the unique identification of the file in the cloud
	Name               string `json:"server_filename"`
	Path               string `json:"path"`
	MD5                string `json:"md5"`
	Size               int64  `json:"size"`
	Category           int    `json:"category"` // File type: 1: video; 2: audio; 3: image; 4: document; 5: application; 6: others; 7: seed
	Isdir              int    `json:"isdir"`
	Share              int    `json:"share"`        // if share > 0, the file is shared
	ServerCreateTime   int64  `json:"server_ctime"` // The server-side creation time of the file [unix timestamp]
	ServerModifiedTime int64  `json:"server_mtime"` // The server-side modification time of the file [unix timestamp]
	DownloadLink       string `json:"dlink,omitempty"` // Download link for Item Info response, not available for list
}

// OperationalItem describes one file for a filemanager operation.
type OperationalItem struct {
	Path        string `json:"path"`
	Destination string `json:"dest,omitempty"`
	NewName     string `json:"newname,omitempty"`
	OnDuplicate string `json:"ondup,omitempty"` // for copy or move, can be `overwrite`, `newcopy` - will add `(1)` to filename
}

// ResponseOperational is the result of an operation on items; each entry
// carries its own error fields.
type ResponseOperational struct {
	ErrorAPI
	Info []struct {
		ErrorAPI
		Path string `json:"path"`
	} `json:"info"`
}

// ResponseDownload contains download link(s) for the requested file IDs.
type ResponseDownload struct {
	ErrorAPI
	DownloadLink []struct {
		ID  uint64 `json:"fs_id"`
		URL string `json:"dlink"`
	} `json:"dlink"`
	FileInfo []struct {
		Size int64  `json:"size"`
		Name string `json:"filename"`
	} `json:"file_info"`
}

// ResponseHomeInfo carries the download signing material (see apiSignPrepare).
type ResponseHomeInfo struct {
	ErrorAPI
	Data struct {
		Sign1     string `json:"sign1"`
		Sign3     string `json:"sign3"`
		Timestamp int64  `json:"timestamp"`
	} `json:"data"`
}

// ResponseQuota describes the account storage quota.
type ResponseQuota struct {
	ErrorAPI
	Total      int64 `json:"total"`
	Used       int64 `json:"used"`
	Free       int64 `json:"free"`
	Expire     bool  `json:"expire"`
	SboxUsed   int64 `json:"sbox_used"`
	ServerTime int64 `json:"server_time"`
}

// ResponseFileLocateUpload carries the host to use for file uploads.
type ResponseFileLocateUpload struct {
	ErrorAPI
	Host string `json:"host"`
}

// ResponsePrecreate carries the parameters of a precreated upload.
type ResponsePrecreate struct {
	ErrorAPI
	UploadID  string  `json:"uploadid"`
	Type      int     `json:"return_type"` // 2 is treated by apiFileUpload as "file already exists"
	Path      string  `json:"path"`
	BlockList []int64 `json:"block_list"`
}

// ResponseUploadedChunk describes an uploaded chunk. NB: it carries no
// errno field, so apiExec skips the API error check for it.
type ResponseUploadedChunk struct {
	UploadID string `json:"uploadid"`
	PartSeq  int    `json:"partseq"`
	MD5      string `json:"md5"`
}

734
backend/terabox/terabox.go Normal file
View File

@ -0,0 +1,734 @@
// Package terabox provides an interface to the Terabox storage system.
// resources for implementation:
// https://github.com/ivansaul/terabox_downloader
// https://gist.github.com/CypherpunkSamurai/58d8f2b669e101e893a6ecf3d3938412
// https://github.com/maiquocthinh/Terabox-DL
// https://github.com/fskhri/TeraboxDownloader
// https://github.com/AlistGo/alist/tree/main/drivers/terabox
//
// Documentation:
// https://www.terabox.com/integrations/docs?lang=en
package terabox
import (
"context"
"errors"
"fmt"
"io"
"net/http"
libPath "path"
"strconv"
"sync"
"time"
"github.com/rclone/rclone/backend/terabox/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/rest"
)
// Backend-wide constants.
const (
	baseURL             = "https://www.terabox.com"
	chunkSize     int64 = 4 << 20 // 4MB - upload chunk size
	fileLimitSize int64 = 4 << 30 // 4GB - single file limit enforced by apiFileUpload

	// minSleep = 400 * time.Millisecond // api is extremely rate limited now
	// maxSleep = 5 * time.Second
	// decayConstant = 2 // bigger for slower decay, exponential
	// attackConstant = 0 // start with max sleep
)
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Abouter        = (*Fs)(nil)
	_ fs.Copier         = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
	_ fs.DirMover       = (*Fs)(nil)
	_ fs.Purger         = (*Fs)(nil)
	_ fs.CleanUpper     = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)
	_ fs.Object         = (*Object)(nil)
)
// init registers the backend with rclone so "terabox" can be used as a
// remote type.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "terabox",
		Description: "Terabox",
		NewFs:       NewFs,
		Options: []fs.Option{
			// {
			// 	Help:      "Your access token.",
			// 	Name:      "access_token",
			// 	Sensitive: true,
			// },
			{
				Help:     "Set cookie (should contains TSID; ndus; ndut_fmt)",
				Name:     "cookie",
				Advanced: false,
				Required: true,
			},
			{
				Help:     "Clear Recycle Bin after deletion",
				Name:     "delete_permanently",
				Advanced: true,
				Default:  false,
			},
			{
				Help:     "Set custom header User Agent",
				Name:     "user_agent",
				Advanced: true,
				Default:  "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
			},
			{
				Help:     "Set extra debug level from 0 to 3 (0 - none; 1 - name of function and params; 2 - response output; 3 - request output)",
				Name:     "debug_level",
				Advanced: true,
				Default:  0,
			},
			{
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				// maxFileLength = 255
				Default: (encoder.Display |
					encoder.EncodeBackQuote |
					encoder.EncodeDoubleQuote |
					encoder.EncodeLtGt |
					encoder.EncodeLeftSpace |
					encoder.EncodeInvalidUtf8),
			}},
	})
}
// Options defines the configuration for this backend
type Options struct {
	// AccessToken string `config:"access_token"`
	Cookie            string `config:"cookie"`
	DeletePermanently bool   `config:"delete_permanently"`
	UserAgent         string `config:"user_agent"`
	// The config tag must match the option name registered in init()
	// ("debug_level"); with the previous tag `config:"debug"` the configured
	// value was never mapped onto this field.
	DebugLevel uint8                `config:"debug_level"`
	Enc        encoder.MultiEncoder `config:"encoding"`
}
//
//------------------------------------------------------------------------------------------------------------------------
//

// Fs represents a remote Terabox storage
type Fs struct {
	root     string
	name     string
	opt      *Options
	features *fs.Features
	client   *rest.Client
	// pacer *fs.Pacer
	origRoot     string    // the root exactly as passed to NewFs (may point at a file)
	origRootItem *api.Item // cached metadata for origRoot; nil when it does not exist
	baseURL      string
	notFirstRun  bool // set once apiExec has configured the shared client
	// sign for download should be fetched only once
	signs   []string
	signsMX sync.Once
	// upload host should be resolved only once
	uploadHost   string
	uploadHostMX sync.Once
	// official access [added for future releases]
	accessToken string
	// unofficial access [web token required for upload]
	jsToken string
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
//
// copyto: for srcFS the root includes the file name, for dstFS the last
// segment (the file name) will be cut off
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	if err := configstruct.Set(config, opt); err != nil {
		return nil, err
	}
	debug(opt, 1, "NewFS %s; %s; %+v;", name, root, opt)
	// normalise the root: empty means "/", a single leading "." is stripped
	if root == "" {
		root = "/"
	} else if root[0:1] == "." {
		root = root[1:]
	}
	f := &Fs{
		name:     name,
		root:     root,
		origRoot: root, // save the original root, because root can change if the path is a file
		opt:      opt,
		// pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
		baseURL: baseURL,
		// jsToken: "",
	}
	f.features = (&fs.Features{
		ReadMetadata:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)
	// build the HTTP client, honouring the configured User Agent
	newCtx, clientConfig := fs.AddConfig(ctx)
	if opt.UserAgent != "" {
		clientConfig.UserAgent = opt.UserAgent
	}
	f.client = rest.NewClient(fshttp.NewClient(newCtx))
	// update base url for official API requests [not finished, some methods need updating for compatibility]
	if f.accessToken != "" {
		f.baseURL += "/open"
	}
	// the root always exists, so there is no reason to check it
	if root != "/" {
		var err error
		// cache the item so the same data is not requested again when NewObject
		// is called next; if the item is nil, the file or dir was not found and
		// a List or NewObject request can be skipped
		if f.origRootItem, err = f.apiItemInfo(ctx, root, true); err != nil {
			if !api.ErrIsNum(err, -9) { // -9 (not found) is tolerated here
				return nil, err
			}
		} else if f.origRootItem.Isdir == 0 {
			f.root = libPath.Dir(root)
			// return an error with an fs which points to the parent of file
			return f, fs.ErrorIsFile
		}
	} else {
		// check the account is active
		if err := f.apiCheckLogin(ctx); err != nil {
			return nil, err
		}
	}
	return f, nil
}
//
// fs.Info interface implementation
//------------------------------------------------------------------------------------------------------------------------

// Name returns the configured name of the remote (as passed into NewFs).
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path of the remote (as passed into NewFs).
func (f *Fs) Root() string {
	return f.root
}

// String returns a human readable description of the FS.
func (f *Fs) String() string {
	return "Terabox root '" + f.root + "'"
}

// Precision of the ModTimes in this Fs - mod times are not supported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem (none).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
//
// fs.Abouter Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------

// About reports the quota usage of the remote.
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	debug(f.opt, 1, "About")
	info, err := f.apiQuotaInfo(ctx)
	if err != nil {
		return nil, err
	}
	usage := &fs.Usage{
		Total: &info.Total,
		Used:  &info.Used,
		Free:  &info.Free,
	}
	return usage, nil
}
//
// fs.Fs interface implementation
//------------------------------------------------------------------------------------------------------------------------
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	debug(f.opt, 1, "List %s;", dir)
	// origRootItem is nil when NewFs found that the root path does not exist
	if f.origRootItem == nil {
		return nil, fs.ErrorDirNotFound
	}
	items, err := f.apiList(ctx, libPath.Join(f.root, dir))
	if err != nil {
		// API error -9 means the path was not found
		if api.ErrIsNum(err, -9) {
			return nil, fs.ErrorDirNotFound
		}
		return nil, err
	}
	for _, item := range items {
		remote := libPath.Join(dir, f.opt.Enc.ToStandardName(item.Name))
		if item.Isdir > 0 {
			entries = append(entries, fs.NewDir(remote, time.Time{}).SetID(strconv.FormatUint(item.ID, 10)))
			continue
		}
		entries = append(entries, &Object{
			fs:      f,
			id:      item.ID,
			remote:  remote,
			size:    item.Size,
			modTime: time.Unix(item.ServerModifiedTime, 0),
			hash:    item.MD5,
		})
	}
	return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	debug(f.opt, 1, "NewObject %s;", remote)
	fullPath := libPath.Join(f.root, remote)
	var item *api.Item
	if fullPath == f.origRoot {
		// reuse the item cached by NewFs instead of re-requesting it;
		// a nil cache entry means the path did not exist at startup
		if f.origRootItem == nil {
			return nil, fs.ErrorObjectNotFound
		}
		item = f.origRootItem
	} else {
		var err error
		item, err = f.apiItemInfo(ctx, fullPath, true)
		if err != nil {
			// API error -9 means the path was not found
			if api.ErrIsNum(err, -9) {
				return nil, fs.ErrorObjectNotFound
			}
			return nil, err
		}
	}
	if item.Isdir > 0 {
		return nil, fs.ErrorIsDir
	}
	return &Object{
		fs:           f,
		id:           item.ID,
		remote:       remote,
		size:         item.Size,
		modTime:      time.Unix(item.ServerModifiedTime, 0),
		hash:         item.MD5,
		downloadLink: item.DownloadLink,
	}, nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	debug(f.opt, 1, "Put %p; %+v; %+v;", in, src, options)
	size := src.Size()
	if size < 0 {
		return nil, errors.New("refusing to update with unknown size")
	}
	// upload mode 0 — presumably the "plain upload" mode; compare
	// PutUnchecked (mode 1) and Object.Update (mode 3)
	dst := libPath.Join(f.root, src.Remote())
	if err := f.apiFileUpload(ctx, dst, size, src.ModTime(ctx), in, options, 0); err != nil {
		return nil, err
	}
	return f.NewObject(ctx, src.Remote())
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	debug(f.opt, 1, "Mkdir %s;", dir)
	fullPath := libPath.Join(f.root, dir)
	// nothing to do for the root — it always exists
	switch fullPath {
	case "", ".", "/":
		return nil
	}
	// API error -8 (directory already exists) is not a failure here
	if err = f.apiMkDir(ctx, fullPath); err != nil && !api.ErrIsNum(err, -8) {
		return err
	}
	return nil
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
//
// NOTE(review): intentionally not implemented — the commented-out delete
// below would remove non-empty directories too (Purge semantics), which
// violates the Rmdir contract; confirm an "only if empty" check is possible
// via the API before enabling it.
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return fs.ErrorNotImplemented
	// f.apiOperation(ctx, "delete", []api.OperationalItem{
	// 	{Path: libPath.Join(f.root, dir)},
	// })
}
//
// fs.PutUncheckeder Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------
// PutUnchecked put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	debug(f.opt, 1, "PutUnchecked %p; %+v; %+v;", in, src, options)
	size := src.Size()
	if size < 0 {
		return nil, errors.New("refusing to update with unknown size")
	}
	// upload mode 1 — presumably "upload without existence check";
	// compare Put (mode 0) and Object.Update (mode 3)
	dst := libPath.Join(f.root, src.Remote())
	if err := f.apiFileUpload(ctx, dst, size, src.ModTime(ctx), in, options, 1); err != nil {
		return nil, err
	}
	return f.NewObject(ctx, src.Remote())
}
//
// fs.Copier Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------
// Copy src to this remote using server side operations.
// It returns the destination Object and a possible error
//
// # Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	debug(f.opt, 1, "Copy %+v; %s;", src, remote)
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	// make sure the destination root exists; API error -8 — presumably
	// "already exists" (tolerated the same way in Mkdir) — is fine
	if f.origRootItem == nil {
		if err := f.apiMkDir(ctx, f.root); err != nil && !api.ErrIsNum(err, -8) {
			return nil, err
		}
	}
	op := api.OperationalItem{
		Path:        libPath.Join(srcObj.fs.root, srcObj.remote),
		Destination: f.root,
		NewName:     remote,
	}
	if err := f.apiOperation(ctx, "copy", []api.OperationalItem{op}); err != nil {
		return nil, fmt.Errorf("couldn't copy file: %w", err)
	}
	return f.NewObject(ctx, remote)
}
//
// fs.Mover Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------
// Move src to this remote using server side move operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
//
// # Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	debug(f.opt, 1, "Move %+v; %s;", src, remote)
	srcObj, ok := src.(*Object)
	if !ok {
		// was "Can't copy"/fs.ErrorCantCopy — a copy-paste from Copy;
		// the fs.Mover contract requires fs.ErrorCantMove here
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	srcPath := libPath.Join(srcObj.fs.root, srcObj.remote)
	// make sure the destination root exists; API error -8 — presumably
	// "already exists" (tolerated the same way in Mkdir) — is fine
	if f.origRootItem == nil {
		if err := f.apiMkDir(ctx, f.root); err != nil && !api.ErrIsNum(err, -8) {
			return nil, err
		}
	}
	if err := f.apiOperation(ctx, "move", []api.OperationalItem{{Path: srcPath, Destination: f.root, NewName: remote}}); err != nil {
		return nil, fmt.Errorf("couldn't move file: %w", err)
	}
	return f.NewObject(ctx, remote)
}
//
// fs.DirMove Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
//
// NOTE(review): the "destination exists → fs.ErrorDirExists" case is not
// handled explicitly — confirm what the API returns for an existing
// destination directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	debug(f.opt, 1, "DirMove %+v; %s; %s;", src, srcRemote, dstRemote)
	srcFs, ok := src.(*Fs)
	if !ok {
		// was "Can't copy" — a copy-paste from Copy; this is a move
		fs.Debugf(src, "Can't move - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := libPath.Join(srcFs.root, srcRemote)
	// srcFs cached its root item at NewFs time; nil means it never existed
	if srcFs.origRootItem == nil {
		return errors.New("source directory not found")
	}
	_, name := libPath.Split(srcPath)
	if name == "" {
		return errors.New("couldn't move root directory")
	}
	// make sure the destination root exists; API error -8 — presumably
	// "already exists" (tolerated the same way in Mkdir) — is fine
	if f.origRootItem == nil {
		if err := f.apiMkDir(ctx, f.root); err != nil && !api.ErrIsNum(err, -8) {
			return err
		}
	}
	dstPath := libPath.Join(f.root, dstRemote)
	if err := f.apiOperation(ctx, "move", []api.OperationalItem{{Path: srcPath, Destination: dstPath, NewName: name}}); err != nil {
		return fmt.Errorf("couldn't move directory: %w", err)
	}
	return nil
}
// fs.Purger Interface implementation [optional]
// ------------------------------------------------------------------------------------------------------------------------
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
debug(f.opt, 1, "Purge %s;", dir)
return f.apiOperation(ctx, "delete", []api.OperationalItem{
{Path: libPath.Join(f.root, dir)},
})
}
//
// fs.Cleaner Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	debug(f.opt, 1, "CleanUp")
	// empties the Terabox recycle bin server-side
	return f.apiCleanRecycleBin(ctx)
}
//
//
//
// Object represents a Terabox object
type Object struct {
	fs           *Fs       // what this object is part of
	id           uint64    // file id
	remote       string    // the remote path, relative to fs.root
	size         int64     // bytes in the object
	modTime      time.Time // modified time of the object
	hash         string    // md5
	downloadLink string    // download link, available only for objects created by the NewObject method
}
//
// ObjectInfo Interface implementation
//------------------------------------------------------------------------------------------------------------------------
// Fs returns the parent Fs.
func (o *Object) Fs() fs.Info {
	return o.fs
}
// String returns a string version of the object for log messages.
func (o *Object) String() string {
	if o != nil {
		return o.remote
	}
	return "<nil>"
}
// Remote returns the remote path.
func (o *Object) Remote() string {
	return o.remote
}
// ModTime returns the modification time of the object.
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}
// Size returns the size of an object in bytes.
func (o *Object) Size() int64 {
	return o.size
}
// Hash returns the Md5sum of an object returning a lowercase hex string
//
// NOTE(review): the requested hash type t is ignored — callers asking for
// any hash type receive the stored MD5. Consider returning
// hash.ErrUnsupported for t != hash.MD5; confirm against the Fs.Hashes()
// declaration, which currently advertises no hash support at all.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return o.hash, nil
}
// Storable returns whether this object is storable.
func (o *Object) Storable() bool {
	return true
}
//
// fs.IDer Interface implementation [optional]
//------------------------------------------------------------------------------------------------------------------------
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	// use strconv instead of fmt.Sprintf for a plain integer conversion,
	// consistent with how List formats the same field
	return strconv.FormatUint(o.id, 10)
}
//
// Object Interface implementation
//------------------------------------------------------------------------------------------------------------------------
// SetModTime sets the modification time of the local fs object
//
// Terabox has no API for changing modification times, so this always
// fails with fs.ErrorCantSetModTime (matching Precision's
// ModTimeNotSupported).
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	debug(o.fs.opt, 1, "Object Open %+v;", options)
	// Try to (re)acquire a download link from item info. o.remote is
	// relative to the Fs root, so it must be joined with it — previously
	// the bare o.remote was used, which queried the wrong path whenever
	// the remote was configured with a non-empty root.
	if o.downloadLink == "" {
		if item, err := o.fs.apiItemInfo(ctx, libPath.Join(o.fs.root, o.remote), true); err == nil && item.DownloadLink != "" {
			o.downloadLink = item.DownloadLink
		}
	}
	// Fall back to the dedicated download-link endpoint, addressed by file ID
	if o.downloadLink == "" {
		if res, err := o.fs.apiDownloadLink(ctx, o.id); err != nil {
			return nil, err
		} else if len(res.DownloadLink) > 0 {
			o.downloadLink = res.DownloadLink[0].URL
		}
	}
	if o.downloadLink == "" {
		return nil, fs.ErrorObjectNotFound
	}
	// clamp any Range options to the object size before downloading
	fs.FixRangeOption(options, o.size)
	resp, err := o.fs.client.Call(ctx, &rest.Opts{Method: http.MethodGet, RootURL: o.downloadLink, Options: options})
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	debug(o.fs.opt, 1, "Object Update %p; %+v; %+v;", in, src, options)
	if src.Size() < 0 {
		return errors.New("refusing to update with unknown size")
	}
	// upload mode 3 — presumably "overwrite existing"; compare Put (0)
	// and PutUnchecked (1)
	fullPath := libPath.Join(o.fs.root, o.remote)
	if err := o.fs.apiFileUpload(ctx, fullPath, src.Size(), src.ModTime(ctx), in, options, 3); err != nil {
		return err
	}
	// Re-read the object after the upload and copy its metadata over ours
	updated, err := o.fs.NewObject(ctx, o.Remote())
	if err != nil {
		return err
	}
	*o = *updated.(*Object)
	return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	debug(o.fs.opt, 1, "Remove")
	// o.remote is relative to the Fs root, so join with it — previously
	// the bare o.remote was passed, which targeted the wrong path whenever
	// the remote had a non-empty root (Purge and Update both join with
	// the root for the same operation).
	return o.fs.apiOperation(ctx, "delete", []api.OperationalItem{
		{Path: libPath.Join(o.fs.root, o.remote)},
	})
}

View File

@ -0,0 +1,21 @@
// Test Terabox filesystem interface
package terabox_test
import (
"testing"
"github.com/rclone/rclone/backend/terabox"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		// default remote name used when none is supplied on the command line
		*fstest.RemoteName = "TestTerabox:"
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*terabox.Object)(nil),
	})
}

79
backend/terabox/util.go Normal file
View File

@ -0,0 +1,79 @@
package terabox
import (
"encoding/base64"
"fmt"
"net/url"
"regexp"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// NewRequest initialises request params for the rest client. An absolute
// https:// path is set as the RootURL, anything else as a Path relative to
// the client's configured root.
func NewRequest(method, path string) *rest.Opts {
	opts := &rest.Opts{Method: method, Parameters: url.Values{}}
	if strings.HasPrefix(path, "https://") {
		opts.RootURL = path
	} else {
		opts.Path = path
	}
	return opts
}
// IsInSlice reports whether list contains the element v.
func IsInSlice[T comparable](v T, list []T) bool {
	// ranging over a nil slice is a no-op, so no explicit nil check needed
	for _, item := range list {
		if item == v {
			return true
		}
	}
	return false
}
// debug logs str via fs.Debugf when the configured debug level is at least
// the requested level.
func debug(opt *Options, level uint8, str string, args ...any) {
	if level <= opt.DebugLevel {
		fs.Debugf(nil, str, args...)
	}
}
func getStrBetween(raw, start, end string) string {
regexPattern := fmt.Sprintf(`%s(.*?)%s`, regexp.QuoteMeta(start), regexp.QuoteMeta(end))
regex := regexp.MustCompile(regexPattern)
matches := regex.FindStringSubmatch(raw)
if len(matches) < 2 {
return ""
}
mid := matches[1]
return mid
}
func sign(s1, s2 string) string {
var a = make([]int, 256)
var p = make([]int, 256)
var o []byte
var v = len(s1)
for q := 0; q < 256; q++ {
a[q] = int(s1[(q % v) : (q%v)+1][0])
p[q] = q
}
for u, q := 0, 0; q < 256; q++ {
u = (u + p[q] + a[q]) % 256
p[q], p[u] = p[u], p[q]
}
for i, u, q := 0, 0, 0; q < len(s2); q++ {
i = (i + 1) % 256
u = (u + p[i]) % 256
p[i], p[u] = p[u], p[i]
k := p[((p[i] + p[u]) % 256)]
o = append(o, byte(int(s2[q])^k))
}
return base64.StdEncoding.EncodeToString(o)
}

View File

@ -84,6 +84,7 @@ docs = [
"smb.md",
"storj.md",
"sugarsync.md",
"terabox.md",
"ulozto.md",
"uptobox.md",
"union.md",

View File

@ -187,6 +187,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
{{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Terabox" home="https://www.terabox.com/" config="/terabox/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}}
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}

View File

@ -83,6 +83,7 @@ See the following for detailed instructions for
* [SMB](/smb/)
* [Storj](/storj/)
* [SugarSync](/sugarsync/)
* [Terabox](/terabox/)
* [Union](/union/)
* [Uloz.to](/ulozto/)
* [Uptobox](/uptobox/)

View File

@ -62,6 +62,7 @@ Here is an overview of the major features of each cloud storage system.
| SMB | - | R/W | Yes | No | - | - |
| SugarSync | - | - | No | No | - | - |
| Storj | - | R | No | No | - | - |
| Terabox | MD5 | DR/W | No | No | - | - |
| Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - |
| Uptobox | - | - | No | Yes | - | - |
| WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
@ -542,6 +543,7 @@ upon backend-specific capabilities.
| SMB | No | No | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No |
| Terabox | Yes | Yes | Yes | Yes | Yes | No | No | No | No | Yes | Yes |
| Uloz.to | No | No | Yes | Yes | No | No | No | No | No | No | Yes |
| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No |
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |

190
docs/content/terabox.md Normal file
View File

@ -0,0 +1,190 @@
---
title: "Terabox"
description: "Rclone docs for Terabox"
versionIntroduced: "v1.70"
---
# {{< icon "fa fa-inbox" >}} Terabox
This is a Backend for Terabox Cloud storage (alpha).
**Notice** This integration is not official: an official integration requires a special `client id` and `client secret` which can only be provided by Terabox. A request for these has been made, but no response has been received yet. If Terabox provides the keys for an official integration, this backend will be updated.
Paths are specified as `remote:path` or `remote:/path`
Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
## Limitations
For now the maximum upload file size is 4GB.
Downloading in multi-thread mode and in single-thread mode gives the same speed: about 1.7 - 1.9 MB/s for the standard multi-thread download and the same result for a single thread (with the flag `--multi-thread-streams 1`).
The cookie lifetime appears to be about 1 year: cookies obtained early in development were still valid after several weeks of inactivity.
## Configuration
To configure a Terabox backend you need to provide a cookie from your logged-in account. Three values must be provided, `TSID`, `ndus` and `ndut_fmt`, separated by semicolons.
How to obtain the cookie:
* Chrome extension:
1) Install extension `Cookie viewer`: https://chromewebstore.google.com/detail/dedhcncdjkmjpebfohadfeeaopiponca?utm_source=item-share-cb
2) Go to your storage: https://www.terabox.com/main
3) On the browser toolbar, click the Cookie viewer extension and copy these three values: `TSID`, `ndus`, `ndut_fmt`. Paste them one by one into a text document, separated by semicolons, then copy the resulting string for the configuration; you should get something like `TSID=xxx; ndus=xxx; ndut_fmt=xxx`
4) Remove extension `Cookie viewer`
* Chrome dev:
1) Go to your storage https://www.terabox.com/main
2) In the top right corner select `three dots` > `More tools` > `Developer tools` (**Note** The page will be automatically redirected to `about:blank`; no worries, this is expected.)
3) Select `Network` tab and check `Preserve log`
4) Push to `Go Back` button on address panel
5) Select any request and scroll `Headers` tab to down under `Request headers` and copy cookie value.
6) Close dev tools
Here is an example of how to make a remote called `remote` with the default setup.
First run:
rclone config
This will guide you through an interactive setup process:
```
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n
name> remote
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[...]
52 / Terabox
\ "terabox"
[...]
Storage> terabox
Option cookie.
Set cookie (should contains TSID; ndus; ndut_fmt)
Enter a value.
cookie> TSID=xxx; ndus=xxx; ndut_fmt=xxx
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Configuration complete.
--------------------
[terabox]
type = terabox
cookie = "TSID=xxx; ndus=xxx; ndut_fmt=xxx"
--------------------
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>
```
Once configured you can then use `remote` like this,
List directories in top level of your Terabox
rclone lsd remote:
List all the files in your Terabox
rclone ls remote:
To copy a local directory to a Terabox directory called backup
rclone copy /home/source remote:backup
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/terabox/terabox.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to Terabox.
#### --terabox-cookie
Your cookie.
Properties:
- Config: cookie
- Env Var: RCLONE_TERABOX_COOKIE
- Type: string
- Required: true
### Advanced options
Here are the Advanced options specific to Terabox.
#### --terabox-delete-permanently
Set if you want to clean Trash bin after file deleted.
Properties:
- Config: delete_permanently
- Env Var: RCLONE_TERABOX_DELETE_PERMANENTLY
- Type: bool
- Default: false
#### --terabox-user-agent
Set if you want to change the default user agent.
Properties:
- Config: user_agent
- Env Var: RCLONE_TERABOX_USER_AGENT
- Type: string
- Default: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
#### --terabox-debug-level
Set extra verbose level from 0 to 3.
0 - none
1 - called functions and params
2 - response from terabox (params and body)
3 - request to terabox (params)
Properties:
- Config: debug_level
- Env Var: RCLONE_TERABOX_DEBUG_LEVEL
- Type: uint8
- Default: 0
#### --terabox-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_TERABOX_ENCODING
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot
#### --terabox-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_TERABOX_DESCRIPTION
- Type: string
- Required: false
{{< rem autogenerated options stop >}}

View File

@ -105,6 +105,7 @@
<a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>
<a class="dropdown-item" href="/terabox/"><i class="fas fa-inbox fa-fw"></i> Terabox</a>
<a class="dropdown-item" href="/ulozto/"><i class="fas fa-angle-double-down fa-fw"></i> Uloz.to</a>
<a class="dropdown-item" href="/uptobox/"><i class="fa fa-archive fa-fw"></i> Uptobox</a>
<a class="dropdown-item" href="/union/"><i class="fa fa-link fa-fw"></i> Union (merge backends)</a>