Merge remote-tracking branch 'alist/main' into perf

j2rong4cn 2025-04-09 21:14:23 +08:00
commit c888c4c6ed
31 changed files with 1054 additions and 123 deletions

View File

@@ -77,6 +77,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] [Dropbox](https://www.dropbox.com/)
- [x] [FeijiPan](https://www.feijipan.com/)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

View File

@@ -2,6 +2,7 @@ package _139
import (
"context"
"encoding/xml"
"fmt"
"io"
"net/http"
@@ -729,14 +730,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
break
}
}
var reportSize int64
if d.ReportRealSize {
reportSize = stream.GetSize()
} else {
reportSize = 0
}
data := base.Json{
"manualRename": 2,
"operation": 0,
"fileCount": 1,
"totalSize": 0, // 去除上传大小限制
"totalSize": reportSize,
"uploadContentList": []base.Json{{
"contentName": stream.GetName(),
"contentSize": 0, // 去除上传大小限制
"contentSize": reportSize,
// "digest": "5a3231986ce7a6b46e408612d385bafa"
}},
"parentCatalogID": dstDir.GetID(),
@@ -754,10 +761,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
"operation": 0,
"path": path.Join(dstDir.GetPath(), dstDir.GetID()),
"seqNo": random.String(32), //序列号不能为空
"totalSize": 0,
"totalSize": reportSize,
"uploadContentList": []base.Json{{
"contentName": stream.GetName(),
"contentSize": 0,
"contentSize": reportSize,
// "digest": "5a3231986ce7a6b46e408612d385bafa"
}},
})
@@ -768,6 +775,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
if resp.Data.Result.ResultCode != "0" {
return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc)
}
size := stream.GetSize()
// Progress
@@ -806,13 +816,23 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("%+v", res)
if res.StatusCode != http.StatusOK {
res.Body.Close()
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("error reading response body: %v", err)
}
var result InterLayerUploadResult
err = xml.Unmarshal(bodyBytes, &result)
if err != nil {
return fmt.Errorf("error parsing XML: %v", err)
}
if result.ResultCode != 0 {
return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg)
}
}
return nil
default:
return errs.NotImplement

View File

@@ -12,6 +12,7 @@ type Addition struct {
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
CloudID string `json:"cloud_id"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
}
var config = driver.Config{

View File

@@ -143,6 +143,13 @@ type UploadResp struct {
} `json:"data"`
}
type InterLayerUploadResult struct {
XMLName xml.Name `xml:"result"`
Text string `xml:",chardata"`
ResultCode int `xml:"resultCode"`
Msg string `xml:"msg"`
}
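// For reference, a minimal sketch of how this struct unmarshals the middle-layer
// XML response (the payload below is illustrative, not a captured response):
//
// var r InterLayerUploadResult
// _ = xml.Unmarshal([]byte(`<result><resultCode>0</resultCode><msg>success</msg></result>`), &r)
// // r.ResultCode == 0 and r.Msg == "success" after unmarshalling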
type CloudContent struct {
ContentID string `json:"contentID"`
//Modifier string `json:"modifier"`

View File

@@ -16,6 +16,7 @@ import (
_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
_ "github.com/alist-org/alist/v3/drivers/azure_blob"
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
_ "github.com/alist-org/alist/v3/drivers/baidu_share"

View File

@@ -0,0 +1,313 @@
package azure_blob
import (
"context"
"fmt"
"io"
"path"
"regexp"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
)
// Azure Blob Storage based on the blob APIs
// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
type AzureBlob struct {
model.Storage
Addition
client *azblob.Client
containerClient *container.Client
config driver.Config
}
// Config returns the driver configuration.
func (d *AzureBlob) Config() driver.Config {
return d.config
}
// GetAddition returns additional settings specific to Azure Blob Storage.
func (d *AzureBlob) GetAddition() driver.Additional {
return &d.Addition
}
// Init initializes the Azure Blob Storage client using shared key authentication.
func (d *AzureBlob) Init(ctx context.Context) error {
// Validate the endpoint URL
accountName := extractAccountName(d.Addition.Endpoint)
if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only")
}
credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
if err != nil {
return fmt.Errorf("failed to create credential: %w", err)
}
// Check if Endpoint is just account name
endpoint := d.Addition.Endpoint
if accountName == endpoint {
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
}
// Initialize Azure Blob client with retry policy
client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
Retry: policy.RetryOptions{
MaxRetries: MaxRetries,
RetryDelay: RetryDelay,
},
}})
if err != nil {
return fmt.Errorf("failed to create client: %w", err)
}
d.client = client
// Ensure container exists or create it
containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
if containerName == "" {
return fmt.Errorf("container name cannot be empty")
}
return d.createContainerIfNotExists(ctx, containerName)
}
// Drop releases resources associated with the Azure Blob client.
func (d *AzureBlob) Drop(ctx context.Context) error {
d.client = nil
return nil
}
// List retrieves blobs and directories under the specified path.
func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
prefix := ensureTrailingSlash(dir.GetPath())
pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
Prefix: &prefix,
})
var objs []model.Obj
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list blobs: %w", err)
}
// Process directories
for _, blobPrefix := range page.Segment.BlobPrefixes {
objs = append(objs, &model.Object{
Name: path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
Path: *blobPrefix.Name,
Modified: *blobPrefix.Properties.LastModified,
Ctime: *blobPrefix.Properties.CreationTime,
IsFolder: true,
})
}
// Process files
for _, blob := range page.Segment.BlobItems {
if strings.HasSuffix(*blob.Name, "/") {
continue
}
objs = append(objs, &model.Object{
Name: path.Base(*blob.Name),
Path: *blob.Name,
Size: *blob.Properties.ContentLength,
Modified: *blob.Properties.LastModified,
Ctime: *blob.Properties.CreationTime,
IsFolder: false,
})
}
}
return objs, nil
}
// Link generates a temporary SAS URL for accessing a blob.
func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
blobClient := d.containerClient.NewBlobClient(file.GetPath())
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
if err != nil {
return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
}
return &model.Link{URL: sasURL}, nil
}
// MakeDir creates a virtual directory by uploading an empty blob as a marker.
func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
dirPath := path.Join(parentDir.GetPath(), dirName)
if err := d.mkDir(ctx, dirPath); err != nil {
return nil, fmt.Errorf("failed to create directory marker: %w", err)
}
return &model.Object{
Path: dirPath,
Name: dirName,
IsFolder: true,
}, nil
}
// Move relocates an object (file or directory) to a new directory.
func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
srcPath := srcObj.GetPath()
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
return nil, fmt.Errorf("move operation failed: %w", err)
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Modified: time.Now(),
IsFolder: srcObj.IsDir(),
Size: srcObj.GetSize(),
}, nil
}
// Rename changes the name of an existing object.
func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
srcPath := srcObj.GetPath()
dstPath := path.Join(path.Dir(srcPath), newName)
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
return nil, fmt.Errorf("rename operation failed: %w", err)
}
return &model.Object{
Path: dstPath,
Name: newName,
Modified: time.Now(),
IsFolder: srcObj.IsDir(),
Size: srcObj.GetSize(),
}, nil
}
// Copy duplicates an object (file or directory) to a specified destination directory.
func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
// Handle directory copying using flat listing
if srcObj.IsDir() {
srcPrefix := srcObj.GetPath()
srcPrefix = ensureTrailingSlash(srcPrefix)
// Get all blobs under the source directory
blobs, err := d.flattenListBlobs(ctx, srcPrefix)
if err != nil {
return nil, fmt.Errorf("failed to list source directory contents: %w", err)
}
// Process each blob - copy to destination
for _, blob := range blobs {
// Skip the directory marker itself
if *blob.Name == srcPrefix {
continue
}
// Calculate relative path from source
relPath := strings.TrimPrefix(*blob.Name, srcPrefix)
itemDstPath := path.Join(dstPath, relPath)
if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") {
// Create directory marker at destination
err := d.mkDir(ctx, itemDstPath)
if err != nil {
return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
}
} else {
// Copy the blob
if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil {
return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err)
}
}
}
// Create directory marker at destination if needed
if len(blobs) == 0 {
err := d.mkDir(ctx, dstPath)
if err != nil {
return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
}
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Modified: time.Now(),
IsFolder: true,
}, nil
}
// Copy a single file
if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil {
return nil, fmt.Errorf("failed to copy blob: %w", err)
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: time.Now(),
IsFolder: false,
}, nil
}
// Remove deletes a specified blob or recursively deletes a directory and its contents.
func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error {
path := obj.GetPath()
// Handle recursive directory deletion
if obj.IsDir() {
return d.deleteFolder(ctx, path)
}
// Delete single file
return d.deleteFile(ctx, path, false)
}
// Put uploads a file stream to Azure Blob Storage with progress tracking.
func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
blobPath := path.Join(dstDir.GetPath(), stream.GetName())
blobClient := d.containerClient.NewBlockBlobClient(blobPath)
// Determine optimal upload options based on file size
options := optimizedUploadOptions(stream.GetSize())
// Track upload progress
progressTracker := &progressTracker{
total: stream.GetSize(),
updateProgress: up,
}
// Wrap stream to handle context cancellation and progress tracking
limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker))
// Upload the stream to Azure Blob Storage
_, err := blobClient.UploadStream(ctx, limitedStream, options)
if err != nil {
return nil, fmt.Errorf("failed to upload file: %w", err)
}
return &model.Object{
Path: blobPath,
Name: stream.GetName(),
Size: stream.GetSize(),
Modified: time.Now(),
IsFolder: false,
}, nil
}
// The following methods related to archive handling are not implemented yet.
// func (d *AzureBlob) GetArchiveMeta(...) {...}
// func (d *AzureBlob) ListArchive(...) {...}
// func (d *AzureBlob) Extract(...) {...}
// func (d *AzureBlob) ArchiveDecompress(...) {...}
// Ensure AzureBlob implements the driver.Driver interface.
var _ driver.Driver = (*AzureBlob)(nil)

View File

@@ -0,0 +1,27 @@
package azure_blob
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
Endpoint string `json:"endpoint" required:"true" default:"https://<accountname>.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3-24 characters; lowercase letters and numbers only)."`
AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"`
ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
}
var config = driver.Config{
Name: "Azure Blob Storage",
LocalSort: true,
CheckStatus: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &AzureBlob{
config: config,
}
})
}
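// For illustration, a storage configured through this Addition might carry values
// like the following (a sketch; the account, key, and container names are placeholders):
//
// addition := Addition{
//     Endpoint: "https://myaccount.blob.core.windows.net/",
//     AccessKey: "<account key from the Azure portal>",
//     ContainerName: "alist",
//     SignURLExpire: 4, // SAS URLs valid for 4 hours
// }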

View File

@@ -0,0 +1,20 @@
package azure_blob
import "github.com/alist-org/alist/v3/internal/driver"
// progressTracker is used to track upload progress
type progressTracker struct {
total int64
current int64
updateProgress driver.UpdateProgress
}
// Write implements io.Writer to track progress
func (pt *progressTracker) Write(p []byte) (n int, err error) {
n = len(p)
pt.current += int64(n)
if pt.updateProgress != nil && pt.total > 0 {
pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
}
return n, nil
}
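// As wired up in driver.go, every chunk read from the upload stream is mirrored
// into this tracker via io.TeeReader, turning reads into progress updates.
// A minimal sketch (stream and up as in AzureBlob.Put):
//
// pt := &progressTracker{total: stream.GetSize(), updateProgress: up}
// reader := io.TeeReader(stream, pt)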

drivers/azure_blob/util.go
View File

@@ -0,0 +1,401 @@
package azure_blob
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"path"
"sort"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
log "github.com/sirupsen/logrus"
)
const (
// MaxRetries defines the maximum number of retry attempts for Azure operations
MaxRetries = 3
// RetryDelay defines the base delay between retries
RetryDelay = 3 * time.Second
// MaxBatchSize defines the maximum number of operations in a single batch request
MaxBatchSize = 128
)
// extractAccountName extracts the storage account name from an Azure endpoint URL
func extractAccountName(endpoint string) string {
// strip the protocol prefix
endpoint = strings.TrimPrefix(endpoint, "https://")
endpoint = strings.TrimPrefix(endpoint, "http://")
// take the part before the first dot (i.e. the account name)
parts := strings.Split(endpoint, ".")
if len(parts) > 0 {
// to lower case
return strings.ToLower(parts[0])
}
return ""
}
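// Expected behavior, sketched:
//
// extractAccountName("https://myaccount.blob.core.windows.net/") returns "myaccount"
// extractAccountName("myaccount") returns "myaccount" (no dot, so the whole string is returned)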
// isNotFoundError checks if the error is a "not found" type error
func isNotFoundError(err error) bool {
var storageErr *azcore.ResponseError
if errors.As(err, &storageErr) {
return storageErr.StatusCode == 404
}
// Fallback to string matching for backwards compatibility
return err != nil && strings.Contains(err.Error(), "BlobNotFound")
}
// flattenListBlobs lists all blobs under the given prefix with a flat pager, following pagination to the end
func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
// Standardize prefix format
prefix = ensureTrailingSlash(prefix)
var blobItems []container.BlobItem
pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
Prefix: &prefix,
Include: container.ListBlobsInclude{
Metadata: true,
},
})
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list blobs: %w", err)
}
for _, blob := range page.Segment.BlobItems {
blobItems = append(blobItems, *blob)
}
}
return blobItems, nil
}
// batchDeleteBlobs deletes the given blobs in batches of at most MaxBatchSize
func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
if len(blobPaths) == 0 {
return nil
}
// Process in batches of MaxBatchSize
for i := 0; i < len(blobPaths); i += MaxBatchSize {
end := min(i+MaxBatchSize, len(blobPaths))
currentBatch := blobPaths[i:end]
// Create batch builder
batchBuilder, err := d.containerClient.NewBatchBuilder()
if err != nil {
return fmt.Errorf("failed to create batch builder: %w", err)
}
// Add delete operations
for _, blobPath := range currentBatch {
if err := batchBuilder.Delete(blobPath, nil); err != nil {
return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
}
}
// Submit batch
responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
if err != nil {
return fmt.Errorf("batch delete request failed: %w", err)
}
// Check responses
for _, resp := range responses.Responses {
if resp.Error != nil && !isNotFoundError(resp.Error) {
// resolve the blob name for a more informative error message
blobName := "unknown"
if resp.BlobName != nil {
blobName = *resp.BlobName
}
return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
}
}
}
return nil
}
// deleteFolder recursively deletes a directory and all its contents
func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
// Ensure directory path ends with slash
prefix = ensureTrailingSlash(prefix)
// Get all blobs under the directory using flattenListBlobs
globs, err := d.flattenListBlobs(ctx, prefix)
if err != nil {
return fmt.Errorf("failed to list blobs for deletion: %w", err)
}
// If there are blobs in the directory, delete them
if len(globs) > 0 {
// separate files from directory markers
var filePaths []string
var dirPaths []string
for _, blob := range globs {
blobName := *blob.Name
if isDirectory(blob) {
// remove trailing slash for directory names
dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
} else {
filePaths = append(filePaths, blobName)
}
}
// delete files first, then directories
if len(filePaths) > 0 {
if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
return err
}
}
if len(dirPaths) > 0 {
// group directories by path depth
depthMap := make(map[int][]string)
for _, dir := range dirPaths {
depth := strings.Count(dir, "/") // depth = number of separators, e.g. "a/b/c" has depth 2
depthMap[depth] = append(depthMap[depth], dir)
}
// sort the depths in descending order
var depths []int
for depth := range depthMap {
depths = append(depths, depth)
}
sort.Sort(sort.Reverse(sort.IntSlice(depths)))
// batch-delete one depth level at a time, deepest first
for _, depth := range depths {
batch := depthMap[depth]
if err := d.batchDeleteBlobs(ctx, batch); err != nil {
return err
}
}
}
}
// finally, delete the directory marker itself
return d.deleteEmptyDirectory(ctx, prefix)
}
// deleteFile deletes a single blob; when deleting a directory marker, a "not found" error is tolerated
func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
blobClient := d.containerClient.NewBlobClient(path)
_, err := blobClient.Delete(ctx, nil)
if err != nil && !(isDir && isNotFoundError(err)) {
return err
}
return nil
}
// copyFile copies a single blob from source path to destination path
func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
srcBlob := d.containerClient.NewBlobClient(srcPath)
dstBlob := d.containerClient.NewBlobClient(dstPath)
// Use configured expiration time for SAS URL
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
if err != nil {
return fmt.Errorf("failed to generate source SAS URL: %w", err)
}
_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
return err
}
// createContainerIfNotExists creates the container if it does not already exist
func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
serviceClient := d.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(containerName)
var options = service.CreateContainerOptions{}
_, err := containerClient.Create(ctx, &options)
if err != nil {
var responseErr *azcore.ResponseError
if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
}
}
d.containerClient = containerClient
return nil
}
// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
dirPath := ensureTrailingSlash(fullDirName)
blobClient := d.containerClient.NewBlockBlobClient(dirPath)
// Upload an empty blob with metadata indicating it's a directory
_, err := blobClient.Upload(ctx, struct {
*bytes.Reader
io.Closer
}{
Reader: bytes.NewReader([]byte{}),
Closer: io.NopCloser(nil),
}, &blockblob.UploadOptions{
Metadata: map[string]*string{
"hdi_isfolder": to.Ptr("true"),
},
})
return err
}
// ensureTrailingSlash ensures the provided path ends with a trailing slash.
func ensureTrailingSlash(path string) string {
if !strings.HasSuffix(path, "/") {
return path + "/"
}
return path
}
// moveOrRename moves or renames blobs or directories from source to destination.
func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
if isDir {
// Normalize paths for directory operations
srcPath = ensureTrailingSlash(srcPath)
dstPath = ensureTrailingSlash(dstPath)
// List all blobs under the source directory
blobs, err := d.flattenListBlobs(ctx, srcPath)
if err != nil {
return fmt.Errorf("failed to list blobs: %w", err)
}
// Iterate and copy each blob to the destination
for _, item := range blobs {
srcBlobName := *item.Name
relPath := strings.TrimPrefix(srcBlobName, srcPath)
itemDstPath := path.Join(dstPath, relPath)
if isDirectory(item) {
// Create directory marker at destination
if err := d.mkDir(ctx, itemDstPath); err != nil {
return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
}
} else {
// Copy file blob to destination
if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
}
}
}
// Handle empty directories by creating a marker at destination
if len(blobs) == 0 {
if err := d.mkDir(ctx, dstPath); err != nil {
return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
}
}
// Delete source directory and its contents
if err := d.deleteFolder(ctx, srcPath); err != nil {
log.Warnf("failed to delete source directory [%s]: %v\n, and try again", srcPath, err)
// Retry deletion once more and ignore the result
if err := d.deleteFolder(ctx, srcPath); err != nil {
log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
}
}
return nil
}
// Single file move or rename operation
if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
return fmt.Errorf("failed to copy file: %w", err)
}
// Delete source file after successful copy
if err := d.deleteFile(ctx, srcPath, false); err != nil {
log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
}
return nil
}
// optimizedUploadOptions returns the optimal upload options based on file size
func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
options := &azblob.UploadStreamOptions{
BlockSize: 4 * 1024 * 1024, // 4MB block size
Concurrency: 4, // Default concurrency
}
// For large files, increase block size and concurrency
if fileSize > 256*1024*1024 { // For files larger than 256MB
options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
options.Concurrency = 8 // More concurrent uploads
}
// For very large files (>1GB)
if fileSize > 1024*1024*1024 {
options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
options.Concurrency = 16 // Higher concurrency
}
return options
}
// isDirectory determines if a blob represents a directory
// Checks multiple indicators: path suffix, metadata, and content type
func isDirectory(blob container.BlobItem) bool {
// Check path suffix
if strings.HasSuffix(*blob.Name, "/") {
return true
}
// Check metadata for directory marker
if blob.Metadata != nil {
if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
return true
}
// Azure Storage Explorer and other tools may use different metadata keys
if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
return true
}
}
// Check content type (some tools mark directories with specific content types)
if blob.Properties != nil && blob.Properties.ContentType != nil {
contentType := strings.ToLower(*blob.Properties.ContentType)
if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
return true
}
}
return false
}
// deleteEmptyDirectory deletes a directory only if it's empty
func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
// Directory is empty, delete the directory marker
blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
_, err := blobClient.Delete(ctx, nil)
// Also try deleting with trailing slash (for different directory marker formats)
if err != nil && isNotFoundError(err) {
blobClient = d.containerClient.NewBlobClient(dirPath)
_, err = blobClient.Delete(ctx, nil)
}
// Ignore not found errors
if err != nil && isNotFoundError(err) {
log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
return nil
}
return err
}

View File

@@ -79,6 +79,8 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.DownloadAPI == "crack" {
return d.linkCrack(file, args)
} else if d.DownloadAPI == "crack_video" {
return d.linkCrackVideo(file, args)
}
return d.linkOfficial(file, args)
}

View File

@@ -10,7 +10,7 @@ type Addition struct {
driver.RootPath
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
@@ -19,6 +19,7 @@ type Addition struct {
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
}
var config = driver.Config{

View File

@@ -17,7 +17,7 @@ type TokenErrResp struct {
type File struct {
//TkbindId int `json:"tkbind_id"`
//OwnerType int `json:"owner_type"`
//Category int `json:"category"`
Category int `json:"category"`
//RealCategory string `json:"real_category"`
FsId int64 `json:"fs_id"`
//OperId int `json:"oper_id"`

View File

@@ -79,6 +79,12 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
return retry.Unrecoverable(err2)
}
}
if 31023 == errno && d.DownloadAPI == "crack_video" {
result = res.Body()
return nil
}
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
}
result = res.Body()
@@ -131,7 +137,16 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
if len(resp.List) == 0 {
break
}
res = append(res, resp.List...)
if d.OnlyListVideoFile {
for _, file := range resp.List {
if file.Isdir == 1 || file.Category == 1 {
res = append(res, file)
}
}
} else {
res = append(res, resp.List...)
}
}
return res, nil
}
@@ -187,6 +202,34 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
}, nil
}
func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
param := map[string]string{
"type": "VideoURL",
"path": fmt.Sprintf("%s", file.GetPath()),
"fs_id": file.GetID(),
"devuid": "0%1",
"clienttype": "1",
"channel": "android_15_25010PN30C_bd-netdisk_1523a",
"nom3u8": "1",
"dlink": "1",
"media": "1",
"origin": "dlna",
}
resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(param)
}, nil)
if err != nil {
return nil, err
}
return &model.Link{
URL: utils.Json.Get(resp, "info", "dlink").ToString(),
Header: http.Header{
"User-Agent": []string{d.CustomCrackUA},
},
}, nil
}
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
params := map[string]string{
"method": "filemanager",

View File

@@ -162,6 +162,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
switch r.Policy.Type {
case "onedrive":
err = d.upOneDrive(ctx, stream, u, up)
case "s3":
err = d.upS3(ctx, stream, u, up)
case "remote": // 从机存储
err = d.upRemote(ctx, stream, u, up)
case "local": // 本机存储

View File

@@ -21,11 +21,12 @@ type Policy struct {
}
type UploadInfo struct {
SessionID string `json:"sessionID"`
ChunkSize int `json:"chunkSize"`
Expires int `json:"expires"`
UploadURLs []string `json:"uploadURLs"`
Credential string `json:"credential,omitempty"`
SessionID string `json:"sessionID"`
ChunkSize int `json:"chunkSize"`
Expires int `json:"expires"`
UploadURLs []string `json:"uploadURLs"`
Credential string `json:"credential,omitempty"` // local
CompleteURL string `json:"completeURL,omitempty"` // s3
}
type DirectoryResp struct {

View File

@@ -312,3 +312,82 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
}
return nil
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
etags = append(etags, res.Header.Get("ETag"))
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
// s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
bodyBuilder := &strings.Builder{}
bodyBuilder.WriteString("<CompleteMultipartUpload>")
for i, etag := range etags {
bodyBuilder.WriteString(fmt.Sprintf(
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
i+1, // PartNumber starts at 1
etag,
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/xml")
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
}
// send the callback request after a successful upload
err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil)
if err != nil {
return err
}
return nil
}
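// For a two-part upload, the completion body assembled above looks like this
// (sketch; the ETag values are whatever S3 returned for each uploaded part):
//
// <CompleteMultipartUpload>
// <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
// <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
// </CompleteMultipartUpload>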

View File

@@ -55,9 +55,9 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
ID: child.ID,
Path: child.ParentID,
Name: child.Name,
Size: int64(child.Size),
Modified: time.Unix(int64(child.UpdateTime), 0),
Ctime: time.Unix(int64(child.CreateTime), 0),
Size: child.Size,
Modified: time.Unix(child.UpdateTime, 0),
Ctime: time.Unix(child.CreateTime, 0),
IsFolder: child.NodeType == 1,
},
Key: child.Key,

View File

@@ -22,15 +22,15 @@ type NodeInfo struct {
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"` // 0: file, 1: folder
Size int `json:"size"`
Size int64 `json:"size"`
Source int `json:"source"`
NameReviewStatus int `json:"name_review_status"`
ContentReviewStatus int `json:"content_review_status"`
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}
type GetFileUrlResp struct {

View File

@@ -69,7 +69,7 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
d.ClientVersion = PCClientVersion
d.PackageName = PCPackageName
d.Algorithms = PCAlgorithms
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
}
if d.Addition.CaptchaToken != "" && d.Addition.RefreshToken == "" {

View File

@@ -28,34 +28,32 @@ import (
)
var AndroidAlgorithms = []string{
"7xOq4Z8s",
"QE9/9+IQco",
"WdX5J9CPLZp",
"NmQ5qFAXqH3w984cYhMeC5TJR8j",
"cc44M+l7GDhav",
"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
"wla81BUVSmDkctHDpUT",
"c6wMr1sm1WxiR3i8LDAm3W",
"hRLrEQCFNYi0PFPV",
"o1J41zIraDtJPNuhBu7Ifb/q3",
"U",
"RrbZvV0CTu3gaZJ56PVKki4IeP",
"NNuRbLckJqUp1Do0YlrKCUP",
"UUwnBbipMTvInA0U0E9",
"VzGc",
"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
"Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA",
"VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz",
"u5ujk5sM62gpJOsB/1Gu/zsfgfZO",
"dXYIiBOAHZgzSruaQ2Nhrqc2im",
"z5jUTBSIpBN9g4qSJGlidNAutX6",
"KJE2oveZ34du/g1tiimm",
}
var WebAlgorithms = []string{
"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
"uSUX02HYJ1IkyLdhINEFcCf7l2",
"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
"3binT1s/5a1pu3fGsN",
"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
"DYS3StqnAEKdGddRP8CJrxUSFh",
"crquW+4",
"ryKqvW9B9hly+JAymXCIfag5Z",
"Hr08T/NDTX1oSJfHk90c",
"i",
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
var PCAlgorithms = []string{
@@ -80,17 +78,17 @@ const (
const (
AndroidClientID = "YNxT9w7GMdWvEOKa"
AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
AndroidClientVersion = "1.49.3"
AndroidClientVersion = "1.53.2"
AndroidPackageName = "com.pikcloud.pikpak"
AndroidSdkVersion = "2.0.4.204101"
AndroidSdkVersion = "2.0.6.206003"
WebClientID = "YUMx5nI8ZU8Ap8pm"
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
WebClientVersion = "undefined"
WebClientVersion = "2.0.0"
WebPackageName = "drive.mypikpak.com"
WebSdkVersion = "8.0.3"
PCClientID = "YvtoWO6GNHiuCl7x"
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
PCClientVersion = "undefined" // 2.5.6.4831
PCClientVersion = "undefined" // 2.6.11.4955
PCPackageName = "mypikpak.com"
PCSdkVersion = "8.0.3"
)

View File

@@ -66,7 +66,7 @@ func (d *PikPakShare) Init(ctx context.Context) error {
d.ClientVersion = PCClientVersion
d.PackageName = PCPackageName
d.Algorithms = PCAlgorithms
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
}
// fetch the CaptchaToken

View File

@@ -17,34 +17,32 @@ import (
)
var AndroidAlgorithms = []string{
"7xOq4Z8s",
"QE9/9+IQco",
"WdX5J9CPLZp",
"NmQ5qFAXqH3w984cYhMeC5TJR8j",
"cc44M+l7GDhav",
"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
"wla81BUVSmDkctHDpUT",
"c6wMr1sm1WxiR3i8LDAm3W",
"hRLrEQCFNYi0PFPV",
"o1J41zIraDtJPNuhBu7Ifb/q3",
"U",
"RrbZvV0CTu3gaZJ56PVKki4IeP",
"NNuRbLckJqUp1Do0YlrKCUP",
"UUwnBbipMTvInA0U0E9",
"VzGc",
"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
"Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA",
"VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz",
"u5ujk5sM62gpJOsB/1Gu/zsfgfZO",
"dXYIiBOAHZgzSruaQ2Nhrqc2im",
"z5jUTBSIpBN9g4qSJGlidNAutX6",
"KJE2oveZ34du/g1tiimm",
}
var WebAlgorithms = []string{
"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
"uSUX02HYJ1IkyLdhINEFcCf7l2",
"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
"3binT1s/5a1pu3fGsN",
"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
"DYS3StqnAEKdGddRP8CJrxUSFh",
"crquW+4",
"ryKqvW9B9hly+JAymXCIfag5Z",
"Hr08T/NDTX1oSJfHk90c",
"i",
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
var PCAlgorithms = []string{
@@ -63,17 +61,17 @@ var PCAlgorithms = []string{
const (
AndroidClientID = "YNxT9w7GMdWvEOKa"
AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
AndroidClientVersion = "1.49.3"
AndroidClientVersion = "1.53.2"
AndroidPackageName = "com.pikcloud.pikpak"
AndroidSdkVersion = "2.0.4.204101"
AndroidSdkVersion = "2.0.6.206003"
WebClientID = "YUMx5nI8ZU8Ap8pm"
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
WebClientVersion = "undefined"
WebClientVersion = "2.0.0"
WebPackageName = "drive.mypikpak.com"
WebSdkVersion = "8.0.3"
PCClientID = "YvtoWO6GNHiuCl7x"
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
PCClientVersion = "undefined" // 2.5.6.4831
PCClientVersion = "undefined" // 2.6.11.4955
PCPackageName = "mypikpak.com"
PCSdkVersion = "8.0.3"
)

View File

@@ -74,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
"Referer": []string{d.conf.referer},
"User-Agent": []string{ua},
},
Concurrency: 2,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}

View File

@@ -125,7 +125,6 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs
}
func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
files := &model.Link{}
var fileLink FileLink
_, err := d.request(ctx, "/file", "GET", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
@@ -139,8 +138,12 @@ func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArg
if err != nil {
return nil, err
}
files.URL = fileLink.Data.DownloadURL
return files, nil
return &model.Link{
URL: fileLink.Data.DownloadURL,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}
func (d *QuarkUCTV) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {

go.mod
View File

@@ -79,6 +79,12 @@ require (
gorm.io/gorm v1.25.11
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
)
require (
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
@@ -111,7 +117,7 @@ require (
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/xhofe/115-sdk-go v0.1.4
github.com/xhofe/115-sdk-go v0.1.5
github.com/yuin/goldmark v1.7.8
go4.org v0.0.0-20230225012048-214862532bf5
resty.dev/v3 v3.0.0-beta.2 // indirect

go.sum
View File

@@ -19,6 +19,12 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -606,8 +612,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhofe/115-sdk-go v0.1.4 h1:erIWuWH+kZQOEHM+YZK8Y6sWQ2s/SFJIFh/WeCtjiiY=
github.com/xhofe/115-sdk-go v0.1.4/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
github.com/xhofe/115-sdk-go v0.1.5 h1:2+E92l6AX0+ABAkrdmDa9PE5ONN7wVLCaKkK80zETOg=
github.com/xhofe/115-sdk-go v0.1.5/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=

View File

@@ -29,7 +29,6 @@ type ArchiveReader interface {
func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) {
encrypted := false
dirMap := make(map[string]*model.ObjectTree)
dirMap["."] = &model.ObjectTree{}
for _, file := range r.Files() {
if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
encrypted = true
@@ -44,7 +43,7 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
dir = stdpath.Dir(name)
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = true
isNewFolder = dir != "."
dirObj = &model.ObjectTree{}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
@@ -60,41 +59,45 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree
dir = strings.TrimSuffix(name, "/")
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = true
isNewFolder = dir != "."
dirObj = &model.ObjectTree{}
dirMap[dir] = dirObj
}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Modified = file.FileInfo().ModTime()
dirObj.Children = make([]model.ObjTree, 0)
}
if isNewFolder {
// attach the folder to its parent folder
dir = stdpath.Dir(dir)
pDirObj := dirMap[dir]
if pDirObj != nil {
pDirObj.Children = append(pDirObj.Children, dirObj)
continue
}
// some archives record only file paths, without separate folder entries,
// so create all missing parent folders in a loop
parentDir := stdpath.Dir(dir)
for {
// some archives record only file paths, without separate folder entries
pDirObj = &model.ObjectTree{}
pDirObj.IsFolder = true
pDirObj.Name = stdpath.Base(dir)
pDirObj.Modified = file.FileInfo().ModTime()
dirMap[dir] = pDirObj
pDirObj.Children = append(pDirObj.Children, dirObj)
dir = stdpath.Dir(dir)
if dirMap[dir] != nil {
parentDirObj := dirMap[parentDir]
if parentDirObj == nil {
parentDirObj = &model.ObjectTree{}
if parentDir != "." {
parentDirObj.IsFolder = true
parentDirObj.Name = stdpath.Base(parentDir)
parentDirObj.Modified = file.FileInfo().ModTime()
}
dirMap[parentDir] = parentDirObj
}
parentDirObj.Children = append(parentDirObj.Children, dirObj)
parentDir = stdpath.Dir(parentDir)
if dirMap[parentDir] != nil {
break
}
dirObj = pDirObj
dirObj = parentDirObj
}
}
}
return encrypted, dirMap["."].GetChildren()
if len(dirMap) > 0 {
return encrypted, dirMap["."].GetChildren()
} else {
return encrypted, nil
}
}
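// A worked sketch of the parent-creation loop above: for an archive whose only
// entry is "a/b/c.txt", the file is attached to a new "a/b" folder node; the loop
// then creates "a" as the parent of "a/b", attaches it to a fresh "." root node,
// and stops because dirMap["."] now exists. The returned tree is a -> b -> c.txt.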
func MakeModelObj(file os.FileInfo) *model.Object {

View File

@@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
// use the request's Context:
// otherwise sendContent cannot read any data, and CopyBuffer keeps blocking even after the request disconnects
ctx := context.WithValue(r.Context(), "request_header", &r.Header)
ctx := context.WithValue(r.Context(), "request_header", r.Header)
switch {
case len(ranges) == 0:
reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})

View File

@@ -20,11 +20,7 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
}
rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
if link.Concurrency != 0 || link.PartSize != 0 {
requestHeader := ctx.Value("request_header")
if requestHeader == nil {
requestHeader = &http.Header{}
}
header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
header := net.ProcessHeader(nil, link.Header)
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize
@@ -65,11 +61,7 @@
}
func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
requestHeader := ctx.Value("request_header")
if requestHeader == nil {
requestHeader = &http.Header{}
}
header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
header := net.ProcessHeader(nil, link.Header)
header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
return net.RequestHttp(ctx, "GET", header, link.URL)

View File

@@ -50,9 +50,9 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
requestHeader := ctx.Value("request_header")
if requestHeader == nil {
requestHeader = &http.Header{}
requestHeader = http.Header{}
}
header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
header := net.ProcessHeader(requestHeader.(http.Header), link.Header)
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize

View File

@@ -33,6 +33,8 @@ type DirReq struct {
}
type ObjResp struct {
Id string `json:"id"`
Path string `json:"path"`
Name string `json:"name"`
Size int64 `json:"size"`
IsDir bool `json:"is_dir"`
@@ -210,6 +212,8 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
for _, obj := range objs {
thumb, _ := model.GetThumb(obj)
resp = append(resp, ObjResp{
Id: obj.GetID(),
Path: obj.GetPath(),
Name: obj.GetName(),
Size: obj.GetSize(),
IsDir: obj.IsDir(),
@@ -326,6 +330,8 @@ func FsGet(c *gin.Context) {
thumb, _ := model.GetThumb(obj)
common.SuccessResp(c, FsGetResp{
ObjResp: ObjResp{
Id: obj.GetID(),
Path: obj.GetPath(),
Name: obj.GetName(),
Size: obj.GetSize(),
IsDir: obj.IsDir(),