Merge 1618e5cb08851124b4506a4aacc9129d59d33486 into 41bdab49aa8acca9e88862c3db55cd7a8a84ba6a

j2rong4cn 2025-04-19 14:34:21 +08:00 committed by GitHub
commit 2deb5a78c8
5 changed files with 37 additions and 58 deletions

View File

@@ -108,12 +108,17 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
     if !ok {
         return nil, errs.ObjectNotFound
     }
+    isRedirect := args.Redirect
+    if strings.HasSuffix(root, ".proxy") {
+        args.Redirect = true
+    }
     for _, dst := range dsts {
         link, err := d.link(ctx, dst, sub, args)
         if err == nil {
-            if !args.Redirect && len(link.URL) > 0 {
+            if !isRedirect && len(link.URL) > 0 {
                 // Normally, concurrent (multi-threaded) downloads are only supported by drivers that return a URL.
-                // Nesting an alias inside another alias lets drivers that do not return a URL (crypt, mega, etc.) support concurrency.
+                // Nesting an alias inside another alias, or giving the root a name ending in `.proxy`,
+                // lets drivers that do not return a URL (crypt, mega, etc.) support concurrency.
                 if d.DownloadConcurrency > 0 {
                     link.Concurrency = d.DownloadConcurrency
                 }
@@ -282,14 +287,6 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn
     for _, dst := range dsts {
         link, err := d.extract(ctx, dst, sub, args)
         if err == nil {
-            if !args.Redirect && len(link.URL) > 0 {
-                if d.DownloadConcurrency > 0 {
-                    link.Concurrency = d.DownloadConcurrency
-                }
-                if d.DownloadPartSize > 0 {
-                    link.PartSize = d.DownloadPartSize * utils.KB
-                }
-            }
             return link, nil
         }
     }
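
Read together, the two hunks above boil down to one rule: an alias root whose name ends in `.proxy` now forces proxy (redirect) handling, while the caller's original intent is remembered in `isRedirect` so the concurrency tuning still keys off what was actually requested. A minimal, self-contained sketch of that flag handling (hypothetical helper name, not code from the repository):

```go
package main

import (
	"fmt"
	"strings"
)

// forcedRedirect captures the caller's original Redirect flag before a
// ".proxy" root forces redirect handling, mirroring the isRedirect /
// args.Redirect dance in the hunk above.
func forcedRedirect(root string, redirect bool) (isRedirect, effectiveRedirect bool) {
	isRedirect = redirect
	if strings.HasSuffix(root, ".proxy") {
		redirect = true
	}
	return isRedirect, redirect
}

func main() {
	fmt.Println(forcedRedirect("movies.proxy", false)) // false true: proxying forced by the root name
	fmt.Println(forcedRedirect("movies", false))       // false false: behaviour unchanged
	fmt.Println(forcedRedirect("movies", true))        // true true: caller asked for a redirect anyway
}
```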

View File

@@ -103,23 +103,21 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs)
     if err != nil {
         return nil, err
     }
-    if _, ok := storage.(*Alias); !ok && !args.Redirect {
-        link, _, err := op.Link(ctx, storage, reqActualPath, args)
-        return link, err
+    proxy := args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub))
+    if !proxy {
+        _, proxy = storage.(*Alias)
     }
-    _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
-    if err != nil {
-        return nil, err
-    }
-    if common.ShouldProxy(storage, stdpath.Base(sub)) {
+    if proxy {
+        _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
+        if err != nil {
+            return nil, err
+        }
         link := &model.Link{
             URL: fmt.Sprintf("%s/p%s?sign=%s",
                 common.GetApiUrl(args.HttpReq),
                 utils.EncodePath(reqPath, true),
                 sign.Sign(reqPath)),
-        }
-        if args.HttpReq != nil && d.ProxyRange {
-            link.RangeReadCloser = common.NoProxyRange
+            Concurrency: common.NoProxyRangeMark,
         }
         return link, nil
     }
@@ -195,31 +193,25 @@ func (d *Alias) extract(ctx context.Context, dst, sub string, args model.Archive
     if err != nil {
         return nil, err
     }
-    if _, ok := storage.(driver.ArchiveReader); ok {
-        if _, ok := storage.(*Alias); !ok && !args.Redirect {
-            link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
-            return link, err
-        }
+    if _, ok := storage.(driver.ArchiveReader); !ok {
+        return nil, errs.NotImplement
+    }
+    if args.Redirect && common.ShouldProxy(storage, stdpath.Base(sub)) {
         _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
         if err != nil {
             return nil, err
         }
-        if common.ShouldProxy(storage, stdpath.Base(sub)) {
-            link := &model.Link{
-                URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
-                    common.GetApiUrl(args.HttpReq),
-                    utils.EncodePath(reqPath, true),
-                    utils.EncodePath(args.InnerPath, true),
-                    url.QueryEscape(args.Password),
-                    sign.SignArchive(reqPath)),
-            }
-            if args.HttpReq != nil && d.ProxyRange {
-                link.RangeReadCloser = common.NoProxyRange
-            }
-            return link, nil
+        link := &model.Link{
+            URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
+                common.GetApiUrl(args.HttpReq),
+                utils.EncodePath(reqPath, true),
+                utils.EncodePath(args.InnerPath, true),
+                url.QueryEscape(args.Password),
+                sign.SignArchive(reqPath)),
+            Concurrency: common.NoProxyRangeMark,
         }
-        link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
-        return link, err
+        return link, nil
     }
-    return nil, errs.NotImplement
+    link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
+    return link, err
 }
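
In both `link` and `extract` above, the proxy decision reduces to a small truth table; a rough sketch with plain booleans standing in for the real `storage`/`args` types (hypothetical helper that mirrors the diff, not repository code):

```go
package main

import "fmt"

// shouldBuildProxyLink mirrors the reworked decision in d.link: build a signed
// /p proxy URL when the caller wants a redirect and the storage is normally
// proxied, or unconditionally when the destination storage is another Alias.
func shouldBuildProxyLink(redirect, shouldProxy, destIsAlias bool) bool {
	proxy := redirect && shouldProxy
	if !proxy {
		proxy = destIsAlias
	}
	return proxy
}

func main() {
	fmt.Println(shouldBuildProxyLink(true, true, false))  // true: redirect + proxied storage
	fmt.Println(shouldBuildProxyLink(false, true, false)) // false: fall through to op.Link / op.DriverExtract
	fmt.Println(shouldBuildProxyLink(false, false, true)) // true: nested Alias always proxies
}
```

When the proxy URL is built, the link also carries `Concurrency: common.NoProxyRangeMark`, which the later files in this commit use to opt out of server-side range wrapping.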

View File

@@ -70,7 +70,7 @@ func TestDownloadOrder(t *testing.T) {
         t.Errorf("expect %v API calls, got %v", e, a)
     }
-    expectRngs := []string{"2-3", "5-3", "8-3", "11-1"}
+    expectRngs := []string{"2-1", "3-3", "6-3", "9-3"}
     for _, rng := range expectRngs {
         if !slices.Contains(*ranges, rng) {
             t.Errorf("expect range %v, but absent in return", rng)

View File

@@ -19,7 +19,7 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
         return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
     }
     rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
-        if link.Concurrency != 0 || link.PartSize != 0 {
+        if link.Concurrency > 0 || link.PartSize > 0 {
             header := net.ProcessHeader(nil, link.Header)
             down := net.NewDownloader(func(d *net.Downloader) {
                 d.Concurrency = link.Concurrency
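
The switch from `!= 0` to `> 0` only makes sense together with the `NoProxyRangeMark` sentinel introduced in the last file of this commit: a marker value of -1 must not be mistaken for an explicit concurrency setting. A small standalone sketch of that intent, using plain ints instead of `model.Link`:

```go
package main

import "fmt"

// NoProxyRangeMark mirrors the sentinel defined later in this commit: an alias
// proxy link carries Concurrency == -1 so downstream code leaves it alone.
const NoProxyRangeMark = -1

// wantsMultipart shows why the check moved from "!= 0" to "> 0": with "!= 0",
// the -1 marker would wrongly select the multi-threaded download path.
func wantsMultipart(concurrency, partSize int) bool {
	return concurrency > 0 || partSize > 0
}

func main() {
	fmt.Println(wantsMultipart(4, 0))                // true: explicit concurrency
	fmt.Println(wantsMultipart(0, 0))                // false: defaults, single stream
	fmt.Println(wantsMultipart(NoProxyRangeMark, 0)) // false: the marker must not trigger it
}
```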

View File

@@ -16,7 +16,6 @@ import (
     "github.com/alist-org/alist/v3/internal/stream"
     "github.com/alist-org/alist/v3/pkg/http_range"
     "github.com/alist-org/alist/v3/pkg/utils"
-    log "github.com/sirupsen/logrus"
 )
 
 func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
@@ -43,7 +42,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
             RangeReadCloserIF: link.RangeReadCloser,
             Limiter:           stream.ServerDownloadLimit,
         })
-    } else if link.Concurrency != 0 || link.PartSize != 0 {
+    } else if link.Concurrency > 0 || link.PartSize > 0 {
         attachHeader(w, file)
         size := file.GetSize()
         rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
@@ -111,22 +110,13 @@ func GetEtag(file model.Obj) string {
     return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize())
 }
 
-var NoProxyRange = &model.RangeReadCloser{}
+const NoProxyRangeMark int = -1
 
 func ProxyRange(link *model.Link, size int64) {
-    if link.MFile != nil {
+    if link.RangeReadCloser != nil || len(link.URL) == 0 || link.Concurrency == NoProxyRangeMark {
         return
     }
-    if link.RangeReadCloser == nil {
-        var rrc, err = stream.GetRangeReadCloserFromLink(size, link)
-        if err != nil {
-            log.Warnf("ProxyRange error: %s", err)
-            return
-        }
-        link.RangeReadCloser = rrc
-    } else if link.RangeReadCloser == NoProxyRange {
-        link.RangeReadCloser = nil
-    }
+    link.RangeReadCloser, _ = stream.GetRangeReadCloserFromLink(size, link)
 }
 
 type InterceptResponseWriter struct {
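
The rewritten `ProxyRange` replaces the old `NoProxyRange` sentinel object with the `NoProxyRangeMark` value and discards errors from `stream.GetRangeReadCloserFromLink` instead of logging them, which is why the logrus import above goes away. A condensed sketch of the new guard, with plain values standing in for `*model.Link` fields (hypothetical helper, not repository code):

```go
package main

import "fmt"

const NoProxyRangeMark = -1 // same sentinel value as in the diff above

// skipProxyRange condenses the new early return in ProxyRange: an existing
// RangeReadCloser, an empty URL, or the NoProxyRangeMark sentinel all mean
// there is nothing for ProxyRange to wrap.
func skipProxyRange(hasRangeReadCloser bool, url string, concurrency int) bool {
	return hasRangeReadCloser || len(url) == 0 || concurrency == NoProxyRangeMark
}

func main() {
	fmt.Println(skipProxyRange(false, "https://example.com/f", 0))                // false: wrap the URL
	fmt.Println(skipProxyRange(true, "https://example.com/f", 0))                 // true: already wrapped
	fmt.Println(skipProxyRange(false, "", 0))                                     // true: no URL to range over
	fmt.Println(skipProxyRange(false, "https://example.com/f", NoProxyRangeMark)) // true: alias opted out
}
```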