refactor(net): pass request header (#8031 close #8008)

* refactor(net): pass request header

* feat(proxy): add `Etag` to response header

* refactor
j2rong4cn 2025-03-01 18:35:34 +08:00 committed by GitHub
parent 646c7bcd21
commit 4145734c18
GPG Key ID: B5690EEEBB952194
14 changed files with 56 additions and 44 deletions
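
The thread running through the hunks below: the HTTP layer stores the client's request header in the context under the "request_header" key, and the range readers and chunked downloader pull it back out before building their upstream requests. A minimal, self-contained sketch of that round trip, using only the standard library (net.ProcessHeader and the AList types are left out; the header value is made up):

package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	// Producer side (see the ServeHTTP hunk): stash a pointer to the
	// incoming request's header. The plain string key mirrors the diff.
	incoming := http.Header{"If-Range": []string{`"abc"`}}
	ctx := context.WithValue(context.Background(), "request_header", &incoming)

	// Consumer side (see the stream/util and proxy hunks): fall back to an
	// empty header when nothing was stored, then merge with link.Header.
	requestHeader := ctx.Value("request_header")
	if requestHeader == nil {
		requestHeader = &http.Header{}
	}
	header := *(requestHeader.(*http.Header))
	fmt.Println(header.Get("If-Range")) // "abc"
}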


@@ -63,6 +63,7 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob
 		Size:     obj.GetSize(),
 		Modified: obj.ModTime(),
 		IsFolder: obj.IsDir(),
+		HashInfo: obj.GetHash(),
 	}, nil
 }


@@ -263,12 +263,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	}
 	rrc := remoteLink.RangeReadCloser
 	if len(remoteLink.URL) > 0 {
-		rangedRemoteLink := &model.Link{
-			URL:    remoteLink.URL,
-			Header: remoteLink.Header,
-		}
-		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
 		if err != nil {
 			return nil, err
 		}
@@ -304,7 +299,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
 	resultLink := &model.Link{
-		Header:          remoteLink.Header,
 		RangeReadCloser: resultRangeReadCloser,
 		Expiration:      remoteLink.Expiration,
 	}


@@ -170,7 +170,7 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
 	if res.StatusCode() != 200 {
 		return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
 	}
-	return res.Header().Get("ETag"), nil
+	return res.Header().Get("Etag"), nil
 }
 
 func (d *QuarkOrUC) upCommit(pre UpPreResp, md5s []string) error {
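
Side note on the `ETag` → `Etag` change above: Go's http.Header canonicalizes keys, so Get is case-insensitive and both spellings read the same value; the rename only makes the key consistent with the `Etag` the proxy now sets. A quick standalone check:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("ETag", `"d41d8cd98f00b204e9800998ecf8427e"`)
	// Get canonicalizes its key, so "ETag" and "Etag" are interchangeable.
	fmt.Println(h.Get("ETag") == h.Get("Etag")) // true
}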


@@ -304,10 +304,6 @@ func (d *Quqi) linkFromCDN(id string) (*model.Link, error) {
 	}
 	return &model.Link{
-		Header: http.Header{
-			"Origin": []string{"https://quqi.com"},
-			"Cookie": []string{d.Cookie},
-		},
 		RangeReadCloser: &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers},
 		Expiration:      &expiration,
 	}, nil


@@ -382,6 +382,9 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
 	if resp == nil {
 		return 0, err
 	}
+	if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+		return 0, err
+	}
 	if ch.id == 0 { // first chunk: only limited retries; once they are exhausted the whole request is aborted
 		switch resp.StatusCode {
 		default:


@@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
 	// use the request's Context:
 	// otherwise no data can be read from sendContent, and CopyBuffer keeps blocking even after the request is disconnected
-	ctx := r.Context()
+	ctx := context.WithValue(r.Context(), "request_header", &r.Header)
 	switch {
 	case len(ranges) == 0:
 		reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})


@@ -71,6 +71,7 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult {
 	if im == "" {
 		return condNone
 	}
+	r.Header.Del("If-Match")
 	for {
 		im = textproto.TrimString(im)
 		if len(im) == 0 {
@@ -98,7 +99,11 @@ func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult {
 func checkIfUnmodifiedSince(r *http.Request, modtime time.Time) condResult {
 	ius := r.Header.Get("If-Unmodified-Since")
-	if ius == "" || isZeroTime(modtime) {
+	if ius == "" {
 		return condNone
 	}
+	r.Header.Del("If-Unmodified-Since")
+	if isZeroTime(modtime) {
+		return condNone
+	}
 	t, err := http.ParseTime(ius)
@@ -120,6 +125,7 @@ func checkIfNoneMatch(w http.ResponseWriter, r *http.Request) condResult {
 	if inm == "" {
 		return condNone
 	}
+	r.Header.Del("If-None-Match")
 	buf := inm
 	for {
 		buf = textproto.TrimString(buf)
@@ -150,7 +156,11 @@ func checkIfModifiedSince(r *http.Request, modtime time.Time) condResult {
 		return condNone
 	}
 	ims := r.Header.Get("If-Modified-Since")
-	if ims == "" || isZeroTime(modtime) {
+	if ims == "" {
 		return condNone
 	}
+	r.Header.Del("If-Modified-Since")
+	if isZeroTime(modtime) {
+		return condNone
+	}
 	t, err := http.ParseTime(ims)
@@ -174,6 +184,7 @@ func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) con
 	if ir == "" {
 		return condNone
 	}
+	r.Header.Del("If-Range")
 	etag, _ := scanETag(ir)
 	if etag != "" {
 		if etagStrongMatch(etag, w.Header().Get("Etag")) {
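
Each conditional header above is deleted once it has been evaluated, presumably so that forwarding the stored request header upstream does not re-apply the same precondition there. A hypothetical helper (not part of the diff) illustrating the consume-then-delete pattern:

package main

import (
	"fmt"
	"net/http"
)

// takeHeader reads a conditional header and removes it, so a later
// pass-through of the header map does not repeat the precondition.
func takeHeader(h http.Header, key string) string {
	v := h.Get(key)
	if v != "" {
		h.Del(key)
	}
	return v
}

func main() {
	h := http.Header{"If-None-Match": []string{`"abc"`}}
	fmt.Println(takeHeader(h, "If-None-Match")) // "abc"
	fmt.Println(h.Get("If-None-Match") == "")   // true: consumed
}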


@@ -384,7 +384,7 @@ func (c *headCache) read(p []byte) (n int, err error) {
 		n, err = lr.Read(buf[off:])
 		off += n
 		c.cur += int64(n)
-		if err == io.EOF && n == int(bufL) {
+		if err == io.EOF && off == int(bufL) {
 			err = nil
 		}
 		if err != nil {
@@ -468,7 +468,7 @@ func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error)
 		}
 	}
 	if rc != nil && off-rc.cur <= utils.MB {
-		n, err := utils.CopyWithBufferN(utils.NullWriter{}, rc.reader, off-rc.cur)
+		n, err := utils.CopyWithBufferN(io.Discard, rc.reader, off-rc.cur)
 		rc.cur += n
 		if err == io.EOF && rc.cur == off {
 			err = nil


@@ -3,13 +3,13 @@ package stream
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/pkg/utils"
 	"io"
 	"net/http"
 
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )
@@ -19,7 +19,11 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
 	}
 	rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
 		if link.Concurrency != 0 || link.PartSize != 0 {
-			header := net.ProcessHeader(http.Header{}, link.Header)
+			requestHeader := ctx.Value("request_header")
+			if requestHeader == nil {
+				requestHeader = &http.Header{}
+			}
+			header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
 			down := net.NewDownloader(func(d *net.Downloader) {
 				d.Concurrency = link.Concurrency
 				d.PartSize = link.PartSize
@@ -60,7 +64,11 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
 }
 
 func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
-	header := net.ProcessHeader(http.Header{}, link.Header)
+	requestHeader := ctx.Value("request_header")
+	if requestHeader == nil {
+		requestHeader = &http.Header{}
+	}
+	header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
 	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
 	return net.RequestHttp(ctx, "GET", header, link.URL)
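
The retrieval above uses an unchecked type assertion on the context value; a more defensive variant (my suggestion, not what the diff does) would use the two-value form so a missing or mistyped value degrades to an empty header instead of panicking:

package main

import (
	"context"
	"fmt"
	"net/http"
)

func main() {
	ctx := context.Background() // nothing stored in this example

	header := http.Header{}
	if v, ok := ctx.Value("request_header").(*http.Header); ok && v != nil {
		header = *v
	}
	fmt.Println(len(header)) // 0: fell back to the empty header
}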


@@ -233,9 +233,3 @@ func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err
 	}
 	return
 }
-
-type NullWriter struct{}
-
-func (NullWriter) Write(p []byte) (n int, err error) {
-	return len(p), nil
-}


@@ -19,7 +19,7 @@ import (
 func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 	if link.MFile != nil {
 		defer link.MFile.Close()
-		attachFileName(w, file)
+		attachHeader(w, file)
 		contentType := link.Header.Get("Content-Type")
 		if contentType != "" {
 			w.Header().Set("Content-Type", contentType)
@@ -35,17 +35,21 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
 		http.ServeContent(w, r, file.GetName(), file.ModTime(), mFile)
 		return nil
 	} else if link.RangeReadCloser != nil {
-		attachFileName(w, file)
+		attachHeader(w, file)
 		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{
 			RangeReadCloserIF: link.RangeReadCloser,
 			Limiter:           stream.ServerDownloadLimit,
 		})
 		return nil
 	} else if link.Concurrency != 0 || link.PartSize != 0 {
-		attachFileName(w, file)
+		attachHeader(w, file)
 		size := file.GetSize()
-		header := net.ProcessHeader(r.Header, link.Header)
 		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+			requestHeader := ctx.Value("request_header")
+			if requestHeader == nil {
+				requestHeader = &http.Header{}
+			}
+			header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
 			down := net.NewDownloader(func(d *net.Downloader) {
 				d.Concurrency = link.Concurrency
 				d.PartSize = link.PartSize
@@ -91,10 +95,20 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
 		return nil
 	}
 }
-func attachFileName(w http.ResponseWriter, file model.Obj) {
+func attachHeader(w http.ResponseWriter, file model.Obj) {
 	fileName := file.GetName()
 	w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, fileName, url.PathEscape(fileName)))
 	w.Header().Set("Content-Type", utils.GetMimeType(fileName))
+	w.Header().Set("Etag", GetEtag(file))
 }
+
+func GetEtag(file model.Obj) string {
+	for _, v := range file.GetHash().Export() {
+		if len(v) != 0 {
+			return fmt.Sprintf(`"%s"`, v)
+		}
+	}
+	// modeled on nginx
+	return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), file.GetSize())
+}
 
 var NoProxyRange = &model.RangeReadCloser{}
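
GetEtag above prefers any exported hash and otherwise falls back to an nginx-style "<mtime hex>-<size hex>" value. A standalone illustration of the fallback format, with a made-up timestamp and size:

package main

import (
	"fmt"
	"time"
)

func main() {
	modTime := time.Unix(1740825334, 0) // hypothetical modification time
	size := int64(56)                   // hypothetical file size

	// Same format string as GetEtag's fallback branch.
	etag := fmt.Sprintf(`"%x-%x"`, modTime.Unix(), size)
	fmt.Println(etag) // "67c2e2f6-38"
}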


@@ -195,11 +195,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
 	}
 	rrc := link.RangeReadCloser
 	if len(link.URL) > 0 {
-		rangedRemoteLink := &model.Link{
-			URL:    link.URL,
-			Header: link.Header,
-		}
-		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, link)
 		if err != nil {
 			return nil, err
 		}


@@ -9,7 +9,6 @@ import (
 	"context"
 	"encoding/xml"
 	"errors"
-	"fmt"
 	"mime"
 	"net/http"
 	"path"
@@ -18,6 +17,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/server/common"
 )
 
 // Proppatch describes a property update instruction as defined in RFC 4918.
@@ -473,7 +473,7 @@ func findETag(ctx context.Context, ls LockSystem, name string, fi model.Obj) (st
 	// The Apache http 2.4 web server by default concatenates the
 	// modification time and size of a file. We replicate the heuristic
 	// with nanosecond granularity.
-	return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.GetSize()), nil
+	return common.GetEtag(fi), nil
 }
 
 func findSupportedLock(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {


@@ -227,11 +227,6 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
 	if err != nil {
 		return http.StatusNotFound, err
 	}
-	etag, err := findETag(ctx, h.LockSystem, reqPath, fi)
-	if err != nil {
-		return http.StatusInternalServerError, err
-	}
-	w.Header().Set("ETag", etag)
 	if r.Method == http.MethodHead {
 		w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.GetSize()))
 		return http.StatusOK, nil
@@ -361,7 +356,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
 	if err != nil {
 		return http.StatusInternalServerError, err
 	}
-	w.Header().Set("ETag", etag)
+	w.Header().Set("Etag", etag)
 	return http.StatusCreated, nil
 }