Mirror of https://github.com/AlistGo/alist.git (synced 2025-04-23 13:54:04 +08:00)

* feat(archive): multipart support & sevenzip tool
* feat(archive): rardecode tool
* feat(archive): support decompress multi-selected
* fix(archive): decompress response filter internal
* feat(archive): support multipart zip
* fix: more applicable AcceptedMultipartExtensions interface

This commit is contained in:
parent 704d3854df
commit 1335f80362

6
go.mod
@ -85,7 +85,7 @@ require (
github.com/blevesearch/go-faiss v1.0.20 // indirect
github.com/blevesearch/zapx/v16 v16.1.5 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.0 // indirect
github.com/bodgit/sevenzip v1.6.0
github.com/bodgit/windows v1.0.1 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/charmbracelet/x/ansi v0.2.3 // indirect
@ -106,14 +106,14 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
github.com/microcosm-cc/bluemonday v1.0.27
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/xhofe/115-sdk-go v0.1.4
github.com/yuin/goldmark v1.7.8
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
go4.org v0.0.0-20230225012048-214862532bf5
resty.dev/v3 v3.0.0-beta.2 // indirect
)
@ -3,5 +3,7 @@ package archive
import (
_ "github.com/alist-org/alist/v3/internal/archive/archives"
_ "github.com/alist-org/alist/v3/internal/archive/iso9660"
_ "github.com/alist-org/alist/v3/internal/archive/rardecode"
_ "github.com/alist-org/alist/v3/internal/archive/sevenzip"
_ "github.com/alist-org/alist/v3/internal/archive/zip"
)
@ -16,14 +16,18 @@ import (
|
||||
type Archives struct {
|
||||
}
|
||||
|
||||
func (*Archives) AcceptedExtensions() []string {
|
||||
func (Archives) AcceptedExtensions() []string {
|
||||
return []string{
|
||||
".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z",
|
||||
".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar",
|
||||
}
|
||||
}
|
||||
|
||||
func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
fsys, err := getFs(ss, args)
|
||||
func (Archives) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{}
|
||||
}
|
||||
|
||||
func (Archives) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
fsys, err := getFs(ss[0], args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -47,8 +51,8 @@ func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (mod
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
fsys, err := getFs(ss, args.ArchiveArgs)
|
||||
func (Archives) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
fsys, err := getFs(ss[0], args.ArchiveArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -69,8 +73,8 @@ func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([
|
||||
})
|
||||
}
|
||||
|
||||
func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
fsys, err := getFs(ss, args.ArchiveArgs)
|
||||
func (Archives) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
fsys, err := getFs(ss[0], args.ArchiveArgs)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@ -85,8 +89,8 @@ func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs)
|
||||
return file, stat.Size(), nil
|
||||
}
|
||||
|
||||
func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
fsys, err := getFs(ss, args.ArchiveArgs)
|
||||
func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
fsys, err := getFs(ss[0], args.ArchiveArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -133,5 +137,5 @@ func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args m
|
||||
var _ tool.Tool = (*Archives)(nil)
|
||||
|
||||
func init() {
|
||||
tool.RegisterTool(&Archives{})
|
||||
tool.RegisterTool(Archives{})
|
||||
}
|
||||
|
@ -14,19 +14,23 @@ import (
|
||||
type ISO9660 struct {
|
||||
}
|
||||
|
||||
func (t *ISO9660) AcceptedExtensions() []string {
|
||||
func (ISO9660) AcceptedExtensions() []string {
|
||||
return []string{".iso"}
|
||||
}
|
||||
|
||||
func (t *ISO9660) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
func (ISO9660) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{}
|
||||
}
|
||||
|
||||
func (ISO9660) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
return &model.ArchiveMetaInfo{
|
||||
Comment: "",
|
||||
Encrypted: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
img, err := getImage(ss)
|
||||
func (ISO9660) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
img, err := getImage(ss[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -48,8 +52,8 @@ func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
img, err := getImage(ss)
|
||||
func (ISO9660) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
img, err := getImage(ss[0])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@ -63,8 +67,8 @@ func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs
|
||||
return io.NopCloser(obj.Reader()), obj.Size(), nil
|
||||
}
|
||||
|
||||
func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
img, err := getImage(ss)
|
||||
func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
img, err := getImage(ss[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -92,5 +96,5 @@ func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args
|
||||
var _ tool.Tool = (*ISO9660)(nil)
|
||||
|
||||
func init() {
|
||||
tool.RegisterTool(&ISO9660{})
|
||||
tool.RegisterTool(ISO9660{})
|
||||
}
|
||||
|
140
internal/archive/rardecode/rardecode.go
Normal file
@ -0,0 +1,140 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/nwaples/rardecode/v2"
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type RarDecoder struct{}
|
||||
|
||||
func (RarDecoder) AcceptedExtensions() []string {
|
||||
return []string{".rar"}
|
||||
}
|
||||
|
||||
func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{
|
||||
".part1.rar": {".part%d.rar", 2},
|
||||
}
|
||||
}
|
||||
|
||||
func (RarDecoder) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
l, err := list(ss, args.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, tree := tool.GenerateMetaTreeFromFolderTraversal(l)
|
||||
return &model.ArchiveMetaInfo{
|
||||
Comment: "",
|
||||
Encrypted: false,
|
||||
Tree: tree,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (RarDecoder) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
func (RarDecoder) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
reader, err := getReader(ss, args.Password)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
for {
|
||||
var header *rardecode.FileHeader
|
||||
header, err = reader.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if header.Name == innerPath {
|
||||
if header.IsDir {
|
||||
break
|
||||
}
|
||||
return io.NopCloser(reader), header.UnPackedSize, nil
|
||||
}
|
||||
}
|
||||
return nil, 0, errs.ObjectNotFound
|
||||
}
|
||||
|
||||
func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
reader, err := getReader(ss, args.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if args.InnerPath == "/" {
|
||||
for {
|
||||
var header *rardecode.FileHeader
|
||||
header, err = reader.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name := header.Name
|
||||
if header.IsDir {
|
||||
name = name + "/"
|
||||
}
|
||||
err = decompress(reader, header, name, outputPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for {
|
||||
var header *rardecode.FileHeader
|
||||
header, err = reader.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name := header.Name
|
||||
if header.IsDir {
|
||||
name = name + "/"
|
||||
}
|
||||
if name == innerPath {
|
||||
err = _decompress(reader, header, outputPath, up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
createdBaseDir = true
|
||||
}
|
||||
restPath := strings.TrimPrefix(name, innerPath+"/")
|
||||
err = decompress(reader, header, restPath, targetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ tool.Tool = (*RarDecoder)(nil)
|
||||
|
||||
func init() {
|
||||
tool.RegisterTool(RarDecoder{})
|
||||
}
|
225
internal/archive/rardecode/utils.go
Normal file
@ -0,0 +1,225 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/nwaples/rardecode/v2"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type VolumeFile struct {
|
||||
stream.SStreamReadAtSeeker
|
||||
name string
|
||||
}
|
||||
|
||||
func (v *VolumeFile) Name() string {
|
||||
return v.name
|
||||
}
|
||||
|
||||
func (v *VolumeFile) Size() int64 {
|
||||
return v.SStreamReadAtSeeker.GetRawStream().GetSize()
|
||||
}
|
||||
|
||||
func (v *VolumeFile) Mode() fs.FileMode {
|
||||
return 0644
|
||||
}
|
||||
|
||||
func (v *VolumeFile) ModTime() time.Time {
|
||||
return v.SStreamReadAtSeeker.GetRawStream().ModTime()
|
||||
}
|
||||
|
||||
func (v *VolumeFile) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (v *VolumeFile) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *VolumeFile) Stat() (fs.FileInfo, error) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (v *VolumeFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type VolumeFs struct {
|
||||
parts map[string]*VolumeFile
|
||||
}
|
||||
|
||||
func (v *VolumeFs) Open(name string) (fs.File, error) {
|
||||
file, ok := v.parts[name]
|
||||
if !ok {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) {
|
||||
if len(ss) == 1 {
|
||||
reader, err := stream.NewReadAtSeeker(ss[0], 0)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
fileName := "file.rar"
|
||||
fsys := &VolumeFs{parts: map[string]*VolumeFile{
|
||||
fileName: {SStreamReadAtSeeker: reader, name: fileName},
|
||||
}}
|
||||
return fileName, rardecode.FileSystem(fsys), nil
|
||||
} else {
|
||||
parts := make(map[string]*VolumeFile, len(ss))
|
||||
for i, s := range ss {
|
||||
reader, err := stream.NewReadAtSeeker(s, 0)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
fileName := fmt.Sprintf("file.part%d.rar", i+1)
|
||||
parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName}
|
||||
}
|
||||
return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil
|
||||
}
|
||||
}
|
||||
|
||||
type WrapReader struct {
|
||||
files []*rardecode.File
|
||||
}
|
||||
|
||||
func (r *WrapReader) Files() []tool.SubFile {
|
||||
ret := make([]tool.SubFile, 0, len(r.files))
|
||||
for _, f := range r.files {
|
||||
ret = append(ret, &WrapFile{File: f})
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type WrapFile struct {
|
||||
*rardecode.File
|
||||
}
|
||||
|
||||
func (f *WrapFile) Name() string {
|
||||
if f.File.IsDir {
|
||||
return f.File.Name + "/"
|
||||
}
|
||||
return f.File.Name
|
||||
}
|
||||
|
||||
func (f *WrapFile) FileInfo() fs.FileInfo {
|
||||
return &WrapFileInfo{File: f.File}
|
||||
}
|
||||
|
||||
type WrapFileInfo struct {
|
||||
*rardecode.File
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Name() string {
|
||||
return stdpath.Base(f.File.Name)
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Size() int64 {
|
||||
return f.File.UnPackedSize
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) ModTime() time.Time {
|
||||
return f.File.ModificationTime
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) IsDir() bool {
|
||||
return f.File.IsDir
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
func list(ss []*stream.SeekableStream, password string) (*WrapReader, error) {
|
||||
fileName, fsOpt, err := makeOpts(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts := []rardecode.Option{fsOpt}
|
||||
if password != "" {
|
||||
opts = append(opts, rardecode.Password(password))
|
||||
}
|
||||
files, err := rardecode.List(fileName, opts...)
|
||||
// rardecode does not guarantee that a parent directory is listed before its children
|
||||
// a parent path is always shorter than its child paths, so sorting files by length puts parents first
|
||||
sort.Slice(files, func(i, j int) bool {
|
||||
return len(files[i].Name) < len(files[j].Name)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, filterPassword(err)
|
||||
}
|
||||
return &WrapReader{files: files}, nil
|
||||
}
|
||||
|
||||
func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, error) {
|
||||
fileName, fsOpt, err := makeOpts(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts := []rardecode.Option{fsOpt}
|
||||
if password != "" {
|
||||
opts = append(opts, rardecode.Password(password))
|
||||
}
|
||||
rc, err := rardecode.OpenReader(fileName, opts...)
|
||||
if err != nil {
|
||||
return nil, filterPassword(err)
|
||||
}
|
||||
ss[0].Closers.Add(rc)
|
||||
return &rc.Reader, nil
|
||||
}
|
||||
|
||||
func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if base != "" {
|
||||
err := _decompress(reader, header, targetPath, func(_ float64) {})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
_, err = io.Copy(f, &stream.ReaderUpdatingProgress{
|
||||
Reader: &stream.SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: header.UnPackedSize,
|
||||
},
|
||||
UpdateProgress: up,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func filterPassword(err error) error {
|
||||
if err != nil && strings.Contains(err.Error(), "password") {
|
||||
return errs.WrongArchivePassword
|
||||
}
|
||||
return err
|
||||
}
|
72
internal/archive/sevenzip/sevenzip.go
Normal file
@ -0,0 +1,72 @@
|
||||
package sevenzip
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
)
|
||||
|
||||
type SevenZip struct{}
|
||||
|
||||
func (SevenZip) AcceptedExtensions() []string {
|
||||
return []string{".7z"}
|
||||
}
|
||||
|
||||
func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{
|
||||
".7z.001": {".7z.%.3d", 2},
|
||||
}
|
||||
}
|
||||
|
||||
func (SevenZip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
reader, err := getReader(ss, args.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: reader})
|
||||
return &model.ArchiveMetaInfo{
|
||||
Comment: "",
|
||||
Encrypted: args.Password != "",
|
||||
Tree: tree,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (SevenZip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
func (SevenZip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
reader, err := getReader(ss, args.Password)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
for _, file := range reader.File {
|
||||
if file.Name == innerPath {
|
||||
r, e := file.Open()
|
||||
if e != nil {
|
||||
return nil, 0, e
|
||||
}
|
||||
return r, file.FileInfo().Size(), nil
|
||||
}
|
||||
}
|
||||
return nil, 0, errs.ObjectNotFound
|
||||
}
|
||||
|
||||
func (SevenZip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
reader, err := getReader(ss, args.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tool.DecompressFromFolderTraversal(&WrapReader{Reader: reader}, outputPath, args, up)
|
||||
}
|
||||
|
||||
var _ tool.Tool = (*SevenZip)(nil)
|
||||
|
||||
func init() {
|
||||
tool.RegisterTool(SevenZip{})
|
||||
}
|
61
internal/archive/sevenzip/utils.go
Normal file
@ -0,0 +1,61 @@
|
||||
package sevenzip
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/bodgit/sevenzip"
|
||||
"io"
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
type WrapReader struct {
|
||||
Reader *sevenzip.Reader
|
||||
}
|
||||
|
||||
func (r *WrapReader) Files() []tool.SubFile {
|
||||
ret := make([]tool.SubFile, 0, len(r.Reader.File))
|
||||
for _, f := range r.Reader.File {
|
||||
ret = append(ret, &WrapFile{f: f})
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type WrapFile struct {
|
||||
f *sevenzip.File
|
||||
}
|
||||
|
||||
func (f *WrapFile) Name() string {
|
||||
return f.f.Name
|
||||
}
|
||||
|
||||
func (f *WrapFile) FileInfo() fs.FileInfo {
|
||||
return f.f.FileInfo()
|
||||
}
|
||||
|
||||
func (f *WrapFile) Open() (io.ReadCloser, error) {
|
||||
return f.f.Open()
|
||||
}
|
||||
|
||||
func getReader(ss []*stream.SeekableStream, password string) (*sevenzip.Reader, error) {
|
||||
readerAt, err := stream.NewMultiReaderAt(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sr, err := sevenzip.NewReaderWithPassword(readerAt, readerAt.Size(), password)
|
||||
if err != nil {
|
||||
return nil, filterPassword(err)
|
||||
}
|
||||
return sr, nil
|
||||
}
|
||||
|
||||
func filterPassword(err error) error {
|
||||
if err != nil {
|
||||
var e *sevenzip.ReadError
|
||||
if errors.As(err, &e) && e.Encrypted {
|
||||
return errs.WrongArchivePassword
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
@ -6,10 +6,16 @@ import (
"io"
)

type MultipartExtension struct {
PartFileFormat string
SecondPartIndex int
}

type Tool interface {
AcceptedExtensions() []string
GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
AcceptedMultipartExtensions() map[string]MultipartExtension
GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
}
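For orientation, here is a minimal sketch (not part of this commit) of how a tool would satisfy the extended interface above: AcceptedMultipartExtensions maps the extension of the first volume to a naming pattern for the remaining volumes plus the index of the second part, and every stream-based method now receives all volumes as a []*stream.SeekableStream. The SplitTar name and the ".tar.001" convention are assumptions made for illustration.

// Hypothetical tool; only tool.MultipartExtension, tool.Tool, errs.NotSupport
// and model.ArchiveMetaInfo are taken from this commit.
package splittar

import (
	"io"

	"github.com/alist-org/alist/v3/internal/archive/tool"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
)

type SplitTar struct{}

func (SplitTar) AcceptedExtensions() []string { return []string{".tar"} }

// ".tar.001" names the first volume; later volumes follow ".tar.%.3d" starting at index 2.
func (SplitTar) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
	return map[string]tool.MultipartExtension{
		".tar.001": {PartFileFormat: ".tar.%.3d", SecondPartIndex: 2},
	}
}

func (SplitTar) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// A real implementation would walk the concatenated volumes and build a tree.
	return &model.ArchiveMetaInfo{}, nil
}

func (SplitTar) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	return nil, errs.NotSupport
}

func (SplitTar) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	return nil, 0, errs.NotSupport
}

func (SplitTar) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
	return errs.NotSupport
}

func init() { tool.RegisterTool(SplitTar{}) }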
201
internal/archive/tool/helper.go
Normal file
@ -0,0 +1,201 @@
|
||||
package tool
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
)
|
||||
|
||||
type SubFile interface {
|
||||
Name() string
|
||||
FileInfo() fs.FileInfo
|
||||
Open() (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type CanEncryptSubFile interface {
|
||||
IsEncrypted() bool
|
||||
SetPassword(password string)
|
||||
}
|
||||
|
||||
type ArchiveReader interface {
|
||||
Files() []SubFile
|
||||
}
|
||||
|
||||
func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) {
|
||||
encrypted := false
|
||||
dirMap := make(map[string]*model.ObjectTree)
|
||||
dirMap["."] = &model.ObjectTree{}
|
||||
for _, file := range r.Files() {
|
||||
if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
|
||||
encrypted = true
|
||||
}
|
||||
|
||||
name := strings.TrimPrefix(file.Name(), "/")
|
||||
var dir string
|
||||
var dirObj *model.ObjectTree
|
||||
isNewFolder := false
|
||||
if !file.FileInfo().IsDir() {
|
||||
// first, add the file to the folder that contains it
|
||||
dir = stdpath.Dir(name)
|
||||
dirObj = dirMap[dir]
|
||||
if dirObj == nil {
|
||||
isNewFolder = true
|
||||
dirObj = &model.ObjectTree{}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Modified = file.FileInfo().ModTime()
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
dirObj.Children = append(
|
||||
dirObj.Children, &model.ObjectTree{
|
||||
Object: *MakeModelObj(file.FileInfo()),
|
||||
},
|
||||
)
|
||||
} else {
|
||||
dir = strings.TrimSuffix(name, "/")
|
||||
dirObj = dirMap[dir]
|
||||
if dirObj == nil {
|
||||
isNewFolder = true
|
||||
dirObj = &model.ObjectTree{}
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Modified = file.FileInfo().ModTime()
|
||||
dirObj.Children = make([]model.ObjTree, 0)
|
||||
}
|
||||
if isNewFolder {
|
||||
// add this folder to its parent folder
|
||||
dir = stdpath.Dir(dir)
|
||||
pDirObj := dirMap[dir]
|
||||
if pDirObj != nil {
|
||||
pDirObj.Children = append(pDirObj.Children, dirObj)
|
||||
continue
|
||||
}
|
||||
|
||||
for {
|
||||
// handle archives that only record file paths and omit folder entries
|
||||
pDirObj = &model.ObjectTree{}
|
||||
pDirObj.IsFolder = true
|
||||
pDirObj.Name = stdpath.Base(dir)
|
||||
pDirObj.Modified = file.FileInfo().ModTime()
|
||||
dirMap[dir] = pDirObj
|
||||
pDirObj.Children = append(pDirObj.Children, dirObj)
|
||||
dir = stdpath.Dir(dir)
|
||||
if dirMap[dir] != nil {
|
||||
break
|
||||
}
|
||||
dirObj = pDirObj
|
||||
}
|
||||
}
|
||||
}
|
||||
return encrypted, dirMap["."].GetChildren()
|
||||
}
|
||||
|
||||
func MakeModelObj(file os.FileInfo) *model.Object {
|
||||
return &model.Object{
|
||||
Name: file.Name(),
|
||||
Size: file.Size(),
|
||||
Modified: file.ModTime(),
|
||||
IsFolder: file.IsDir(),
|
||||
}
|
||||
}
|
||||
|
||||
type WrapFileInfo struct {
|
||||
model.Obj
|
||||
}
|
||||
|
||||
func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
var err error
|
||||
files := r.Files()
|
||||
if args.InnerPath == "/" {
|
||||
for i, file := range files {
|
||||
name := file.Name()
|
||||
err = decompress(file, name, outputPath, args.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
up(float64(i+1) * 100.0 / float64(len(files)))
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for _, file := range files {
|
||||
name := file.Name()
|
||||
if name == innerPath {
|
||||
err = _decompress(file, outputPath, args.Password, up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
createdBaseDir = true
|
||||
}
|
||||
restPath := strings.TrimPrefix(name, innerPath+"/")
|
||||
err = decompress(file, restPath, targetPath, args.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decompress(file SubFile, filePath, outputPath, password string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if base != "" {
|
||||
err := _decompress(file, targetPath, password, func(_ float64) {})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _decompress(file SubFile, targetPath, password string, up model.UpdateProgress) error {
|
||||
if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
|
||||
encrypt.SetPassword(password)
|
||||
}
|
||||
rc, err := file.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = rc.Close() }()
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
_, err = io.Copy(f, &stream.ReaderUpdatingProgress{
|
||||
Reader: &stream.SimpleReaderWithSize{
|
||||
Reader: rc,
|
||||
Size: file.FileInfo().Size(),
|
||||
},
|
||||
UpdateProgress: up,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@ -5,19 +5,28 @@ import (
)

var (
Tools = make(map[string]Tool)
Tools = make(map[string]Tool)
MultipartExtensions = make(map[string]MultipartExtension)
)

func RegisterTool(tool Tool) {
for _, ext := range tool.AcceptedExtensions() {
Tools[ext] = tool
}
for mainFile, ext := range tool.AcceptedMultipartExtensions() {
MultipartExtensions[mainFile] = ext
Tools[mainFile] = tool
}
}

func GetArchiveTool(ext string) (Tool, error) {
func GetArchiveTool(ext string) (*MultipartExtension, Tool, error) {
t, ok := Tools[ext]
if !ok {
return nil, errs.UnknownArchiveFormat
return nil, nil, errs.UnknownArchiveFormat
}
return t, nil
partExt, ok := MultipartExtensions[ext]
if !ok {
return nil, t, nil
}
return &partExt, t, nil
}
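A brief usage sketch of the changed lookup (illustrative, not code from this commit): GetArchiveTool now also returns the multipart descriptor, which is nil for single-file formats, so callers can discover the remaining volumes before assembling the []*stream.SeekableStream slice. The package name, describeArchive, and the ".7z.001" literal are assumptions.

package example

import (
	"fmt"

	"github.com/alist-org/alist/v3/internal/archive/tool"
)

func describeArchive(ext string) error {
	partExt, t, err := tool.GetArchiveTool(ext) // e.g. ext = ".7z.001"
	if err != nil {
		return err // errs.UnknownArchiveFormat when nothing is registered for ext
	}
	if partExt != nil {
		// Multipart format: PartFileFormat (e.g. ".7z.%.3d") and SecondPartIndex
		// describe how the remaining volumes are named before the caller builds
		// the []*stream.SeekableStream passed to the tool's methods.
		fmt.Printf("multipart archive handled by %T\n", t)
	}
	return nil
}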
@ -2,8 +2,13 @@ package zip
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/fs"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/saintfish/chardet"
|
||||
"github.com/yeka/zip"
|
||||
@ -16,65 +21,62 @@ import (
|
||||
"golang.org/x/text/encoding/unicode"
|
||||
"golang.org/x/text/encoding/unicode/utf32"
|
||||
"golang.org/x/text/transform"
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func toModelObj(file os.FileInfo) *model.Object {
|
||||
return &model.Object{
|
||||
Name: decodeName(file.Name()),
|
||||
Size: file.Size(),
|
||||
Modified: file.ModTime(),
|
||||
IsFolder: file.IsDir(),
|
||||
}
|
||||
type WrapReader struct {
|
||||
Reader *zip.Reader
|
||||
}
|
||||
|
||||
func decompress(file *zip.File, filePath, outputPath, password string) error {
|
||||
targetPath := outputPath
|
||||
dir, base := stdpath.Split(filePath)
|
||||
if dir != "" {
|
||||
targetPath = stdpath.Join(targetPath, dir)
|
||||
err := os.MkdirAll(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func (r *WrapReader) Files() []tool.SubFile {
|
||||
ret := make([]tool.SubFile, 0, len(r.Reader.File))
|
||||
for _, f := range r.Reader.File {
|
||||
ret = append(ret, &WrapFile{f: f})
|
||||
}
|
||||
if base != "" {
|
||||
err := _decompress(file, targetPath, password, func(_ float64) {})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return ret
|
||||
}
|
||||
|
||||
func _decompress(file *zip.File, targetPath, password string, up model.UpdateProgress) error {
|
||||
if file.IsEncrypted() {
|
||||
file.SetPassword(password)
|
||||
type WrapFileInfo struct {
|
||||
fs.FileInfo
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Name() string {
|
||||
return decodeName(f.FileInfo.Name())
|
||||
}
|
||||
|
||||
type WrapFile struct {
|
||||
f *zip.File
|
||||
}
|
||||
|
||||
func (f *WrapFile) Name() string {
|
||||
return decodeName(f.f.Name)
|
||||
}
|
||||
|
||||
func (f *WrapFile) FileInfo() fs.FileInfo {
|
||||
return &WrapFileInfo{FileInfo: f.f.FileInfo()}
|
||||
}
|
||||
|
||||
func (f *WrapFile) Open() (io.ReadCloser, error) {
|
||||
return f.f.Open()
|
||||
}
|
||||
|
||||
func (f *WrapFile) IsEncrypted() bool {
|
||||
return f.f.IsEncrypted()
|
||||
}
|
||||
|
||||
func (f *WrapFile) SetPassword(password string) {
|
||||
f.f.SetPassword(password)
|
||||
}
|
||||
|
||||
func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
|
||||
if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
|
||||
// FIXME: Incorrect parsing method for standard multipart zip format
|
||||
ss = append(ss[1:], ss[0])
|
||||
}
|
||||
rc, err := file.Open()
|
||||
reader, err := stream.NewMultiReaderAt(ss)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
defer rc.Close()
|
||||
f, err := os.OpenFile(stdpath.Join(targetPath, decodeName(file.FileInfo().Name())), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(f, &stream.ReaderUpdatingProgress{
|
||||
Reader: &stream.SimpleReaderWithSize{
|
||||
Reader: rc,
|
||||
Size: file.FileInfo().Size(),
|
||||
},
|
||||
UpdateProgress: up,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return zip.NewReader(reader, reader.Size())
|
||||
}
|
||||
|
||||
func filterPassword(err error) error {
|
||||
|
@ -2,7 +2,6 @@ package zip
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
@ -10,106 +9,37 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/yeka/zip"
|
||||
)
|
||||
|
||||
type Zip struct {
|
||||
}
|
||||
|
||||
func (*Zip) AcceptedExtensions() []string {
|
||||
return []string{".zip"}
|
||||
func (Zip) AcceptedExtensions() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
reader, err := stream.NewReadAtSeeker(ss, 0)
|
||||
func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{
|
||||
".zip": {".z%.2d", 1},
|
||||
".zip.001": {".zip.%.3d", 2},
|
||||
}
|
||||
}
|
||||
|
||||
func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zipReader, err := zip.NewReader(reader, ss.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encrypted := false
|
||||
dirMap := make(map[string]*model.ObjectTree)
|
||||
dirMap["."] = &model.ObjectTree{}
|
||||
for _, file := range zipReader.File {
|
||||
if file.IsEncrypted() {
|
||||
encrypted = true
|
||||
}
|
||||
|
||||
name := strings.TrimPrefix(decodeName(file.Name), "/")
|
||||
var dir string
|
||||
var dirObj *model.ObjectTree
|
||||
isNewFolder := false
|
||||
if !file.FileInfo().IsDir() {
|
||||
// first, add the file to the folder that contains it
|
||||
dir = stdpath.Dir(name)
|
||||
dirObj = dirMap[dir]
|
||||
if dirObj == nil {
|
||||
isNewFolder = true
|
||||
dirObj = &model.ObjectTree{}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Modified = file.ModTime()
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
dirObj.Children = append(
|
||||
dirObj.Children, &model.ObjectTree{
|
||||
Object: *toModelObj(file.FileInfo()),
|
||||
},
|
||||
)
|
||||
} else {
|
||||
dir = strings.TrimSuffix(name, "/")
|
||||
dirObj = dirMap[dir]
|
||||
if dirObj == nil {
|
||||
isNewFolder = true
|
||||
dirObj = &model.ObjectTree{}
|
||||
dirMap[dir] = dirObj
|
||||
}
|
||||
dirObj.IsFolder = true
|
||||
dirObj.Name = stdpath.Base(dir)
|
||||
dirObj.Modified = file.ModTime()
|
||||
dirObj.Children = make([]model.ObjTree, 0)
|
||||
}
|
||||
if isNewFolder {
|
||||
// add this folder to its parent folder
|
||||
dir = stdpath.Dir(dir)
|
||||
pDirObj := dirMap[dir]
|
||||
if pDirObj != nil {
|
||||
pDirObj.Children = append(pDirObj.Children, dirObj)
|
||||
continue
|
||||
}
|
||||
|
||||
for {
|
||||
// handle archives that only record file paths and omit folder entries
|
||||
pDirObj = &model.ObjectTree{}
|
||||
pDirObj.IsFolder = true
|
||||
pDirObj.Name = stdpath.Base(dir)
|
||||
pDirObj.Modified = file.ModTime()
|
||||
dirMap[dir] = pDirObj
|
||||
pDirObj.Children = append(pDirObj.Children, dirObj)
|
||||
dir = stdpath.Dir(dir)
|
||||
if dirMap[dir] != nil {
|
||||
break
|
||||
}
|
||||
dirObj = pDirObj
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
|
||||
return &model.ArchiveMetaInfo{
|
||||
Comment: zipReader.Comment,
|
||||
Encrypted: encrypted,
|
||||
Tree: dirMap["."].GetChildren(),
|
||||
Tree: tree,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
reader, err := stream.NewReadAtSeeker(ss, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
zipReader, err := zip.NewReader(reader, ss.GetSize())
|
||||
func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -134,13 +64,13 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
|
||||
if dir == nil && len(strs) == 2 {
|
||||
dir = &model.Object{
|
||||
Name: strs[0],
|
||||
Modified: ss.ModTime(),
|
||||
Modified: ss[0].ModTime(),
|
||||
IsFolder: true,
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
ret = append(ret, toModelObj(file.FileInfo()))
|
||||
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
|
||||
}
|
||||
if len(ret) == 0 && dir != nil {
|
||||
ret = append(ret, dir)
|
||||
@ -157,7 +87,7 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
|
||||
continue
|
||||
}
|
||||
exist = true
|
||||
ret = append(ret, toModelObj(file.FileInfo()))
|
||||
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
|
||||
}
|
||||
if !exist {
|
||||
return nil, errs.ObjectNotFound
|
||||
@ -166,12 +96,8 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
|
||||
}
|
||||
}
|
||||
|
||||
func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
reader, err := stream.NewReadAtSeeker(ss, 0)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
zipReader, err := zip.NewReader(reader, ss.GetSize())
|
||||
func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@ -191,58 +117,16 @@ func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.
|
||||
return nil, 0, errs.ObjectNotFound
|
||||
}
|
||||
|
||||
func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
reader, err := stream.NewReadAtSeeker(ss, 0)
|
||||
func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
zipReader, err := zip.NewReader(reader, ss.GetSize())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if args.InnerPath == "/" {
|
||||
for i, file := range zipReader.File {
|
||||
name := decodeName(file.Name)
|
||||
err = decompress(file, name, outputPath, args.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
up(float64(i+1) * 100.0 / float64(len(zipReader.File)))
|
||||
}
|
||||
} else {
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
innerBase := stdpath.Base(innerPath)
|
||||
createdBaseDir := false
|
||||
for _, file := range zipReader.File {
|
||||
name := decodeName(file.Name)
|
||||
if name == innerPath {
|
||||
err = _decompress(file, outputPath, args.Password, up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
} else if strings.HasPrefix(name, innerPath+"/") {
|
||||
targetPath := stdpath.Join(outputPath, innerBase)
|
||||
if !createdBaseDir {
|
||||
err = os.Mkdir(targetPath, 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
createdBaseDir = true
|
||||
}
|
||||
restPath := strings.TrimPrefix(name, innerPath+"/")
|
||||
err = decompress(file, restPath, targetPath, args.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return tool.DecompressFromFolderTraversal(&WrapReader{Reader: zipReader}, outputPath, args, up)
|
||||
}
|
||||
|
||||
var _ tool.Tool = (*Zip)(nil)
|
||||
|
||||
func init() {
|
||||
tool.RegisterTool(&Zip{})
|
||||
tool.RegisterTool(Zip{})
|
||||
}
|
||||
|
@ -79,13 +79,13 @@ type Remove interface {
|
||||
type Put interface {
|
||||
// Put a file (provided as a FileStreamer) into the driver
|
||||
// Besides the most basic upload functionality, the following features also need to be implemented:
|
||||
// 1. Canceling (when `<-ctx.Done()` returns), by the following methods:
|
||||
// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
|
||||
// (1) Use request methods that carry context, such as the following:
|
||||
// a. http.NewRequestWithContext
|
||||
// b. resty.Request.SetContext
|
||||
// c. s3manager.Uploader.UploadWithContext
|
||||
// d. utils.CopyWithCtx
|
||||
// (2) Use a `driver.ReaderWithCtx` or a `driver.NewLimitedUploadStream`
|
||||
// (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
|
||||
// (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
|
||||
// this is typically applicable to chunked uploads.
|
||||
// 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
|
||||
|
@ -4,17 +4,6 @@ import (
|
||||
"context"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/xhofe/tache"
|
||||
"io"
|
||||
"math/rand"
|
||||
"mime"
|
||||
@ -25,6 +14,17 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/xhofe/tache"
|
||||
)
|
||||
|
||||
type ArchiveDownloadTask struct {
|
||||
@ -37,7 +37,6 @@ type ArchiveDownloadTask struct {
|
||||
dstStorage driver.Driver
|
||||
SrcStorageMp string
|
||||
DstStorageMp string
|
||||
Tool tool.Tool
|
||||
}
|
||||
|
||||
func (t *ArchiveDownloadTask) GetName() string {
|
||||
@ -67,33 +66,39 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
|
||||
if t.srcStorage == nil {
|
||||
t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
|
||||
}
|
||||
l, srcObj, err := op.Link(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
|
||||
srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
|
||||
Header: http.Header{},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs := stream.FileStream{
|
||||
Obj: srcObj,
|
||||
Ctx: t.Ctx(),
|
||||
}
|
||||
ss, err := stream.NewSeekableStream(fs, l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := ss.Close(); err != nil {
|
||||
log.Errorf("failed to close file streamer, %v", err)
|
||||
var e error
|
||||
for _, s := range ss {
|
||||
e = stderrors.Join(e, s.Close())
|
||||
}
|
||||
if e != nil {
|
||||
log.Errorf("failed to close file streamer, %v", e)
|
||||
}
|
||||
}()
|
||||
var decompressUp model.UpdateProgress
|
||||
if t.CacheFull {
|
||||
t.SetTotalBytes(srcObj.GetSize())
|
||||
t.status = "getting src object"
|
||||
_, err = ss.CacheFullInTempFileAndUpdateProgress(t.SetProgress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var total, cur int64 = 0, 0
|
||||
for _, s := range ss {
|
||||
total += s.GetSize()
|
||||
}
|
||||
t.SetTotalBytes(total)
|
||||
t.status = "getting src object"
|
||||
for _, s := range ss {
|
||||
_, err = s.CacheFullInTempFileAndUpdateProgress(func(p float64) {
|
||||
t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total))
|
||||
})
|
||||
cur += s.GetSize()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
t.SetProgress(100.0)
|
||||
decompressUp = func(_ float64) {}
|
||||
} else {
|
||||
decompressUp = t.SetProgress
|
||||
@ -103,7 +108,7 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = t.Tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
|
||||
err = tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -344,11 +349,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
ext := stdpath.Ext(srcObjActualPath)
|
||||
t, err := tool.GetArchiveTool(ext)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
|
||||
}
|
||||
taskCreator, _ := ctx.Value("user").(*model.User)
|
||||
tsk := &ArchiveDownloadTask{
|
||||
TaskExtension: task.TaskExtension{
|
||||
@ -361,7 +361,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
|
||||
DstDirPath: dstDirActualPath,
|
||||
SrcStorageMp: srcStorage.GetStorage().MountPath,
|
||||
DstStorageMp: dstStorage.GetStorage().MountPath,
|
||||
Tool: t,
|
||||
}
|
||||
if ctx.Value(conf.NoTaskKey) != nil {
|
||||
uploadTask, err := tsk.RunWithoutPushUploadTask()
|
||||
|
@ -3,6 +3,7 @@ package op
|
||||
import (
|
||||
"context"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
@ -54,21 +55,76 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
|
||||
return meta, err
|
||||
}
|
||||
|
||||
func getArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, *stream.SeekableStream, error) {
|
||||
func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) {
|
||||
l, obj, err := Link(ctx, storage, path, args)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
|
||||
}
|
||||
ext := stdpath.Ext(obj.GetName())
|
||||
t, err := tool.GetArchiveTool(ext)
|
||||
baseName, ext, found := strings.Cut(obj.GetName(), ".")
|
||||
if !found {
|
||||
if l.MFile != nil {
|
||||
_ = l.MFile.Close()
|
||||
}
|
||||
if l.RangeReadCloser != nil {
|
||||
_ = l.RangeReadCloser.Close()
|
||||
}
|
||||
return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
|
||||
}
|
||||
partExt, t, err := tool.GetArchiveTool("." + ext)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
|
||||
var e error
|
||||
partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
|
||||
if e != nil {
|
||||
if l.MFile != nil {
|
||||
_ = l.MFile.Close()
|
||||
}
|
||||
if l.RangeReadCloser != nil {
|
||||
_ = l.RangeReadCloser.Close()
|
||||
}
|
||||
return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
|
||||
}
|
||||
}
|
||||
ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
|
||||
if err != nil {
|
||||
if l.MFile != nil {
|
||||
_ = l.MFile.Close()
|
||||
}
|
||||
if l.RangeReadCloser != nil {
|
||||
_ = l.RangeReadCloser.Close()
|
||||
}
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
|
||||
}
|
||||
return obj, t, ss, nil
|
||||
ret := []*stream.SeekableStream{ss}
|
||||
if partExt == nil {
|
||||
return obj, t, ret, nil
|
||||
} else {
|
||||
index := partExt.SecondPartIndex
|
||||
dir := stdpath.Dir(path)
|
||||
for {
|
||||
p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
|
||||
var o model.Obj
|
||||
l, o, err = Link(ctx, storage, p, args)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
|
||||
if err != nil {
|
||||
if l.MFile != nil {
|
||||
_ = l.MFile.Close()
|
||||
}
|
||||
if l.RangeReadCloser != nil {
|
||||
_ = l.RangeReadCloser.Close()
|
||||
}
|
||||
for _, s := range ret {
|
||||
_ = s.Close()
|
||||
}
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
|
||||
}
|
||||
ret = append(ret, ss)
|
||||
index++
|
||||
}
|
||||
return obj, t, ret, nil
|
||||
}
|
||||
}
|
||||
|
||||
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
|
||||
@ -94,13 +150,17 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
|
||||
return obj, archiveMetaProvider, err
|
||||
}
|
||||
}
|
||||
obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := ss.Close(); err != nil {
|
||||
log.Errorf("failed to close file streamer, %v", err)
|
||||
var e error
|
||||
for _, s := range ss {
|
||||
e = stderrors.Join(e, s.Close())
|
||||
}
|
||||
if e != nil {
|
||||
log.Errorf("failed to close file streamer, %v", e)
|
||||
}
|
||||
}()
|
||||
meta, err := t.GetMeta(ss, args.ArchiveArgs)
|
||||
@ -114,9 +174,9 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
|
||||
if !storage.Config().NoCache {
|
||||
Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
|
||||
archiveMetaProvider.Expiration = &Expiration
|
||||
} else if ss.Link.MFile == nil {
|
||||
} else if ss[0].Link.MFile == nil {
|
||||
// alias and crypt drivers
|
||||
archiveMetaProvider.Expiration = ss.Link.Expiration
|
||||
archiveMetaProvider.Expiration = ss[0].Link.Expiration
|
||||
}
|
||||
return obj, archiveMetaProvider, err
|
||||
}
|
||||
@ -188,13 +248,17 @@ func _listArchive(ctx context.Context, storage driver.Driver, path string, args
|
||||
return obj, files, err
|
||||
}
|
||||
}
|
||||
obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := ss.Close(); err != nil {
|
||||
log.Errorf("failed to close file streamer, %v", err)
|
||||
var e error
|
||||
for _, s := range ss {
|
||||
e = stderrors.Join(e, s.Close())
|
||||
}
|
||||
if e != nil {
|
||||
log.Errorf("failed to close file streamer, %v", e)
|
||||
}
|
||||
}()
|
||||
files, err := t.List(ss, args.ArchiveInnerArgs)
|
||||
@ -378,8 +442,8 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
|
||||
}
|
||||
|
||||
type streamWithParent struct {
|
||||
rc io.ReadCloser
|
||||
parent *stream.SeekableStream
|
||||
rc io.ReadCloser
|
||||
parents []*stream.SeekableStream
|
||||
}
|
||||
|
||||
func (s *streamWithParent) Read(p []byte) (int, error) {
|
||||
@ -387,24 +451,31 @@ func (s *streamWithParent) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
func (s *streamWithParent) Close() error {
|
||||
err1 := s.rc.Close()
|
||||
err2 := s.parent.Close()
|
||||
return stderrors.Join(err1, err2)
|
||||
err := s.rc.Close()
|
||||
for _, ss := range s.parents {
|
||||
err = stderrors.Join(err, ss.Close())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
_, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
_, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
rc, size, err := t.Extract(ss, args)
|
||||
if err != nil {
|
||||
if e := ss.Close(); e != nil {
|
||||
var e error
|
||||
for _, s := range ss {
|
||||
e = stderrors.Join(e, s.Close())
|
||||
}
|
||||
if e != nil {
|
||||
log.Errorf("failed to close file streamer, %v", e)
|
||||
err = stderrors.Join(err, e)
|
||||
}
|
||||
return nil, 0, err
|
||||
}
|
||||
return &streamWithParent{rc: rc, parent: ss}, size, nil
|
||||
return &streamWithParent{rc: rc, parents: ss}, size, nil
|
||||
}
|
||||
|
||||
func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {
|
||||
|
@ -139,7 +139,7 @@ type RateLimitRangeReadCloser struct {
|
||||
Limiter Limiter
|
||||
}
|
||||
|
||||
func (rrc RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go4.org/readerutil"
|
||||
)
|
||||
|
||||
type FileStream struct {
|
||||
@ -159,6 +160,10 @@ var _ model.FileStreamer = (*FileStream)(nil)
|
||||
//var _ seekableStream = (*FileStream)(nil)
|
||||
|
||||
// for most internal stream, which is either RangeReadCloser or MFile
|
||||
// Any functionality implemented based on SeekableStream should implement a Close method,
|
||||
// whose only purpose is to close the SeekableStream object. If such functionality has
|
||||
// additional resources that need to be closed, they should be added to the Closer property of
|
||||
// the SeekableStream object and be closed together when the SeekableStream object is closed.
|
||||
type SeekableStream struct {
|
||||
FileStream
|
||||
Link *model.Link
|
||||
@ -196,7 +201,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
|
||||
return &ss, nil
|
||||
}
|
||||
if ss.Link.RangeReadCloser != nil {
|
||||
ss.rangeReadCloser = RateLimitRangeReadCloser{
|
||||
ss.rangeReadCloser = &RateLimitRangeReadCloser{
|
||||
RangeReadCloserIF: ss.Link.RangeReadCloser,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
@ -208,7 +213,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rrc = RateLimitRangeReadCloser{
|
||||
rrc = &RateLimitRangeReadCloser{
|
||||
RangeReadCloserIF: rrc,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
@ -364,7 +369,7 @@ type RangeReadReadAtSeeker struct {
|
||||
ss *SeekableStream
|
||||
masterOff int64
|
||||
readers []*readerCur
|
||||
*headCache
|
||||
headCache *headCache
|
||||
}
|
||||
|
||||
type headCache struct {
|
||||
@ -406,7 +411,7 @@ func (c *headCache) read(p []byte) (n int, err error) {
|
||||
}
|
||||
return
|
||||
}
|
||||
func (r *headCache) close() error {
|
||||
func (r *headCache) Close() error {
|
||||
for i := range r.bufs {
|
||||
r.bufs[i] = nil
|
||||
}
|
||||
@ -419,6 +424,7 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
|
||||
reader := r.readers[0]
|
||||
r.readers = r.readers[1:]
|
||||
r.headCache = &headCache{readerCur: reader}
|
||||
r.ss.Closers.Add(r.headCache)
|
||||
}
|
||||
}
|
||||
|
||||
@ -449,6 +455,18 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStr
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
|
||||
readers := make([]readerutil.SizeReaderAt, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
ra, err := NewReadAtSeeker(s, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readers = append(readers, io.NewSectionReader(ra, 0, s.GetSize()))
|
||||
}
|
||||
return readerutil.NewMultiReaderAt(readers...), nil
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
|
||||
return r.ss
|
||||
}
|
||||
@ -559,9 +577,6 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) Close() error {
|
||||
if r.headCache != nil {
|
||||
_ = r.headCache.close()
|
||||
}
|
||||
return r.ss.Close()
|
||||
}
|
||||
|
||||
|
@ -1,10 +1,11 @@
|
||||
package handles
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
@ -208,14 +209,30 @@ func FsArchiveList(c *gin.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
type StringOrArray []string
|
||||
|
||||
func (s *StringOrArray) UnmarshalJSON(data []byte) error {
|
||||
var value string
|
||||
if err := json.Unmarshal(data, &value); err == nil {
|
||||
*s = []string{value}
|
||||
return nil
|
||||
}
|
||||
var sliceValue []string
|
||||
if err := json.Unmarshal(data, &sliceValue); err != nil {
|
||||
return err
|
||||
}
|
||||
*s = sliceValue
|
||||
return nil
|
||||
}
|
||||
|
||||
type ArchiveDecompressReq struct {
|
||||
SrcDir string `json:"src_dir" form:"src_dir"`
|
||||
DstDir string `json:"dst_dir" form:"dst_dir"`
|
||||
Name string `json:"name" form:"name"`
|
||||
ArchivePass string `json:"archive_pass" form:"archive_pass"`
|
||||
InnerPath string `json:"inner_path" form:"inner_path"`
|
||||
CacheFull bool `json:"cache_full" form:"cache_full"`
|
||||
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
|
||||
SrcDir string `json:"src_dir" form:"src_dir"`
|
||||
DstDir string `json:"dst_dir" form:"dst_dir"`
|
||||
Name StringOrArray `json:"name" form:"name"`
|
||||
ArchivePass string `json:"archive_pass" form:"archive_pass"`
|
||||
InnerPath string `json:"inner_path" form:"inner_path"`
|
||||
CacheFull bool `json:"cache_full" form:"cache_full"`
|
||||
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
|
||||
}
|
||||
|
||||
func FsArchiveDecompress(c *gin.Context) {
|
||||
@ -229,41 +246,51 @@ func FsArchiveDecompress(c *gin.Context) {
|
||||
common.ErrorResp(c, errs.PermissionDenied, 403)
|
||||
return
|
||||
}
|
||||
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, req.Name))
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 403)
|
||||
return
|
||||
srcPaths := make([]string, 0, len(req.Name))
|
||||
for _, name := range req.Name {
|
||||
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name))
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 403)
|
||||
return
|
||||
}
|
||||
srcPaths = append(srcPaths, srcPath)
|
||||
}
|
||||
dstDir, err := user.JoinPath(req.DstDir)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 403)
|
||||
return
|
||||
}
|
||||
t, err := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
|
||||
ArchiveInnerArgs: model.ArchiveInnerArgs{
|
||||
ArchiveArgs: model.ArchiveArgs{
|
||||
LinkArgs: model.LinkArgs{
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
HttpReq: c.Request,
|
||||
tasks := make([]task.TaskExtensionInfo, 0, len(srcPaths))
|
||||
for _, srcPath := range srcPaths {
|
||||
t, e := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
|
||||
ArchiveInnerArgs: model.ArchiveInnerArgs{
|
||||
ArchiveArgs: model.ArchiveArgs{
|
||||
LinkArgs: model.LinkArgs{
|
||||
Header: c.Request.Header,
|
||||
Type: c.Query("type"),
|
||||
HttpReq: c.Request,
|
||||
},
|
||||
Password: req.ArchivePass,
|
||||
},
|
||||
Password: req.ArchivePass,
|
||||
InnerPath: utils.FixAndCleanPath(req.InnerPath),
|
||||
},
|
||||
InnerPath: utils.FixAndCleanPath(req.InnerPath),
|
||||
},
|
||||
CacheFull: req.CacheFull,
|
||||
PutIntoNewDir: req.PutIntoNewDir,
|
||||
})
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.WrongArchivePassword) {
|
||||
common.ErrorResp(c, err, 202)
|
||||
} else {
|
||||
common.ErrorResp(c, err, 500)
|
||||
CacheFull: req.CacheFull,
|
||||
PutIntoNewDir: req.PutIntoNewDir,
|
||||
})
|
||||
if e != nil {
|
||||
if errors.Is(e, errs.WrongArchivePassword) {
|
||||
common.ErrorResp(c, e, 202)
|
||||
} else {
|
||||
common.ErrorResp(c, e, 500)
|
||||
}
|
||||
return
|
||||
}
|
||||
if t != nil {
|
||||
tasks = append(tasks, t)
|
||||
}
|
||||
return
|
||||
}
|
||||
common.SuccessResp(c, gin.H{
|
||||
"task": getTaskInfo(t),
|
||||
"task": getTaskInfos(tasks),
|
||||
})
|
||||
}
|
||||
|
||||
@ -376,7 +403,7 @@ func ArchiveInternalExtract(c *gin.Context) {
|
||||
func ArchiveExtensions(c *gin.Context) {
|
||||
var ext []string
|
||||
for key := range tool.Tools {
|
||||
ext = append(ext, strings.TrimPrefix(key, "."))
|
||||
ext = append(ext, key)
|
||||
}
|
||||
common.SuccessResp(c, ext)
|
||||
}
|
||||
|