Merge 26d89becbfdaf16d14410419688851f155595f5e into 0010090d0517976248894bbd48a9bb1ac5bc0182

tbodt 2025-02-26 18:10:07 +05:30 committed by GitHub
commit c8f218b04e
26 changed files with 3033 additions and 5 deletions

View File

@@ -14,6 +14,7 @@ import (
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/cryptomator"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"

View File

@@ -0,0 +1,859 @@
// Package cryptomator provides wrappers for Fs and Object which implement Cryptomator encryption
package cryptomator
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/dircache"
)
// Errors
var (
errorMetaTooBig = errors.New("metadata file is too big")
)
const (
dirIDC9r = "dir.c9r"
dirIDBackupC9r = "dirid.c9r"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cryptomator",
Description: "Encrypt/Decrypt Cryptomator-format vaults",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to use as a Cryptomator vault.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "password",
Help: "Password for Cryptomator vault.",
IsPassword: true,
Required: true,
}},
})
}
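For reference, a vault configured through these options comes out something like this in rclone.conf (a hypothetical sketch: the remote and path names are made up, and the password is stored in rclone's obscured form):

[myvault]
type = cryptomator
remote = mydrive:path/to/vault
password = <obscured password>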
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := opt.Remote
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point cryptomator remote at itself - check the value of the remote setting")
}
wrappedFs, err := cache.Get(ctx, remote)
if err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
// Remove slashes on start or end, which would otherwise confuse the dirCache (as is documented on dircache.SplitPath).
root = strings.Trim(root, "/")
f := &Fs{
wrapped: wrappedFs,
name: name,
root: root,
opt: *opt,
}
cache.PinUntilFinalized(f.wrapped, f)
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Cryptomator's obfuscated directory structure can always support empty directories
f.features.CanHaveEmptyDirectories = true
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, fmt.Errorf("failed to decrypt password: %w", err)
}
err = f.loadOrCreateVault(ctx, password)
if err != nil {
return nil, err
}
f.cryptor, err = newCryptor(f.masterKey, f.vaultConfig.CipherCombo)
if err != nil {
return nil, err
}
// Make sure the root directory exists
rootDirID := f.dirIDPath("")
// TODO: make directory ID backup
err = f.wrapped.Mkdir(ctx, rootDirID)
if err != nil {
return nil, fmt.Errorf("failed to create root dir at %q: %s", rootDirID, err)
}
f.dirCache = dircache.New(root, "", f)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, "", &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, fmt.Errorf("incomprehensible error while checking for whether the root at %q is a file: %w", root, err)
}
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
Password string `config:"password"`
}
// Fs wraps another fs and encrypts the directory
// structure, filenames, and file contents as outlined
// in https://docs.cryptomator.org/en/latest/security/architecture/
type Fs struct {
wrapped fs.Fs
name string
root string
opt Options
features *fs.Features
wrapper fs.Fs
masterKey masterKey
vaultConfig vaultConfig
cryptor
dirCache *dircache.DirCache
}
// -------- fs.Info
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Cryptomator vault '%s:%s'", f.Name(), f.Root())
}
// Precision of the remote
func (f *Fs) Precision() time.Duration { return f.wrapped.Precision() }
// Hashes returns nothing as the hashes returned by the backend would be of encrypted data, not plaintext
// TODO: does cryptomator have plaintext hashes readily available?
func (f *Fs) Hashes() hash.Set { return hash.NewHashSet() }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// -------- Directories
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
dirID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
dirPath := f.dirIDPath(dirID)
encryptedEntries, err := f.wrapped.List(ctx, dirPath)
if err != nil {
return nil, err
}
for _, entry := range encryptedEntries {
encryptedFilename := path.Base(entry.Remote())
encryptedFilename, ok := strings.CutSuffix(encryptedFilename, ".c9r")
if !ok {
continue
}
if encryptedFilename == "dirid" {
continue
}
filename, err := f.decryptFilename(encryptedFilename, dirID)
if err != nil {
return nil, fmt.Errorf("failed to decrypt filename %q: %w", encryptedFilename, err)
}
remote := path.Join(dir, filename)
switch entry := entry.(type) {
case fs.Directory:
// Get the path of the real directory from dir.c9r.
dirID, err := f.readSmallFile(ctx, path.Join(entry.Remote(), dirIDC9r), 100)
if err != nil {
return nil, err
}
dirIDPath := f.dirIDPath(string(dirID))
// Turning that path into an fs.Directory is really annoying. The only thing in the standard Fs interface that returns fs.Directory objects is List, so we have to list the parent.
dirIDParent, dirIDLeaf := path.Split(dirIDPath)
subEntries, err := f.wrapped.List(ctx, dirIDParent)
if err != nil {
return nil, err
}
var realDir fs.Directory
for i := range subEntries {
dir, ok := subEntries[i].(fs.Directory)
if ok && path.Base(dir.Remote()) == dirIDLeaf {
realDir = dir
break
}
}
if realDir == nil {
err = fmt.Errorf("couldn't find %q in listing of %q (has directory been removed?)", dirIDLeaf, dirIDParent)
}
if err != nil {
return nil, err
}
entries = append(entries, &Directory{DirWrapper: fs.NewDirWrapper(remote, realDir), f: f})
case fs.Object:
entries = append(entries, &DecryptingObject{Object: entry, f: f, decRemote: remote})
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
}
}
return
}
// FindLeaf finds a child of name leaf in the directory with id pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
subdirIDFile := path.Join(f.leafPath(leaf, pathID), dirIDC9r)
subdirID, err := f.readSmallFile(ctx, subdirIDFile, 100)
if errors.Is(err, fs.ErrorObjectNotFound) {
// If the directory doesn't exist, return found=false and no error to let the DirCache create the directory if it wants.
err = nil
return
}
if err != nil {
err = fmt.Errorf("failed to read ID of subdir from %q: %w", subdirIDFile, err)
return
}
pathIDOut = string(subdirID)
found = true
return
}
// CreateDir creates a directory at the request of the DirCache
func (f *Fs) CreateDir(ctx context.Context, parentID string, leaf string) (newID string, err error) {
leafPath := f.leafPath(leaf, parentID)
newID = uuid.NewString()
dirPath := f.dirIDPath(newID)
// Put directory ID backup file, thus creating the directory
data := f.encryptReader(bytes.NewBuffer([]byte(newID)))
info := object.NewStaticObjectInfo(path.Join(dirPath, dirIDBackupC9r), time.Now(), -1, true, nil, nil)
_, err = f.wrapped.Put(ctx, data, info)
if err != nil {
return
}
// Write pointer to directory
// XXX if someone else attempts to create the same directory at the same time, one of them will win and the other will get an orphaned directory.
// Without an atomic "create if not exists" for this next writeSmallFile operation, this can't be fixed.
err = f.writeSmallFile(ctx, path.Join(leafPath, dirIDC9r), []byte(newID))
return
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
return err
}
// MkdirMetadata makes the directory passed in as dir.
//
// It shouldn't return an error if it already exists.
//
// If the metadata is not nil it is set.
//
// It returns the directory that was created.
func (f *Fs) MkdirMetadata(ctx context.Context, dirPath string, metadata fs.Metadata) (dir fs.Directory, err error) {
do := f.wrapped.Features().MkdirMetadata
if do == nil {
return nil, errorNotSupportedByUnderlyingRemote
}
// First create the directory normally, then call MkdirMetadata to update its metadata.
// This ordering matters: if you call MkdirMetadata first, creating dirid.c9r afterwards will reset the mtime, which is one of the things the metadata can set.
dirID, err := f.dirCache.FindDir(ctx, dirPath, true)
if err != nil {
return nil, err
}
dir, err = do(ctx, f.dirIDPath(dirID), metadata)
if dir != nil {
dir = &Directory{DirWrapper: fs.NewDirWrapper(dirPath, dir), f: f}
}
return
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
dirID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return fmt.Errorf("failed to find ID for directory %q: %w", dir, err)
}
leaf, parentID, err := f.dirCache.FindPath(ctx, dir, false)
if err != nil {
return fmt.Errorf("failed to find ID for parent of directory %q: %w", dir, err)
}
// These need to get deleted, in this order
var (
// The dirid.c9r backup is likely in every directory and needs to be deleted before the directory.
dirIDBackup string
// Now the directory. But, if this fails (e.g. due to the directory not being empty), we need to go recreate the dir ID backup!
dirPath string
// Finally the pointer to the directory. First the file
dirPointerFile string
// Then the directory containing the pointer
dirPointerPath string
)
dirPath = f.dirIDPath(dirID)
dirIDBackup = path.Join(dirPath, dirIDBackupC9r)
dirPointerPath = f.leafPath(leaf, parentID)
dirPointerFile = path.Join(dirPointerPath, dirIDC9r)
// Quick check for if the directory is empty - someone else could create a file between this and the final rmdir, so we still need that code that recreates the dir ID backup!
entries, err := f.wrapped.List(ctx, dirPath)
if err != nil {
return err
}
empty := true
for _, entry := range entries {
if path.Base(entry.Remote()) != dirIDBackupC9r {
empty = false
break
}
}
if !empty {
return fs.ErrorDirectoryNotEmpty
}
// Now delete them
// dirIDBackup
obj, err := f.wrapped.NewObject(ctx, dirIDBackup)
if err == nil {
err = obj.Remove(ctx)
}
if err != nil && !errors.Is(err, fs.ErrorObjectNotFound) {
return fmt.Errorf("couldn't remove dir id backup: %w", err)
}
// dirPath
err = f.wrapped.Rmdir(ctx, dirPath)
if err != nil {
err = fmt.Errorf("failed to rmdir: %w", err)
// put the directory ID backup back!
data := f.encryptReader(bytes.NewBuffer([]byte(dirID)))
info := object.NewStaticObjectInfo(path.Join(dirPath, dirIDBackupC9r), time.Now(), -1, true, nil, nil)
_, err2 := f.wrapped.Put(ctx, data, info)
if err2 != nil {
err = fmt.Errorf("%w (also failed to restore dir id backup: %w)", err, err2)
}
return err
}
// dirPointerFile
obj, err = f.wrapped.NewObject(ctx, dirPointerFile)
if err == nil {
err = obj.Remove(ctx)
}
// dirPointerPath
if err == nil {
err = f.wrapped.Rmdir(ctx, dirPointerPath)
}
if err != nil {
return fmt.Errorf("couldn't rmdir dir pointer %q: %w", dirPointerFile, err)
}
f.dirCache.FlushDir(dir)
return nil
}
// -------- fs.Directory
// Directory wraps the underlying fs.Directory: the one named with a hash of the encrypted directory ID (which contains the subnodes and the dirid.c9r backup), not the little directory in its parent that just holds dir.c9r.
type Directory struct {
*fs.DirWrapper
f *Fs
}
// Fs returns read only access to the Fs that this object is part of
func (d *Directory) Fs() fs.Info { return d.f }
// -------- Objects
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// If remote points to a directory then it should return
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if errors.Is(err, fs.ErrorDirNotFound) {
return nil, fs.ErrorObjectNotFound
}
return nil, fmt.Errorf("failed to find ID for directory of file %q: %w", remote, err)
}
encryptedPath := f.leafPath(leaf, dirID)
wrappedObj, err := f.wrapped.NewObject(ctx, encryptedPath)
if err != nil {
return nil, err
}
return f.newDecryptingObject(wrappedObj, remote), nil
}
// DecryptingObject wraps the underlying fs.Object and handles decrypting it
type DecryptingObject struct {
fs.Object
f *Fs
decRemote string
}
func (f *Fs) newDecryptingObject(o fs.Object, decRemote string) *DecryptingObject {
return &DecryptingObject{
Object: o,
f: f,
decRemote: decRemote,
}
}
// TODO: override all relevant methods
// Fs returns read only access to the Fs that this object is part of
func (o *DecryptingObject) Fs() fs.Info { return o.f }
// Remote returns the decrypted remote path
func (o *DecryptingObject) Remote() string { return o.decRemote }
// String returns a description of the object
func (o *DecryptingObject) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Size returns the size of the object after being decrypted
func (o *DecryptingObject) Size() int64 {
return o.f.decryptedFileSize(o.Object.Size())
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// This calls Open on the object of the underlying remote with fs.SeekOption
// and fs.RangeOption removed. This is necessary because the file header
// contains all the information to decrypt the file.
//
// TODO: Since the files are encrypted in 32 KiB chunks, it would be possible to
// support real seek and range requests. However, it would be necessary to make
// two requests, one for the file header and one for the requested range.
//
// We wrap the reader of the underlying object to decrypt the data.
// - For fs.SeekOption we just discard all the bytes until we reach the Offset
// - For fs.RangeOption we do the same and then wrap the reader in io.LimitReader
func (o *DecryptingObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var offset, limit int64 = 0, -1
var newOptions []fs.OpenOption
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
newOptions = append(newOptions, option)
}
}
options = newOptions
reader, err := o.Object.Open(ctx, options...)
defer func() {
if err != nil && reader != nil {
_ = reader.Close()
}
}()
if err != nil {
return nil, err
}
var decryptReader io.Reader
decryptReader, err = o.f.newReader(reader)
if err != nil {
return nil, err
}
if _, err = io.CopyN(io.Discard, decryptReader, offset); err != nil {
return nil, err
}
if limit != -1 {
decryptReader = io.LimitReader(decryptReader, limit)
}
return struct {
io.Reader
io.Closer
}{
Reader: decryptReader,
Closer: reader,
}, nil
}
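As an illustration of the option handling above, a caller requesting a decrypted byte range might look like this (a hypothetical sketch; obj stands for a *DecryptingObject, and fs.RangeOption is rclone's inclusive byte-range option):

// Read the decrypted bytes 100..199. Internally the whole encrypted object
// is opened, the header is read, the first 100 plaintext bytes are
// discarded, and the reader is limited to the remaining 100 bytes.
rc, err := obj.Open(ctx, &fs.RangeOption{Start: 100, End: 199})
if err != nil {
	return err
}
defer func() { _ = rc.Close() }()
data, err := io.ReadAll(rc)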
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *DecryptingObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
encIn := o.f.encryptReader(in)
encSrc := &EncryptingObjectInfo{
ObjectInfo: src,
f: o.f,
encRemote: o.Object.Remote(),
}
return o.Object.Update(ctx, encIn, encSrc, options...)
}
// Hash returns no checksum as it is not possible to quickly obtain a hash of the plaintext of an encrypted file
func (o *DecryptingObject) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// -------- Put
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
encIn := f.encryptReader(in)
leaf, dirID, err := f.dirCache.FindPath(ctx, src.Remote(), true)
if err != nil {
return nil, err
}
encRemotePath := f.leafPath(leaf, dirID)
encSrc := &EncryptingObjectInfo{
ObjectInfo: src,
f: f,
encRemote: encRemotePath,
}
obj, err := put(ctx, encIn, encSrc, options...)
if obj != nil {
obj = f.newDecryptingObject(obj, src.Remote())
}
return obj, err
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.wrapped.Put)
}
// PutUnchecked uploads to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.wrapped.Features().PutUnchecked
if do == nil {
return nil, errorNotSupportedByUnderlyingRemote
}
return f.put(ctx, in, src, options, do)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.wrapped.Features().PutStream
if do == nil {
return nil, errorNotSupportedByUnderlyingRemote
}
return f.put(ctx, in, src, options, do)
}
// EncryptingObjectInfo wraps the ObjectInfo provided to Put and transforms its attributes to match the encrypted version of the file.
type EncryptingObjectInfo struct {
fs.ObjectInfo
f *Fs
encRemote string
}
// Fs returns read only access to the Fs that this object is part of
func (i *EncryptingObjectInfo) Fs() fs.Info { return i.f }
// Remote returns the encrypted remote path
func (i *EncryptingObjectInfo) Remote() string { return i.encRemote }
// String returns a description of the Object
func (i *EncryptingObjectInfo) String() string {
if i == nil {
return "<nil>"
}
return i.encRemote
}
// Size returns the size of the object after being encrypted
func (i *EncryptingObjectInfo) Size() int64 {
return i.f.encryptedFileSize(i.ObjectInfo.Size())
}
// Hash returns no checksum, as the hash of the encrypted data cannot be known until the content has actually been encrypted
func (i *EncryptingObjectInfo) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Copy src to this remote using server-side copy operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//
// Cryptomator: Can just pass through the copy operation, since the encryption of file contents is independent of the directory.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.wrapped.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*DecryptingObject)
if !ok {
return nil, fs.ErrorCantCopy
}
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
encryptedPath := f.leafPath(leaf, dirID)
obj, err := do(ctx, o.Object, encryptedPath)
if obj != nil {
obj = f.newDecryptingObject(obj, remote)
}
return obj, err
}
// Move src to this remote using server-side move operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
//
// Cryptomator: Can just pass through the move operation, since the encryption of file contents is independent of the directory.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.wrapped.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*DecryptingObject)
if !ok {
return nil, fs.ErrorCantMove
}
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
encryptedPath := f.leafPath(leaf, dirID)
obj, err := do(ctx, o.Object, encryptedPath)
if obj != nil {
obj = f.newDecryptingObject(obj, remote)
}
return obj, err
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
do := f.wrapped.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// TODO: It would be almost as easy to implement this operation without server-side support, by deleting and recreating the dir.c9r file (though it wouldn't be atomic).
_, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcEncPath := f.leafPath(srcLeaf, srcDirectoryID)
dstEncPath := f.leafPath(dstLeaf, dstDirectoryID)
err = do(ctx, srcFs.wrapped, srcEncPath, dstEncPath)
if err != nil {
return err
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// -------- private
// dirIDPath returns the encrypted path to the directory with a given ID.
func (f *Fs) dirIDPath(dirID string) string {
encryptedDirID := f.encryptDirID(dirID)
dirPath := path.Join("d", encryptedDirID[:2], encryptedDirID[2:])
// TODO: verify that dirid.c9r inside the directory contains dirID
return dirPath
}
// leafPath returns the encrypted path to a leaf node with the given name in the directory with the given ID.
func (f *Fs) leafPath(leaf, dirID string) string {
dirPath := f.dirIDPath(dirID)
encryptedFilename := f.encryptFilename(leaf, dirID)
return path.Join(dirPath, encryptedFilename+".c9r")
}
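Putting dirIDPath and leafPath together, the encrypted side of a vault is laid out roughly like this (illustrative names; the real segments are base32- and base64-encoded ciphertext):

d/
  AB/
    CDEFGHIJKLMNOPQRSTUVWXYZ234567/  <- dirIDPath: base32(sha1(siv(dirID))), split after two chars
      dirid.c9r                      <- encrypted backup of this directory's own ID
      xg5r...aa==.c9r                <- an encrypted file (leafPath)
      o3fp...am==.c9r/               <- a subdirectory entry
        dir.c9r                      <- the subdirectory's ID, pointing back into d/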
// encryptReader returns a reader that produces an encrypted version of the data in r, suitable for storing directly in the wrapped filesystem.
func (f *Fs) encryptReader(r io.Reader) io.Reader {
pipeReader, pipeWriter := io.Pipe()
go func() {
encWriter, err := f.newWriter(pipeWriter)
if err != nil {
pipeWriter.CloseWithError(err)
return
}
if _, err = io.Copy(encWriter, r); err != nil {
pipeWriter.CloseWithError(err)
return
}
pipeWriter.CloseWithError(encWriter.Close())
}()
return pipeReader
}
// readSmallFile reads a file in full from the wrapped filesystem and returns it as bytes.
func (f *Fs) readSmallFile(ctx context.Context, path string, maxLen int64) ([]byte, error) {
obj, err := f.wrapped.NewObject(ctx, path)
if err != nil {
return nil, err
}
if obj.Size() > maxLen {
return nil, errorMetaTooBig
}
reader, err := obj.Open(ctx)
if err != nil {
return nil, err
}
data, err := io.ReadAll(reader)
_ = reader.Close()
return data, err
}
// writeSmallFile writes a byte slice to a file in the wrapped filesystem.
func (f *Fs) writeSmallFile(ctx context.Context, path string, data []byte) error {
info := object.NewStaticObjectInfo(path, time.Now(), int64(len(data)), true, nil, nil)
_, err := f.wrapped.Put(ctx, bytes.NewReader(data), info)
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
// TODO: implement OpenChunkWriter. It's entirely possible to encrypt chunks of a file in parallel.
)

View File

@@ -0,0 +1,263 @@
// Test Cryptomator filesystem interface
package cryptomator_test
import (
"bufio"
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/cryptomator"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/rc/webgui"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/file"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/webdav"
)
var (
UnimplementableFsMethods = []string{
// TODO: implement these:
// It's not possible to complete this in one call, but Purge could still be implemented more efficiently than the fallback by
// recursing and deleting a full directory at a time (instead of deleting each file individually).
"Purge",
// MergeDirs could be implemented by merging the underlying directories, while taking care to leave the dirid.c9r alone.
"MergeDirs",
// OpenWriterAt could be implemented by a strategy such as: to write to a chunk, read and decrypt it, handle all writes, then reencrypt and upload.
"OpenWriterAt",
// OpenChunkWriter could be implemented, at least if the backend's chunk size is a multiple of Cryptomator's chunk size.
"OpenChunkWriter",
// Having ListR on the backend doesn't help at all for implementing it in Cryptomator.
"ListR",
// ChangeNotify would have to undo the dir to dir ID conversion, which is lossy. It can be done, but not without scanning and caching the full hierarchy.
"ChangeNotify",
}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*cryptomator.DecryptingObject)(nil),
TiersToTest: []string{"REDUCED_REDUNDANCY", "STANDARD"},
UnimplementableFsMethods: UnimplementableFsMethods,
})
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-cryptomator-test-standard")
name := "TestCryptomator"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cryptomator.DecryptingObject)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "cryptomator"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
},
QuickTestOK: true,
UnimplementableFsMethods: UnimplementableFsMethods,
})
}
func runCryptomator(ctx context.Context, t *testing.T, vaultPath string, password string) string {
// Download
cryptomatorCliDownload := map[string]map[string]string{
"darwin": {
"arm64": "https://github.com/cryptomator/cli/releases/download/0.6.1/cryptomator-cli-0.6.1-mac-arm64.zip",
"amd64": "https://github.com/cryptomator/cli/releases/download/0.6.1/cryptomator-cli-0.6.1-mac-x64.zip",
},
"linux": {
"arm64": "https://github.com/cryptomator/cli/releases/download/0.6.1/cryptomator-cli-0.6.1-linux-arm64.zip",
"amd64": "https://github.com/cryptomator/cli/releases/download/0.6.1/cryptomator-cli-0.6.1-linux-x64.zip",
},
}
var dlURL string
if archMap, ok := cryptomatorCliDownload[runtime.GOOS]; ok {
if url, ok := archMap[runtime.GOARCH]; ok {
dlURL = url
}
}
if dlURL == "" {
t.Skipf("no cryptomator download available for GOOS=%s GOOARCH=%s, skipping", runtime.GOOS, runtime.GOARCH)
}
cacheDir := filepath.Join(config.GetCacheDir(), "test-cryptomator")
zipPath := filepath.Join(cacheDir, path.Base(dlURL))
extractDir := filepath.Join(cacheDir, "bin")
err := file.MkdirAll(cacheDir, 0755)
require.NoError(t, err)
if _, err := os.Stat(zipPath); err != nil {
t.Logf("will download cryptomator from %s to %s", dlURL, zipPath)
err = os.RemoveAll(zipPath)
require.NoError(t, err)
err = webgui.DownloadFile(zipPath, dlURL)
require.NoError(t, err)
err = os.RemoveAll(extractDir)
require.NoError(t, err)
}
if _, err := os.Stat(extractDir); err != nil {
t.Logf("will extract from %s to %s", zipPath, extractDir)
err = webgui.Unzip(zipPath, extractDir)
require.NoError(t, err)
}
t.Logf("have cryptomator cli at %q", extractDir)
// Run
var exe string
switch runtime.GOOS {
case "darwin":
exe = filepath.Join(extractDir, "cryptomator-cli.app", "Contents", "MacOS", "cryptomator-cli")
case "linux":
exe = filepath.Join(extractDir, "cryptomator-cli", "bin", "cryptomator-cli")
}
err = os.Chmod(exe, 0755)
require.NoError(t, err)
cmd := exec.CommandContext(
ctx,
exe,
"unlock", vaultPath,
"--mounter=org.cryptomator.frontend.webdav.mount.FallbackMounter",
"--password:env=CRYPTOMATOR_PASSWORD",
)
cmd.Env = append(cmd.Env, fmt.Sprintf("CRYPTOMATOR_PASSWORD=%s", password))
cmd.Stderr = os.Stderr
stdout, err := cmd.StdoutPipe()
require.NoError(t, err)
err = cmd.Start()
require.NoError(t, err)
re := regexp.MustCompile(`Unlocked and mounted vault successfully to (\S+)`)
webdavURL := make(chan string)
go func() {
scanner := bufio.NewScanner(stdout)
done := false
for scanner.Scan() {
t.Log(scanner.Text())
if done {
continue
}
matches := re.FindSubmatch([]byte(scanner.Text()))
if matches != nil {
webdavURL <- string(matches[1])
done = true
}
}
}()
return <-webdavURL
}
// TestAgainstCryptomator tests rclone against the Cryptomator CLI
func TestAgainstCryptomator(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
localPath, err := fstest.LocalRemote()
require.NoError(t, err)
password := "potato"
t.Cleanup(func() {
_ = os.RemoveAll(localPath)
})
fstest.Initialise()
config.FileSetValue("TestCryptomatorRclone", "type", "cryptomator")
config.FileSetValue("TestCryptomatorRclone", "remote", localPath)
config.FileSetValue("TestCryptomatorRclone", "password", obscure.MustObscure(password))
rcloneFs, err := fs.NewFs(ctx, "TestCryptomatorRclone:")
require.NoError(t, err)
webdavURL := runCryptomator(ctx, t, localPath, password)
config.FileSetValue("TestCryptomatorCli", "type", "webdav")
config.FileSetValue("TestCryptomatorCli", "url", webdavURL)
cryptomFs, err := fs.NewFs(ctx, "TestCryptomatorCli:")
require.NoError(t, err)
check := func(items []fstest.Item, dirs []string) {
t.Logf("comparing %v with %v", cryptomFs, rcloneFs)
fstest.CheckListingWithPrecision(t, rcloneFs, items, dirs, fs.GetModifyWindow(ctx, cryptomFs))
fstest.CheckListingWithPrecision(t, cryptomFs, items, dirs, fs.GetModifyWindow(ctx, cryptomFs))
buf := &bytes.Buffer{}
err = operations.CheckDownload(ctx, &operations.CheckOpt{
Fdst: cryptomFs,
Fsrc: rcloneFs,
Combined: buf,
})
scan := bufio.NewScanner(buf)
for scan.Scan() {
line := scan.Text()
if strings.HasPrefix(line, "= ") {
t.Log(line)
continue
}
t.Error(line)
}
assert.NoError(t, err)
t.Logf("matched %v %v", items, dirs)
}
put := func(fs fs.Fs, path string, content string) (fstest.Item, fs.Object) {
now := time.Now()
obj, err := fs.Put(ctx, bytes.NewBufferString(content), object.NewStaticObjectInfo(path, now, -1, true, nil, nil))
assert.NoError(t, err)
item := fstest.NewItem(path, content, now)
return item, obj
}
get := func(fs fs.Fs, path string) fs.Object {
obj, err := fs.NewObject(ctx, path)
assert.NoError(t, err)
return obj
}
rclone1, _ := put(rcloneFs, "rclone1", "testing 123")
cryptom1, _ := put(cryptomFs, "cryptom1", "testing 456")
check([]fstest.Item{rclone1, cryptom1}, []string{})
err = rcloneFs.Mkdir(ctx, "rclone2")
assert.NoError(t, err)
err = cryptomFs.Mkdir(ctx, "cryptom2")
assert.NoError(t, err)
check([]fstest.Item{rclone1, cryptom1}, []string{"rclone2", "cryptom2"})
_, err = rcloneFs.Features().Move(ctx, get(rcloneFs, "cryptom1"), "rclone2/cryptom1")
assert.NoError(t, err)
rclone1.Path = "cryptom2/rclone1"
_, err = cryptomFs.Features().Move(ctx, get(cryptomFs, "rclone1"), "cryptom2/rclone1")
assert.NoError(t, err)
cryptom1.Path = "rclone2/cryptom1"
check([]fstest.Item{rclone1, cryptom1}, []string{"rclone2", "cryptom2"})
err = get(cryptomFs, rclone1.Path).Remove(ctx)
assert.NoError(t, err)
check([]fstest.Item{cryptom1}, []string{"rclone2", "cryptom2"})
err = get(rcloneFs, cryptom1.Path).Remove(ctx)
assert.NoError(t, err)
check([]fstest.Item{}, []string{"rclone2", "cryptom2"})
}

View File

@@ -0,0 +1,196 @@
package cryptomator
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"encoding/base32"
"encoding/base64"
"encoding/binary"
"fmt"
"hash"
"github.com/miscreant/miscreant.go"
)
const (
// cipherComboSivGcm uses AES-SIV for filenames and AES-GCM for contents. It is the current Cryptomator default.
cipherComboSivGcm = "SIV_GCM"
// cipherComboSivCtrMac uses AES-SIV for filenames and AES-CTR plus an HMAC for contents. It was the default until Cryptomator 1.7.
cipherComboSivCtrMac = "SIV_CTRMAC"
)
// cryptor implements encryption operations for Cryptomator vaults.
type cryptor struct {
masterKey masterKey
sivKey []byte
cipherCombo string
contentCryptor
}
type contentCryptor interface {
encryptChunk(plaintext, nonce, additionalData []byte) (ciphertext []byte)
decryptChunk(ciphertext, additionalData []byte) ([]byte, error)
fileAssociatedData(fileNonce []byte, chunkNr uint64) []byte
nonceSize() int
tagSize() int
}
// newCryptor creates a new cryptor from vault configuration.
func newCryptor(key masterKey, cipherCombo string) (c cryptor, err error) {
c.masterKey = key
c.sivKey = append(key.MacKey, key.EncryptKey...)
c.cipherCombo = cipherCombo
c.contentCryptor, err = c.newContentCryptor(key.EncryptKey)
if err != nil {
return
}
return
}
func (c *cryptor) newSIV() *miscreant.Cipher {
siv, err := miscreant.NewAESCMACSIV(c.sivKey)
if err != nil {
panic(err)
}
return siv
}
// encryptDirID encrypts a directory ID.
func (c *cryptor) encryptDirID(dirID string) string {
ciphertext, err := c.newSIV().Seal(nil, []byte(dirID))
if err != nil {
// Seal can only actually fail if you pass in more than 126 associated data items.
panic(err)
}
hash := sha1.Sum(ciphertext)
return base32.StdEncoding.EncodeToString(hash[:])
}
// encryptFilename encrypts a filename.
func (c *cryptor) encryptFilename(filename string, dirID string) string {
ciphertext, err := c.newSIV().Seal(nil, []byte(filename), []byte(dirID))
if err != nil {
// Seal can only actually fail if you pass in more than 126 associated data items.
panic(err)
}
return base64.URLEncoding.EncodeToString(ciphertext)
}
// decryptFilename decrypts a filename.
func (c *cryptor) decryptFilename(filename string, dirID string) (string, error) {
filenameBytes, err := base64.URLEncoding.DecodeString(filename)
if err != nil {
return "", err
}
plaintext, err := c.newSIV().Open(nil, filenameBytes, []byte(dirID))
if err != nil {
return "", err
}
return string(plaintext), nil
}
func (c *cryptor) newContentCryptor(key []byte) (contentCryptor, error) {
aes, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
switch c.cipherCombo {
default:
return nil, fmt.Errorf("unsupported cipher combo %q", c.cipherCombo)
case cipherComboSivGcm:
aesGcm, err := cipher.NewGCM(aes)
if err != nil {
return nil, err
}
return &gcmCryptor{aesGcm}, nil
case cipherComboSivCtrMac:
return &ctrMacCryptor{aes: aes, hmacKey: c.masterKey.MacKey}, nil
}
}
// encryptionOverhead returns how much longer a payload of the given size would be made by encryptChunk.
func (c *cryptor) encryptionOverhead() int {
return c.nonceSize() + c.tagSize()
}
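With the nonce and tag sizes defined below, the per-chunk overhead works out to:

SIV_GCM:    12-byte nonce + 16-byte GCM tag         = 28 bytes
SIV_CTRMAC: 16-byte IV    + 32-byte HMAC-SHA256 tag = 48 bytes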
type gcmCryptor struct {
aesGcm cipher.AEAD
}
func (*gcmCryptor) nonceSize() int { return 12 }
func (*gcmCryptor) tagSize() int { return 16 }
func (c *gcmCryptor) encryptChunk(payload, nonce, additionalData []byte) (ciphertext []byte) {
buf := bytes.Buffer{}
buf.Write(nonce)
buf.Write(c.aesGcm.Seal(nil, nonce, payload, additionalData))
return buf.Bytes()
}
func (c *gcmCryptor) decryptChunk(chunk, additionalData []byte) ([]byte, error) {
nonce := chunk[:c.nonceSize()]
return c.aesGcm.Open(nil, nonce, chunk[c.nonceSize():], additionalData)
}
func (c *gcmCryptor) fileAssociatedData(fileNonce []byte, chunkNr uint64) []byte {
buf := bytes.Buffer{}
_ = binary.Write(&buf, binary.BigEndian, chunkNr)
buf.Write(fileNonce)
return buf.Bytes()
}
type ctrMacCryptor struct {
aes cipher.Block
hmacKey []byte
}
func (*ctrMacCryptor) nonceSize() int { return 16 }
func (*ctrMacCryptor) tagSize() int { return 32 }
func (c *ctrMacCryptor) newCTR(nonce []byte) cipher.Stream { return cipher.NewCTR(c.aes, nonce) }
func (c *ctrMacCryptor) newHMAC() hash.Hash { return hmac.New(sha256.New, c.hmacKey) }
func (c *ctrMacCryptor) encryptChunk(payload, nonce, additionalData []byte) (ciphertext []byte) {
c.newCTR(nonce).XORKeyStream(payload, payload)
buf := bytes.Buffer{}
buf.Write(nonce)
buf.Write(payload)
hash := c.newHMAC()
hash.Write(additionalData)
hash.Write(buf.Bytes())
buf.Write(hash.Sum(nil))
return buf.Bytes()
}
func (c *ctrMacCryptor) decryptChunk(chunk, additionalData []byte) ([]byte, error) {
startMac := len(chunk) - c.tagSize()
mac := chunk[startMac:]
chunk = chunk[:startMac]
hash := c.newHMAC()
hash.Write(additionalData)
hash.Write(chunk)
if !hmac.Equal(mac, hash.Sum(nil)) {
return nil, fmt.Errorf("hmac failed")
}
nonce := chunk[:c.nonceSize()]
chunk = chunk[c.nonceSize():]
c.newCTR(nonce).XORKeyStream(chunk, chunk)
return chunk, nil
}
func (c *ctrMacCryptor) fileAssociatedData(fileNonce []byte, chunkNr uint64) []byte {
buf := bytes.Buffer{}
buf.Write(fileNonce)
_ = binary.Write(&buf, binary.BigEndian, chunkNr)
return buf.Bytes()
}

View File

@@ -0,0 +1,40 @@
package cryptomator
import (
"testing"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
var cipherCombos = []string{cipherComboSivCtrMac, cipherComboSivGcm}
func drawCipherCombo(t *rapid.T) string {
return rapid.SampledFrom(cipherCombos).Draw(t, "cipherCombo")
}
func drawMasterKey(t *rapid.T) masterKey {
encKey := fixedSizeByteArray(masterEncryptKeySize).Draw(t, "encKey")
macKey := fixedSizeByteArray(masterMacKeySize).Draw(t, "macKey")
return masterKey{EncryptKey: encKey, MacKey: macKey}
}
func drawTestCryptor(t *rapid.T) *cryptor {
cryptor, err := newCryptor(drawMasterKey(t), drawCipherCombo(t))
assert.NoError(t, err, "creating cryptor")
return &cryptor
}
func TestEncryptDecryptFilename(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
name := rapid.String().Draw(t, "name")
dirID := rapid.String().Draw(t, "dirID")
cryptor := drawTestCryptor(t)
encName := cryptor.encryptFilename(name, dirID)
decName, err := cryptor.decryptFilename(encName, dirID)
assert.NoError(t, err, "decryption error")
assert.Equal(t, name, decName)
})
}

View File

@@ -0,0 +1,106 @@
package cryptomator
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"io"
"unsafe"
)
// fileHeader is the header of an encrypted Cryptomator file.
type fileHeader struct {
// The nonce used to encrypt the file header. Each file content chunk, while encrypted with its own nonce, also mixes the file header nonce into the MAC.
Nonce []byte
Reserved []byte
// The AES key used to encrypt the file contents.
ContentKey []byte
}
const (
// headerContentKeySize is the size of the ContentKey in the FileHeader.
headerContentKeySize = 32
// headerReservedSize is the size of the Reserved data in the FileHeader.
headerReservedSize = 8
// headerPayloadSize is the size of the encrypted part of the file header.
headerPayloadSize = headerContentKeySize + headerReservedSize
// headerReservedValue is the expected value of the Reserved data.
headerReservedValue uint64 = 0xFFFFFFFFFFFFFFFF
)
// NewHeader creates a new randomly initialized fileHeader
func (c *cryptor) NewHeader() (header fileHeader, err error) {
header.Nonce = make([]byte, c.nonceSize())
header.ContentKey = make([]byte, headerContentKeySize)
header.Reserved = make([]byte, headerReservedSize)
if _, err = rand.Read(header.Nonce); err != nil {
return
}
if _, err = rand.Read(header.ContentKey); err != nil {
return
}
binary.BigEndian.PutUint64(header.Reserved, headerReservedValue)
return
}
type headerPayload struct {
Reserved [headerReservedSize]byte
ContentKey [headerContentKeySize]byte
}
// Compile-time check that headerPayload is exactly headerPayloadSize bytes.
var _ [0]struct{} = [unsafe.Sizeof(headerPayload{}) - headerPayloadSize]struct{}{}
func copySameLength(dst, src []byte, name string) error {
if len(dst) != len(src) {
return fmt.Errorf("incorrect length of %s: expected %d got %d", name, len(dst), len(src))
}
copy(dst, src)
return nil
}
// marshalHeader encrypts the header and writes it in encrypted form to the writer.
func (c *cryptor) marshalHeader(w io.Writer, h fileHeader) (err error) {
var payload headerPayload
if err = copySameLength(payload.Reserved[:], h.Reserved, "Reserved"); err != nil {
return
}
if err = copySameLength(payload.ContentKey[:], h.ContentKey, "ContentKey"); err != nil {
return
}
var encBuffer bytes.Buffer
if err = binary.Write(&encBuffer, binary.BigEndian, &payload); err != nil {
return
}
encPayload := c.encryptChunk(encBuffer.Bytes(), h.Nonce, nil)
_, err = w.Write(encPayload)
return
}
// unmarshalHeader reads an encrypted header from the reader and decrypts it.
func (c *cryptor) unmarshalHeader(r io.Reader) (header fileHeader, err error) {
encHeader := make([]byte, c.nonceSize()+headerPayloadSize+c.tagSize())
_, err = io.ReadFull(r, encHeader)
if err != nil {
return
}
nonce := encHeader[:c.nonceSize()]
encHeader, err = c.decryptChunk(encHeader, nil)
if err != nil {
return
}
var payload headerPayload
if err = binary.Read(bytes.NewReader(encHeader), binary.BigEndian, &payload); err != nil {
return
}
header.Nonce = nonce
header.ContentKey = payload.ContentKey[:]
header.Reserved = payload.Reserved[:]
return
}

View File

@@ -0,0 +1,95 @@
package cryptomator
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
func TestHeaderNew(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
cryptor := drawTestCryptor(t)
h, err := cryptor.NewHeader()
assert.NoError(t, err)
assert.Len(t, h.Nonce, cryptor.nonceSize())
assert.Len(t, h.ContentKey, headerContentKeySize)
assert.Len(t, h.Reserved, headerReservedSize)
assert.Equal(t, headerReservedValue, binary.BigEndian.Uint64(h.Reserved))
})
}
func TestHeaderRoundTrip(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
buf := &bytes.Buffer{}
cryptor := drawTestCryptor(t)
h1, err := cryptor.NewHeader()
assert.NoError(t, err)
err = cryptor.marshalHeader(buf, h1)
assert.NoError(t, err)
assert.Len(t, buf.Bytes(), headerPayloadSize+cryptor.encryptionOverhead())
h2, err := cryptor.unmarshalHeader(buf)
assert.NoError(t, err)
assert.Equal(t, h1, h2)
})
}
type encHeader struct {
CipherCombo string
Header []byte
EncKey []byte
MacKey []byte
}
func TestUnmarshalReferenceHeader(t *testing.T) {
paths, err := filepath.Glob(filepath.Join("testdata", "header*.input"))
assert.NoError(t, err)
for _, path := range paths {
filename := filepath.Base(path)
testname := strings.TrimSuffix(filename, filepath.Ext(filename))
input, err := os.ReadFile(path)
assert.NoError(t, err)
golden, err := os.ReadFile(filepath.Join("testdata", testname+".golden"))
assert.NoError(t, err)
var encHeaders map[string]encHeader
err = json.Unmarshal(input, &encHeaders)
assert.NoError(t, err)
var headers map[string]fileHeader
err = json.Unmarshal(golden, &headers)
assert.NoError(t, err)
for name, encHeader := range encHeaders {
t.Run(fmt.Sprintf("%s:%s", testname, name), func(t *testing.T) {
key := masterKey{EncryptKey: encHeader.EncKey, MacKey: encHeader.MacKey}
cryptor, err := newCryptor(key, encHeader.CipherCombo)
assert.NoError(t, err)
buf := bytes.NewBuffer(encHeader.Header)
h, err := cryptor.unmarshalHeader(buf)
assert.NoError(t, err)
assert.Equal(t, headers[name], h)
})
}
}
}

View File

@@ -0,0 +1,131 @@
package cryptomator
import (
"crypto/aes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"encoding/json"
"fmt"
"io"
aeswrap "github.com/NickBall/go-aes-key-wrap"
"golang.org/x/crypto/scrypt"
)
const (
masterEncryptKeySize = 32
masterMacKeySize = masterEncryptKeySize
masterDefaultVersion = 999
masterDefaultScryptCostParam = 32 * 1024
masterDefaultScryptBlockSize = 8
masterDefaultScryptSaltSize = 32
)
// masterKey is the master key for a Cryptomator vault, typically saved in masterkey.cryptomator at the root of the vault.
type masterKey struct {
EncryptKey []byte
MacKey []byte
}
type encryptedMasterKey struct {
ScryptSalt []byte `json:"scryptSalt"`
ScryptCostParam int `json:"scryptCostParam"`
ScryptBlockSize int `json:"scryptBlockSize"`
PrimaryMasterKey []byte `json:"primaryMasterKey"`
HmacMasterKey []byte `json:"hmacMasterKey"`
// Deprecated: Vault format 8 no longer uses this field.
// When compatibility with older vault formats is implemented, code will need to be added to verify this field against VersionMac.
Version uint32 `json:"version"`
// Deprecated: Vault format 8 no longer uses this field.
VersionMac []byte `json:"versionMac"`
}
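Serialized with encoding/json, which encodes []byte fields as base64, a masterkey.cryptomator file therefore has roughly this shape (placeholder values, not real key material):

{
  "scryptSalt": "QUFBQUFBQUE=",
  "scryptCostParam": 32768,
  "scryptBlockSize": 8,
  "primaryMasterKey": "<base64 of the AES-key-wrapped encrypt key>",
  "hmacMasterKey": "<base64 of the AES-key-wrapped mac key>",
  "version": 999,
  "versionMac": "<base64 HMAC-SHA256 of the version>"
}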
// newMasterKey creates a new randomly initialized masterKey.
func newMasterKey() (m masterKey, err error) {
m.EncryptKey = make([]byte, masterEncryptKeySize)
m.MacKey = make([]byte, masterMacKeySize)
if _, err = rand.Read(m.EncryptKey); err != nil {
return
}
_, err = rand.Read(m.MacKey)
return
}
// Marshal encrypts the masterKey with a passphrase and writes it.
func (m masterKey) Marshal(w io.Writer, passphrase string) (err error) {
encKey := encryptedMasterKey{
Version: masterDefaultVersion,
ScryptCostParam: masterDefaultScryptCostParam,
ScryptBlockSize: masterDefaultScryptBlockSize,
}
encKey.ScryptSalt = make([]byte, masterDefaultScryptSaltSize)
if _, err = rand.Read(encKey.ScryptSalt); err != nil {
return
}
kek, err := scrypt.Key([]byte(passphrase), encKey.ScryptSalt, encKey.ScryptCostParam, encKey.ScryptBlockSize, 1, masterEncryptKeySize)
if err != nil {
return
}
cipher, err := aes.NewCipher(kek)
if err != nil {
return
}
if encKey.PrimaryMasterKey, err = aeswrap.Wrap(cipher, m.EncryptKey); err != nil {
return
}
if encKey.HmacMasterKey, err = aeswrap.Wrap(cipher, m.MacKey); err != nil {
return
}
hash := hmac.New(sha256.New, m.MacKey)
if err = binary.Write(hash, binary.BigEndian, encKey.Version); err != nil {
return
}
encKey.VersionMac = hash.Sum(nil)
err = json.NewEncoder(w).Encode(encKey)
return
}
// unmarshalMasterKey reads the master key and decrypts it with a passphrase.
func unmarshalMasterKey(r io.Reader, passphrase string) (m masterKey, err error) {
encKey := &encryptedMasterKey{}
if err = json.NewDecoder(r).Decode(encKey); err != nil {
err = fmt.Errorf("failed to parse master key json: %w", err)
return
}
kek, err := scrypt.Key([]byte(passphrase), encKey.ScryptSalt, encKey.ScryptCostParam, encKey.ScryptBlockSize, 1, masterEncryptKeySize)
if err != nil {
return
}
cipher, err := aes.NewCipher(kek)
if err != nil {
return
}
if m.EncryptKey, err = aeswrap.Unwrap(cipher, encKey.PrimaryMasterKey); err != nil {
err = fmt.Errorf("failed to unwrap primary key: %w", err)
return
}
if m.MacKey, err = aeswrap.Unwrap(cipher, encKey.HmacMasterKey); err != nil {
err = fmt.Errorf("failed to unwrap hmac key: %w", err)
return
}
return
}

View File

@@ -0,0 +1,87 @@
package cryptomator
import (
"bytes"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
func TestNewMasterKey(t *testing.T) {
k, err := newMasterKey()
assert.NoError(t, err, "got an error while creating the master key")
assert.Len(t, k.EncryptKey, masterEncryptKeySize, "invalid encryption key size")
assert.Len(t, k.MacKey, masterMacKeySize, "invalid mac key size")
}
func TestMasterKeyRoundTrip(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
passphrase := rapid.String().Draw(t, "passphrase")
k1, err := newMasterKey()
assert.NoError(t, err, "got an error while creating the master key")
buf := &bytes.Buffer{}
err = k1.Marshal(buf, passphrase)
assert.NoError(t, err, "got an error while marshalling")
assert.NotEmpty(t, buf.Bytes(), "buffer is empty after marshalling")
k2, err := unmarshalMasterKey(buf, passphrase)
assert.NoError(t, err, "got an error while unmarshalling")
assert.Empty(t, buf.Bytes(), "buffer is not empty after unmarshalling")
assert.Equal(t, k1, k2)
})
}
type encKey struct {
EncryptedMasterKey []byte
Passphrase string
}
func TestMasterKeyUnmarshalReference(t *testing.T) {
paths, err := filepath.Glob(filepath.Join("testdata", "masterkey*.input"))
assert.NoError(t, err)
for _, path := range paths {
filename := filepath.Base(path)
testname := strings.TrimSuffix(filename, filepath.Ext(filename))
input, err := os.ReadFile(path)
assert.NoError(t, err)
golden, err := os.ReadFile(filepath.Join("testdata", testname+".golden"))
assert.NoError(t, err)
var encKeys map[string]encKey
err = json.Unmarshal(input, &encKeys)
assert.NoError(t, err)
var keys map[string]masterKey
err = json.Unmarshal(golden, &keys)
assert.NoError(t, err)
for name, encKey := range encKeys {
t.Run(fmt.Sprintf("%s:%s", testname, name), func(t *testing.T) {
buf := bytes.NewBuffer(encKey.EncryptedMasterKey)
h, err := unmarshalMasterKey(buf, encKey.Passphrase)
assert.NoError(t, err)
assert.Empty(t, buf.Bytes())
assert.Equal(t, keys[name], h)
})
}
}
}

View File

@@ -0,0 +1,260 @@
package cryptomator
import (
"crypto/rand"
"errors"
"fmt"
"io"
)
const (
// ChunkPayloadSize is the size of the plaintext payload of each encrypted file chunk.
ChunkPayloadSize = 32 * 1024
)
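The on-disk format produced by the writer below, and consumed by the reader, is the encrypted header followed by a sequence of independently authenticated chunks:

header:  headerNonce || encrypt(reserved || contentKey) || tag
chunk i: chunkNonce  || encrypt(up to 32 KiB of payload) || tag

Each chunk's authentication additionally covers the chunk number and the header nonce (see fileAssociatedData), so chunks cannot be reordered or transplanted between files without detection.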
// encryptedFileSize returns the size of the file after encrypting it
func (c *cryptor) encryptedFileSize(size int64) int64 {
overhead := int64(c.encryptionOverhead())
fullChunksSize := (size / ChunkPayloadSize) * (ChunkPayloadSize + overhead)
rest := size % ChunkPayloadSize
if rest > 0 {
rest += overhead
}
return headerPayloadSize + overhead + fullChunksSize + rest
}
// decryptedFileSize returns the size of the file after decrypting it
func (c *cryptor) decryptedFileSize(size int64) int64 {
overhead := int64(c.encryptionOverhead())
size = size - headerPayloadSize - overhead
fullChunksSize := (size / (ChunkPayloadSize + overhead)) * ChunkPayloadSize
rest := size % (ChunkPayloadSize + overhead)
if rest > 0 {
rest -= overhead
}
return fullChunksSize + rest
}
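A worked example of this arithmetic, as a test sketch (it assumes the SIV_GCM sizes, 28 bytes of per-chunk overhead plus the 40-byte header payload, and the testify assert package already used by this backend's tests):

func TestFileSizeArithmetic(t *testing.T) {
	key := masterKey{
		EncryptKey: make([]byte, masterEncryptKeySize),
		MacKey:     make([]byte, masterMacKeySize),
	}
	c, err := newCryptor(key, cipherComboSivGcm)
	assert.NoError(t, err)
	// header (40+28) + one full chunk (32768+28) + tail (7232+28) = 40124
	assert.Equal(t, int64(40124), c.encryptedFileSize(40000))
	assert.Equal(t, int64(40000), c.decryptedFileSize(40124))
}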
const (
lastChunk = true
notLastChunk = false
)
// reader decrypts a Cryptomator file as it is read from.
type reader struct {
cryptor contentCryptor
header fileHeader
src io.Reader
unread []byte
buf []byte
chunkNr uint64
err error
}
// newContentReader creates a new reader for the file content using the previously read file header.
func (c *cryptor) newContentReader(src io.Reader, header fileHeader) (*reader, error) {
cryptor, err := c.newContentCryptor(header.ContentKey)
if err != nil {
return nil, err
}
return &reader{
cryptor: cryptor,
header: header,
src: src,
buf: make([]byte, ChunkPayloadSize+c.encryptionOverhead()),
}, nil
}
// newReader reads the file header and returns a Reader for the content.
func (c *cryptor) newReader(src io.Reader) (r *reader, err error) {
header, err := c.unmarshalHeader(src)
if err != nil {
return
}
return c.newContentReader(src, header)
}
func (r *reader) Read(p []byte) (int, error) {
if len(r.unread) > 0 {
n := copy(p, r.unread)
r.unread = r.unread[n:]
return n, nil
}
if r.err != nil {
return 0, r.err
}
if len(p) == 0 {
return 0, nil
}
last, err := r.readChunk()
if err != nil {
r.err = err
return 0, err
}
n := copy(p, r.unread)
r.unread = r.unread[n:]
if last {
if _, err := r.src.Read(make([]byte, 1)); err == nil {
r.err = errors.New("trailing data after end of encrypted file")
} else if err != io.EOF {
r.err = fmt.Errorf("non-EOF error reading after end of encrypted file: %w", err)
} else {
r.err = io.EOF
}
}
return n, nil
}
func (r *reader) readChunk() (last bool, err error) {
if len(r.unread) != 0 {
panic("stream: internal error: readChunk called with dirty buffer")
}
in := r.buf
n, err := io.ReadFull(r.src, in)
switch {
case err == io.EOF:
// TODO
// return false, io.ErrUnexpectedEOF
return true, nil
case err == io.ErrUnexpectedEOF:
last = true
in = in[:n]
case err != nil:
return false, err
}
ad := r.cryptor.fileAssociatedData(r.header.Nonce, r.chunkNr)
payload, err := r.cryptor.decryptChunk(in, ad)
if err != nil {
return
}
r.chunkNr++
r.unread = r.buf[:copy(r.buf, payload)]
return last, nil
}
// writer encrypts a Cryptomator file as it is written to.
type writer struct {
cryptor contentCryptor
header fileHeader
dst io.Writer
unwritten []byte
buf []byte
err error
chunkNr uint64
}
// newContentWriter creates a new Writer for the file content using the already written file header.
func (c *cryptor) newContentWriter(dst io.Writer, header fileHeader) (*writer, error) {
cryptor, err := c.newContentCryptor(header.ContentKey)
if err != nil {
return nil, err
}
w := &writer{
cryptor: cryptor,
header: header,
dst: dst,
buf: make([]byte, ChunkPayloadSize+c.encryptionOverhead()),
}
w.unwritten = w.buf[:0]
return w, nil
}
// newWriter creates and writes a random file header and returns a writer for the file content.
func (c *cryptor) newWriter(dst io.Writer) (w *writer, err error) {
header, err := c.NewHeader()
if err != nil {
return
}
err = c.marshalHeader(dst, header)
if err != nil {
return
}
return c.newContentWriter(dst, header)
}
func (w *writer) Write(p []byte) (n int, err error) {
if w.err != nil {
return 0, w.err
}
if len(p) == 0 {
return 0, nil
}
total := len(p)
for len(p) > 0 {
freeBuf := w.buf[len(w.unwritten):ChunkPayloadSize]
n := copy(freeBuf, p)
p = p[n:]
w.unwritten = w.unwritten[:len(w.unwritten)+n]
if len(w.unwritten) == ChunkPayloadSize && len(p) > 0 {
if err := w.flushChunk(notLastChunk); err != nil {
w.err = err
return 0, err
}
}
}
return total, nil
}
// Close flushes the last chunk. It doesn't close the underlying Writer.
func (w *writer) Close() error {
if w.err != nil {
return w.err
}
w.err = w.flushChunk(lastChunk)
if w.err != nil {
return w.err
}
w.err = errors.New("stream.Writer is already closed")
return nil
}
func (w *writer) flushChunk(last bool) error {
if !last && len(w.unwritten) != ChunkPayloadSize {
panic("stream: internal error: flush called with partial chunk")
}
if len(w.unwritten) == 0 {
return nil
}
nonce := make([]byte, w.cryptor.nonceSize())
_, err := rand.Read(nonce)
if err != nil {
return fmt.Errorf("stream: generating nonce failed: %w", err)
}
ad := w.cryptor.fileAssociatedData(w.header.Nonce, w.chunkNr)
out := w.cryptor.encryptChunk(w.unwritten, nonce, ad)
_, err = w.dst.Write(out)
w.unwritten = w.buf[:0]
w.chunkNr++
return err
}

View File

@@ -0,0 +1,197 @@
package cryptomator
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
type encryptedFile struct {
CipherCombo string
ContentKey []byte
Nonce []byte
MacKey []byte
Ciphertext []byte
}
func TestDecryptReferenceStream(t *testing.T) {
paths, err := filepath.Glob(filepath.Join("testdata", "stream*.input"))
assert.NoError(t, err)
for _, path := range paths {
filename := filepath.Base(path)
testname := strings.TrimSuffix(filename, filepath.Ext(filename))
input, err := os.ReadFile(path)
assert.NoError(t, err)
golden, err := os.ReadFile(filepath.Join("testdata", testname+".golden"))
assert.NoError(t, err)
var encFiles map[string]encryptedFile
err = json.Unmarshal(input, &encFiles)
assert.NoError(t, err)
var plainTexts map[string][]byte
err = json.Unmarshal(golden, &plainTexts)
assert.NoError(t, err)
for name, encFile := range encFiles {
t.Run(fmt.Sprintf("%s:%s", testname, name), func(t *testing.T) {
buf := bytes.NewBuffer(encFile.Ciphertext)
key := masterKey{EncryptKey: make([]byte, masterEncryptKeySize), MacKey: encFile.MacKey}
cryptor, err := newCryptor(key, encFile.CipherCombo)
assert.NoError(t, err)
header := fileHeader{ContentKey: encFile.ContentKey, Nonce: encFile.Nonce}
r, err := cryptor.newContentReader(buf, header)
assert.NoError(t, err)
output, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, plainTexts[name], output)
})
}
}
}
func TestStreamRoundTrip(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
stepSize := rapid.SampledFrom([]int{512, 600, 1000, ChunkPayloadSize}).Draw(t, "stepSize")
// Cap the length: drawing anywhere near math.MaxInt bytes causes
// memory problems.
maxLength := 10000
length := rapid.IntRange(0, maxLength).Draw(t, "length")
src := fixedSizeByteArray(length).Draw(t, "src")
cryptor := drawTestCryptor(t)
nonce := fixedSizeByteArray(cryptor.nonceSize()).Draw(t, "nonce")
contentKey := fixedSizeByteArray(headerContentKeySize).Draw(t, "contentKey")
header := fileHeader{ContentKey: contentKey, Nonce: nonce}
buf := &bytes.Buffer{}
w, err := cryptor.newContentWriter(buf, header)
assert.NoError(t, err)
n := 0
for n < length {
b := length - n
if b > stepSize {
b = stepSize
}
nn, err := w.Write(src[n : n+b])
assert.NoError(t, err)
assert.Equal(t, b, nn, "wrong number of bytes written")
n += nn
nn, err = w.Write(src[n:n])
assert.NoError(t, err)
assert.Zero(t, nn, "more than 0 bytes written")
}
err = w.Close()
assert.NoError(t, err, "close returned an error")
t.Logf("buffer size: %d", buf.Len())
r, err := cryptor.newContentReader(buf, header)
assert.NoError(t, err)
n = 0
readBuf := make([]byte, stepSize)
for n < length {
nn, err := r.Read(readBuf)
assert.NoErrorf(t, err, "read error at index %d", n)
assert.Equalf(t, src[n:n+nn], readBuf[:nn], "wrong data at indexes %d - %d", n, n+nn)
if nn == 0 {
t.Fatal("read made no progress") // Avoid infinite loop
}
n += nn
}
})
}
func TestHeaderWriter(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
maxLength := 10000
length := rapid.IntRange(0, maxLength).Draw(t, "length")
data := fixedSizeByteArray(length).Draw(t, "src")
cryptor := drawTestCryptor(t)
buf := &bytes.Buffer{}
w, err := cryptor.newWriter(buf)
assert.NoError(t, err)
_, err = w.Write(data)
assert.NoError(t, err)
err = w.Close()
assert.NoError(t, err)
header, err := cryptor.unmarshalHeader(buf)
assert.NoError(t, err)
r, err := cryptor.newContentReader(buf, header)
assert.NoError(t, err)
readBuf := make([]byte, length)
_, err = io.ReadFull(r, readBuf)
assert.NoError(t, err)
assert.Equal(t, data, readBuf)
})
}
func TestHeaderReader(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
maxLength := 10000
length := rapid.IntRange(0, maxLength).Draw(t, "length")
data := fixedSizeByteArray(length).Draw(t, "src")
cryptor := drawTestCryptor(t)
buf := &bytes.Buffer{}
header, err := cryptor.NewHeader()
assert.NoError(t, err)
err = cryptor.marshalHeader(buf, header)
assert.NoError(t, err)
w, err := cryptor.newContentWriter(buf, header)
assert.NoError(t, err)
_, err = w.Write(data)
assert.NoError(t, err)
err = w.Close()
assert.NoError(t, err)
r, err := cryptor.newReader(buf)
assert.NoError(t, err)
readBuf := make([]byte, length)
_, err = io.ReadFull(r, readBuf)
assert.NoError(t, err)
assert.Equal(t, data, readBuf)
})
}
func TestEncryptedSize(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
key := drawMasterKey(t)
cryptor, err := newCryptor(key, cipherComboSivGcm)
assert.NoError(t, err)
assert.EqualValues(t, 196, cryptor.encryptedFileSize(100))
assert.EqualValues(t, 100, cryptor.decryptedFileSize(196))
})
}

View File

@ -0,0 +1,12 @@
{
"siv_ctrmac": {
"Nonce": "mDCOS8MT0etiyDKaXv8X0Q==",
"Reserved": "//////////8=",
"ContentKey": "RgbuFMgbZP051gzgXp8WbQNJ5y50eNyyBL/6qdQNn6c="
},
"siv_gcm": {
"Nonce": "WSGqVbG3C7X981Cd",
"Reserved": "//////////8=",
"ContentKey": "dBS1BH/nsGxL5AF6Z0I+P6V5PSelEFarrIylAIqWmlc="
}
}

View File

@ -0,0 +1,14 @@
{
"siv_ctrmac": {
"CipherCombo": "SIV_CTRMAC",
"Header": "mDCOS8MT0etiyDKaXv8X0cD1ayTewk/Uir95AZO5t4bwy8sQm5yl23Y0XxyQtg43/IQJLyHLJSuVfKLEQuPmncqbbndSiMzU2iS00Mo9lM3/1XCmLJWmig==",
"EncKey": "ABygFz/iZU0HXYlE5NtYFPgEmKqyWEh4wGjHo0fJgNY=",
"MacKey": "lwdzbaZqIfibYj8LSjcUAqpOJfdku/qN13f2OZWpxDk="
},
"siv_gcm": {
"CipherCombo": "SIV_GCM",
"Header": "WSGqVbG3C7X981CdJIiOFFht5ldI6GzWWFhzPSNaKbw8DZcvjoMVB3tiNNRmFTS2K1Y8rBYx0c18Bbj7Tr/gYuseGMY=",
"EncKey": "gk3ygEvJMA/z4e4QzItHXtQmM2GEHaXN9fm9esQxTcQ=",
"MacKey": "zUq4ZFL65Fz6w9htPAggqfllEv3OUQDAbZZJc2eIyJ8="
}
}

View File

@ -0,0 +1,6 @@
{
"example": {
"EncryptKey": "xcYD4Ftr2zXebvrJ7ccCfIwOMFtJpBLBr62/9rXyCZk=",
"MacKey": "CY1D3+ZyelyFZlXZPidWNjcJqAleyUtGWBNKoA9vbOM="
}
}

View File

@ -0,0 +1,6 @@
{
"example": {
"EncryptedMasterKey": "ewogICJ2ZXJzaW9uIjogOTk5LAogICJzY3J5cHRTYWx0IjogIksydnRvNzNyUlk0PSIsCiAgInNjcnlwdENvc3RQYXJhbSI6IDMyNzY4LAogICJzY3J5cHRCbG9ja1NpemUiOiA4LAogICJwcmltYXJ5TWFzdGVyS2V5IjogIjJzWXFMSWsyVGU2UDRNbEI2M0xhZUFLakVlWXhHeG9lZDJKbXdhKytkeU9idWFHeUw4WUpiZz09IiwKICAiaG1hY01hc3RlcktleSI6ICI5NkIzVFVwYW9OYlhLMzVMSDhRdVpjRzVLbXk0TWxOU2t1ZzZIQnFsUStHN094UW5CcEh5U2c9PSIsCiAgInZlcnNpb25NYWMiOiAiSFAyY2tFdGxwZ0wyckQrS2R5UElsWk9DNmdLYzB3cmhyYUdRUG1RY3ppOD0iCn0=",
"Passphrase": "abcdefgh"
}
}

View File

@ -0,0 +1,4 @@
{
"example": "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWduYSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3MgZXQgYWNjdXNhbSBldCBqdXN0byBkdW8gZG9sb3JlcyBldCBlYSByZWJ1bS4gU3RldCBjbGl0YSBrYXNkIGd1YmVyZ3Jlbiwgbm8gc2VhIHRha2ltYXRhIHNhbmN0dXMgZXN0IExvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0LiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2V0ZXR1ciBzYWRpcHNjaW5nIGVsaXRyLCBzZWQgZGlhbSBub251bXkgZWlybW9kIHRlbXBvciBpbnZpZHVudCB1dCBsYWJvcmUgZXQgZG9sb3JlIG1hZ25hIGFsaXF1eWFtIGVyYXQsIHNlZCBkaWFtIHZvbHVwdHVhLiBBdCB2ZXJvIGVvcyBldCBhY2N1c2FtIGV0IGp1c3RvIGR1byBkb2xvcmVzIGV0IGVhIHJlYnVtLiBTdGV0IGNsaXRhIGthc2QgZ3ViZXJncmVuLCBubyBzZWEgdGFraW1hdGEgc2FuY3R1cyBlc3QgTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQuCg==",
"helloWorld": "aGVsbG8gd29ybGQK"
}

View File

@ -0,0 +1,16 @@
{
"example": {
"CipherCombo": "SIV_CTRMAC",
"ContentKey": "NSIfk1Uz4Xi0w8P+ulukdoifG5mtFL3TE7zhh/EWZU0=",
"Nonce": "a/vkY30qi89n93yMT4SQxQ==",
"MacKey": "lwdzbaZqIfibYj8LSjcUAqpOJfdku/qN13f2OZWpxDk=",
"Ciphertext": "eAGXNbHreJt00KFj3eVuj/E7snF86cgZ3bCMJOYOdIPRozg3GoQ20t1Fls3KAAEwqNzKpefVO790yZEKKTaTLEy0ZzcO0c7G2i9I2jL3WRM/nE1kZO4YiwEimKoiYnzDVCOqb6DHuGcvp+Zphx/LhrnON/U8fQjijPkU2cr8mU5+3ZMhnsNutHtUygUpK50Y4RS50JJjVlCBzLpD9X2QsnRBODBZybDQ5ZMHYhx/4ogqz0Ww1VZb7n/E9rxZtbbxnMFbcH7TT8cA76mYVxUP+wf57rGH08hLf9J7Ue904w9g/OhNPSeIZbzajafysxpbxTtnsMoOJmRDtbaAiSC4ppHT5fPXzZRPTQLHqhDPtAsJQ2HAhhEhSnp7Bg5bZPA/Vsi2KvCJfGYi1RY9oqMzBYot2fyAc/nlbueGIrM3FokolcJ1QUJsts/Qq2/hMW7gv3zuncsPCssAnXMd0vIhOP8RYmeRx4pStPqvkg51kPAOkDL4xoVA2aQXjZGVXTZ38kkpGFU+okDGRCH4e96w5M2trROq7AvhlxxN/V23/dht7BL9iYcb01XWO7jNt0eh2cXNFunKnq17HsVTRXJOXrawDnFL1fzHT3aSo2T2DQt4SOQS1BMjcZvsD/Fh2JckqbLuPRBwgKiRZYNl0fidwQ9BMMOTxKLhsPv0sTe7JTiqQ5Xjax+eNX+9nwuXO0hXNcg4fqYMLZl4oYRGblufNbpv2kTQMhgzzGBSIyoALHOT0ft68dppRxAx2+tLXaaBJ1o5WCtrLu++KoOSlb106+cZohLBwMFBE1j/93A0wlY4WP4GqrnIG/nvGOs5fPKVulp++Ozl0o/CyLdr9XZboQ=="
},
"helloWorld": {
"CipherCombo": "SIV_CTRMAC",
"Nonce": "mDCOS8MT0etiyDKaXv8X0Q==",
"ContentKey": "RgbuFMgbZP051gzgXp8WbQNJ5y50eNyyBL/6qdQNn6c=",
"MacKey": "lwdzbaZqIfibYj8LSjcUAqpOJfdku/qN13f2OZWpxDk=",
"Ciphertext": "ug/ko337a8QOjGML9gPpIb6JWlUV5172MFzWWdAANNCh7xiiboSSuv1uU+MGG+bNxXET1fYvEfItU11v"
}
}

View File

@ -0,0 +1,155 @@
package cryptomator
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"github.com/golang-jwt/jwt/v4"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
)
const (
configKeyIDTag = "kid"
configFileName = "vault.cryptomator"
masterKeyFileName = "masterkey.cryptomator"
)
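// keyID is the value of the "kid" header in the vault config JWT, e.g.
// "masterkeyfile:masterkey.cryptomator": a scheme ("masterkeyfile") and
// a URI ("masterkey.cryptomator") separated by a colon.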
type keyID string
func (kid keyID) Scheme() string {
return strings.Split(string(kid), ":")[0]
}
func (kid keyID) URI() string {
return strings.Split(string(kid), ":")[1]
}
func (m masterKey) jwtKey() []byte {
return append(m.EncryptKey, m.MacKey...)
}
// vaultConfig is the configuration for the vault, saved in vault.cryptomator at the root of the vault.
type vaultConfig struct {
Format int `json:"format"`
ShorteningThreshold int `json:"shorteningThreshold"`
Jti string `json:"jti"`
CipherCombo string `json:"cipherCombo"`
}
// newVaultConfig creates a new VaultConfig with the default settings and signs it.
func newVaultConfig() vaultConfig {
return vaultConfig{
Format: 8,
ShorteningThreshold: 220,
Jti: uuid.NewString(),
CipherCombo: cipherComboSivGcm,
}
}
// Valid tests the validity of the VaultConfig during JWT parsing.
func (c *vaultConfig) Valid() error {
if c.Format != 8 {
return fmt.Errorf("unsupported vault format: %d", c.Format)
}
return nil
}
// Marshal makes a signed JWT from the VaultConfig.
func (c vaultConfig) Marshal(masterKey masterKey) ([]byte, error) {
keyID := keyID("masterkeyfile:" + masterKeyFileName)
token := jwt.NewWithClaims(jwt.SigningMethodHS256, &c)
token.Header[configKeyIDTag] = string(keyID)
rawToken, err := token.SignedString(masterKey.jwtKey())
if err != nil {
return nil, err
}
return []byte(rawToken), nil
}
// unmarshalVaultConfig parses the vault config JWT and verifies its signature with the master key returned by keyFunc
func unmarshalVaultConfig(tokenBytes []byte, keyFunc func(masterKeyPath string) (*masterKey, error)) (c vaultConfig, err error) {
_, err = jwt.ParseWithClaims(string(tokenBytes), &c, func(token *jwt.Token) (any, error) {
kidObj, ok := token.Header[configKeyIDTag]
if !ok {
return nil, fmt.Errorf("no key url in vault.cryptomator jwt")
}
kid, ok := kidObj.(string)
if !ok {
return nil, fmt.Errorf("key url in vault.cryptomator jwt is not a string")
}
keyID := keyID(kid)
masterKey, err := keyFunc(keyID.URI())
if err != nil {
return nil, err
}
return masterKey.jwtKey(), nil
}, jwt.WithValidMethods([]string{"HS256", "HS384", "HS512"}))
return
}
func (f *Fs) loadOrCreateVault(ctx context.Context, passphrase string) error {
configData, err := f.readSmallFile(ctx, configFileName, 1024)
if err != nil {
if !errors.Is(err, fs.ErrorObjectNotFound) {
return fmt.Errorf("failed to read config at %s: %w", configFileName, err)
}
// Vault does not exist, so create it
err = f.createVault(ctx, passphrase)
if err != nil {
return fmt.Errorf("failed to create new vault: %w", err)
}
configData, err = f.readSmallFile(ctx, "vault.cryptomator", 1024)
if err != nil {
return fmt.Errorf("failed to read vault config after creating new vault: %w", err)
}
}
f.vaultConfig, err = unmarshalVaultConfig(configData, func(masterKeyPath string) (*masterKey, error) {
masterKeyData, err := f.readSmallFile(ctx, masterKeyPath, 1024)
if err != nil {
return nil, fmt.Errorf("failed to read master key: %w", err)
}
f.masterKey, err = unmarshalMasterKey(bytes.NewReader(masterKeyData), passphrase)
if err != nil {
return nil, err
}
return &f.masterKey, nil
})
if err != nil {
return fmt.Errorf("failed to parse jwt: %w", err)
}
return nil
}
func (f *Fs) createVault(ctx context.Context, passphrase string) error {
masterKey, err := newMasterKey()
if err != nil {
return fmt.Errorf("failed to create master key: %w", err)
}
buf := bytes.Buffer{}
err = masterKey.Marshal(&buf, passphrase)
if err != nil {
return fmt.Errorf("failed to encrypt master key: %w", err)
}
err = f.writeSmallFile(ctx, masterKeyFileName, buf.Bytes())
if err != nil {
return fmt.Errorf("failed to save master key: %w", err)
}
vaultConfig := newVaultConfig()
configBytes, err := vaultConfig.Marshal(masterKey)
if err != nil {
return fmt.Errorf("failed to encrypt vault config: %w", err)
}
err = f.writeSmallFile(ctx, configFileName, configBytes)
if err != nil {
return fmt.Errorf("failed to save master key: %w", err)
}
return nil
}
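The config round trip is symmetric: `Marshal` signs the claims with the combined master key, and `unmarshalVaultConfig` verifies them with whatever key the callback returns. A minimal sketch (hypothetical helper, assuming a `masterKey` has already been loaded, e.g. via `unmarshalMasterKey`):

```go
// vaultConfigRoundTrip signs a fresh vault config and verifies it again.
func vaultConfigRoundTrip(key masterKey) error {
	cfg := newVaultConfig() // format 8, SIV_GCM cipher combo, fresh jti
	token, err := cfg.Marshal(key)
	if err != nil {
		return err
	}
	// The callback receives the key URI from the JWT "kid" header,
	// normally "masterkey.cryptomator", and returns the matching key.
	parsed, err := unmarshalVaultConfig(token, func(string) (*masterKey, error) {
		return &key, nil
	})
	if err != nil {
		return err
	}
	_ = parsed
	return nil
}
```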

View File

@ -0,0 +1,30 @@
package cryptomator
import (
"testing"
"github.com/stretchr/testify/assert"
"pgregory.net/rapid"
)
func fixedSizeByteArray(constant int) *rapid.Generator[[]byte] {
return rapid.SliceOfN(rapid.Byte(), constant, constant)
}
func TestVaultConfigRoundTrip(t *testing.T) {
rapid.Check(t, func(t *rapid.T) {
key := drawMasterKey(t)
c1 := newVaultConfig()
token, err := c1.Marshal(key)
assert.NoError(t, err)
c2, err := unmarshalVaultConfig(token, func(string) (*masterKey, error) {
return &key, nil
})
assert.NoError(t, err)
assert.Equal(t, c1, c2)
})
}

View File

@ -0,0 +1,239 @@
package cryptomator
import (
"context"
"errors"
"time"
"github.com/rclone/rclone/fs"
)
var (
errorNotSupportedByUnderlyingRemote = errors.New("not supported by underlying remote")
)
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.wrapped }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.wrapper.Features().DirCacheFlush
if do != nil {
do()
}
f.dirCache.Flush()
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.wrapped.Features().PublicLink
if do == nil {
return "", errorNotSupportedByUnderlyingRemote
}
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return "", err
}
encryptedPath := f.leafPath(leaf, dirID)
return do(ctx, encryptedPath, expire, unlink)
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
do := f.wrapped.Features().DirSetModTime
if do == nil {
return errorNotSupportedByUnderlyingRemote
}
dirID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
return do(ctx, f.dirIDPath(dirID), modTime)
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.wrapped.Features().CleanUp
if do == nil {
return errorNotSupportedByUnderlyingRemote
}
return do(ctx)
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.wrapped.Features().About
if do == nil {
return nil, errorNotSupportedByUnderlyingRemote
}
return do(ctx)
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.wrapped.Features().UserInfo
if do == nil {
return nil, errorNotSupportedByUnderlyingRemote
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.wrapped.Features().Disconnect
if do == nil {
return errorNotSupportedByUnderlyingRemote
}
return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
do := f.wrapped.Features().Shutdown
if do == nil {
return nil
}
return do(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *DecryptingObject) MimeType(ctx context.Context) string {
return ""
}
// ID returns the ID of the Object if known, or "" if not
func (o *DecryptingObject) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// ParentID returns the ID of the parent directory if known or nil if not
func (o *DecryptingObject) ParentID() string {
do, ok := o.Object.(fs.ParentIDer)
if !ok {
return ""
}
return do.ParentID()
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *DecryptingObject) UnWrap() fs.Object { return o.Object }
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *DecryptingObject) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errorNotSupportedByUnderlyingRemote
}
return do.SetTier(tier)
}
// GetTier returns storage tier or class of the Object
func (o *DecryptingObject) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *DecryptingObject) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetMetadata sets metadata for an Object
//
// It should return fs.ErrorNotImplemented if it can't set metadata
func (o *DecryptingObject) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
do, ok := o.Object.(fs.SetMetadataer)
if !ok {
return fs.ErrorNotImplemented
}
return do.SetMetadata(ctx, metadata)
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (i *EncryptingObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(i.ObjectInfo)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (i *EncryptingObjectInfo) MimeType(ctx context.Context) string {
return ""
}
// ID returns the ID of the Object if known, or "" if not
func (i *EncryptingObjectInfo) ID() string {
do, ok := i.ObjectInfo.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// GetTier returns storage tier or class of the Object
func (i *EncryptingObjectInfo) GetTier() string {
do, ok := i.ObjectInfo.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (i *EncryptingObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := i.ObjectInfo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*DecryptingObject)(nil)
_ fs.FullObjectInfo = (*EncryptingObjectInfo)(nil)
_ fs.FullDirectory = (*Directory)(nil)
)

297
docs/content/cryptomator.md Normal file
View File

@ -0,0 +1,297 @@
---
title: "Cryptomator"
description: "Cryptomator-format encrypted vaults"
status: Experimental
---
# {{< icon "fa fa-user-secret" >}} Cryptomator
Rclone `cryptomator` remotes wrap other remotes containing a
[Cryptomator](https://cryptomator.org)-format vault.
For information on the Cryptomator vault format and how it encrypts files,
please read:
[Cryptomator Architecture](https://docs.cryptomator.org/en/latest/security/architecture/).
The `cryptomator` remote is **experimental**. It has received only limited
testing against the official Cryptomator clients. Use with caution.
Known issues:
* Rclone cannot yet parse old vault formats (before version 7).
* Rclone does not yet understand the filename shortening scheme used by Cryptomator.
* Rclone gets confused when a Cryptomator vault contains symlinks.
Cryptomator does not encrypt
* file length
* modification time - used for syncing
## Configuration
Here is an example of how to make a remote called `secret`.
To use `cryptomator`, first set up the underlying remote. Follow the
`rclone config` instructions for the specific backend.
Before configuring the cryptomator remote, check that the underlying remote is
working. In this example the underlying remote is called `remote`.
We will configure a path `path` within this remote to contain the
encrypted content. Anything inside `remote:path` will be encrypted
and anything outside will not.
Configure `cryptomator` using `rclone config`. In this example the
`cryptomator` remote is called `secret`, to differentiate it from the
underlying `remote`.
When you are done you can use the cryptomator remote named `secret` just
as you would with any other remote, e.g. `rclone copy D:\docs secret:\docs`,
and rclone will encrypt and decrypt as needed on the fly.
If you access the wrapped remote `remote:path` directly you will bypass
the encryption, and anything you read will be in encrypted form, and
anything you write will be unencrypted. To avoid issues it is best to
configure a dedicated path for encrypted content, and access it
exclusively through a cryptomator remote.
```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> secret
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[snip]
XX / Encrypt/Decrypt Cryptomator-format vaults
\ (cryptomator)
[snip]
Storage> cryptomator
** See help for cryptomator backend at: https://rclone.org/cryptomator/ **
Remote to use as a Cryptomator vault.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).
Enter a string value. Press Enter for the default ("").
remote> remote:path
Password for Cryptomator vault.
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Configuration complete.
Options:
- type: cryptomator
- remote: remote:path
- password: *** ENCRYPTED ***
Keep this "secret" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>
```
**Important** The cryptomator password stored in `rclone.conf` is lightly
obscured. That only protects it from cursory inspection. It is not
secure unless [configuration encryption](https://rclone.org/docs/#configuration-encryption) of `rclone.conf` is specified.
A long passphrase is recommended, or `rclone config` can generate a
random one.
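If you write `rclone.conf` by hand, or set the password through the
`RCLONE_CRYPTOMATOR_PASSWORD` environment variable, remember that rclone
expects the obscured form, which you can generate with
[rclone obscure](/commands/rclone_obscure/):
```
$ rclone obscure "your vault passphrase"
```
Paste the string it prints as the `password` value.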
### Specifying the remote
When configuring the remote to encrypt/decrypt, you may specify any
string that rclone accepts as a source/destination of other commands.
The primary use case is to specify the path into an already configured
remote (e.g. `remote:path/to/dir` or `remote:bucket`), such that
data in a remote untrusted location can be stored encrypted.
You may also specify a local filesystem path, such as
`/path/to/dir` on Linux, `C:\path\to\dir` on Windows. By creating
a cryptomator remote pointing to such a local filesystem path, you can
use rclone as a utility for pure local file encryption, for example
to keep encrypted files on a removable USB drive.
**Note**: A string which does not contain a `:` will be treated by rclone
as a relative path in the local filesystem. For example, if you enter
the name `remote` without the trailing `:`, it will be treated as
a subdirectory of the current directory with the name "remote".
If a path `remote:path/to/dir` is specified, rclone stores encrypted
files in `path/to/dir` on the remote. With file name encryption, files
saved to `secret:subdir/subfile` are stored in the unencrypted path
`path/to/dir` but the `subdir/subfile` element is encrypted.
The path you specify does not have to exist, rclone will create
it when needed.
If you intend to use the wrapped remote both directly for keeping unencrypted
content, as well as through a cryptomator remote for encrypted content, it is
recommended to point the cryptomator remote to a separate directory within the
wrapped remote. If you use a bucket-based storage system (e.g. Swift, S3,
Google Cloud Storage, B2) it is necessary to wrap the cryptomator remote
around a specific bucket (`s3:bucket`). Otherwise, rclone will attempt to
create configuration files in the root of the storage (`s3:`).
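For example, a cryptomator remote wrapping a dedicated bucket could look
like this in `rclone.conf` (remote and bucket names are illustrative):
```
[secret]
type = cryptomator
remote = s3:mybucket
password = *** ENCRYPTED ***
```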
### Example
Create the following file structure.
```
plaintext/
├── file0.txt
├── file1.txt
└── subdir
├── file2.txt
├── file3.txt
└── subsubdir
└── file4.txt
```
Copy these to the remote, and list them
```
$ rclone -q copy plaintext secret:
$ rclone -q ls secret:
6 file0.txt
7 file1.txt
8 subdir/file2.txt
9 subdir/file3.txt
10 subdir/subsubdir/file4.txt
```
The cryptomator vault looks like
```
$ rclone -q ls remote:path
333 masterkey.cryptomator
283 vault.cryptomator
104 d/KE/32SOK74WWKLZYJPR2KDINSPOW6KCF4/1tlc1uDSBOm1WbV83-682WMWkF_CBwzs2Q==.c9r
132 d/KE/32SOK74WWKLZYJPR2KDINSPOW6KCF4/dirid.c9r
105 d/KE/32SOK74WWKLZYJPR2KDINSPOW6KCF4/u85LJU0T8u7kour8CmukHpz9bUHc0ykRaw==.c9r
36 d/KE/32SOK74WWKLZYJPR2KDINSPOW6KCF4/YOv9E2fAfW3X4B9jY6prXszoDZosardIsA==.c9r/dir.c9r
106 d/M3/GSTDC7WEJHVDKTXFU4IYOCK4JZPL7Q/V87lwDcGAfL6kA0QJf24o_dLiRgjvRdfGQ==.c9r
132 d/M3/GSTDC7WEJHVDKTXFU4IYOCK4JZPL7Q/dirid.c9r
103 d/QK/GIR7IOTE5GB3VDRDKRZETC5RHCAXGQ/Q5L14GbiODO_U0GKprmnEe81wx6ZjDeb8g==.c9r
102 d/QK/GIR7IOTE5GB3VDRDKRZETC5RHCAXGQ/y2mmVT_4X58i3n4C06_nxzotUnxVk8vX2Q==.c9r
36 d/QK/GIR7IOTE5GB3VDRDKRZETC5RHCAXGQ/M0eewPxxsKq2ObhUJDOUZnnRCgE77g==.c9r/dir.c9r
```
The directory structure is preserved
```
$ rclone -q ls secret:subdir
8 file2.txt
9 file3.txt
10 subsubdir/file4.txt
```
### Modification times and hashes
Cryptomator stores modification times using the underlying remote so support
depends on that.
Hashes are not stored for cryptomator. However, the data integrity is
protected by the cryptography itself.
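Without a common hash, a size-only check is still possible between the
plaintext and the cryptomator remote, and will catch missing or
truncated files, for example:
```
$ rclone check --size-only plaintext secret:
```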
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/cryptomator/cryptomator.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to cryptomator (Encrypt/Decrypt Cryptomator-format vaults).
#### --cryptomator-remote
Remote to use as a Cryptomator vault.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).
Properties:
- Config: remote
- Env Var: RCLONE_CRYPTOMATOR_REMOTE
- Type: string
- Required: true
#### --cryptomator-password
Password for Cryptomator vault.
**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).
Properties:
- Config: password
- Env Var: RCLONE_CRYPTOMATOR_PASSWORD
- Type: string
- Required: true
### Advanced options
Here are the Advanced options specific to cryptomator (Encrypt/Decrypt Cryptomator-format vaults).
#### --cryptomator-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_CRYPTOMATOR_DESCRIPTION
- Type: string
- Required: false
### Metadata
Any metadata supported by the underlying remote is read and written.
See the [metadata](/docs/#metadata) docs for more info.
{{< rem autogenerated options stop >}}
## Backing up an encrypted remote
If you wish to backup an encrypted remote, it is recommended that you use
`rclone sync` on the encrypted files, and make sure the passwords are
the same in the new encrypted remote.
This will have the following advantages
* `rclone sync` will check the checksums while copying
* you can use `rclone check` between the encrypted remotes
* you don't decrypt and encrypt unnecessarily
For example, let's say you have your original remote at `remote:` with
the encrypted version at `eremote:` with path `remote:cryptomator`. You
would then set up the new remote `remote2:` and then the encrypted
version `eremote2:` with path `remote2:cryptomator` using the same
passwords as `eremote:`.
To sync the two remotes you would do
rclone sync --interactive remote:cryptomator remote2:cryptomator
## Limitations of Cryptomator encryption
Cryptomator encrypts, and will detect external modification to:
* file contents
* file names
* the parent of a directory
Cryptomator does not encrypt
* file length
* filename length
* modification time - used for syncing
* how many entries there are in a directory, and whether each one is a file
or a directory
Cryptomator cannot detect if a file or directory has been copied, moved, or
deleted by someone with access to the underlying storage. However, such an
adversary would have to guess which file to tamper with from the above
unencrypted attributes.

View File

@ -1479,8 +1479,8 @@ func Run(t *testing.T, opt *Opt) {
// check remotes
// remote should not exist here
_, err = f.List(ctx, "")
assert.True(t, errors.Is(err, fs.ErrorDirNotFound))
listing, err := f.List(ctx, "")
assert.ErrorIs(t, err, fs.ErrorDirNotFound, "got listing: %+v", listing)
//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
file1Copy := file1
file1Copy.Path = path.Join(newName, file1.Path)

View File

@ -36,6 +36,9 @@ backends:
- backend: "crypt"
remote: "TestCryptSwift:"
fastlist: false
- backend: "cryptomator"
remote: "TestCryptomatorLocal:"
fastlist: false
## chunker
- backend: "chunker"
remote: "TestChunkerLocal:"

3
go.mod
View File

@ -11,6 +11,7 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
github.com/Files-com/files-sdk-go/v3 v3.2.107
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
github.com/NickBall/go-aes-key-wrap v0.0.0-20170929221519-1c3aa3e4dfc5
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
github.com/abbot/go-http-auth v0.4.0
@ -50,6 +51,7 @@ require (
github.com/mattn/go-colorable v0.1.14
github.com/mattn/go-runewidth v0.0.16
github.com/minio/minio-go/v7 v7.0.83
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
github.com/mitchellh/go-homedir v1.1.0
github.com/moby/sys/mountinfo v0.7.2
github.com/ncw/swift/v2 v2.0.3
@ -90,6 +92,7 @@ require (
google.golang.org/api v0.216.0
gopkg.in/validator.v2 v2.0.1
gopkg.in/yaml.v3 v3.0.1
pgregory.net/rapid v1.1.0
storj.io/uplink v1.13.1
)

6
go.sum
View File

@ -72,6 +72,8 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIf
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/NickBall/go-aes-key-wrap v0.0.0-20170929221519-1c3aa3e4dfc5 h1:5BIUS5hwyLM298mOf8e8TEgD3cCYqc86uaJdQCYZo/o=
github.com/NickBall/go-aes-key-wrap v0.0.0-20170929221519-1c3aa3e4dfc5/go.mod h1:w5D10RxC0NmPYxmQ438CC1S07zaC1zpvuNW7s5sUk2Q=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
@ -468,6 +470,8 @@ github.com/minio/minio-go/v7 v7.0.83/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU=
github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4=
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 h1:cUVxyR+UfmdEAZGJ8IiKld1O0dbGotEnkMolG5hfMSY=
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75/go.mod h1:pBbZyGwC5i16IBkjVKoy/sznA8jPD/K9iedwe1ESE6w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@ -1063,6 +1067,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs=
moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE=
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@ -361,9 +361,11 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
} else if dc.rootID == dc.trueRootID {
return "", errors.New("is root directory")
}
if dc.rootParentID == "" {
return "", errors.New("internal error: didn't find rootParentID")
}
// By this point, either dc.foundRoot was set to true by _findRoot which also set dc.rootParentID,
// or dc.foundRoot is false, and the if statement above just set rootParentID.
// There used to be an explicit check here that dc.rootParentID != "",
// but there is a backend (cryptomator) whose root ID is actually "",
// conflicting with the zero value.
return dc.rootParentID, nil
}