mirror of
https://github.com/rclone/rclone.git
synced 2025-04-19 18:31:10 +08:00
Merge branch 'master' into cryptomator
This commit is contained in:
commit
26d89becbf
@ -1,77 +0,0 @@
|
||||
name: Docker beta build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
jobs:
|
||||
build:
|
||||
if: github.repository == 'rclone/rclone'
|
||||
runs-on: ubuntu-latest
|
||||
name: Build image job
|
||||
steps:
|
||||
- name: Free some space
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
||||
# Remove android SDK
|
||||
sudo rm -rf /usr/local/lib/android || true
|
||||
# Remove .net runtime
|
||||
sudo rm -rf /usr/share/dotnet || true
|
||||
df -h .
|
||||
- name: Checkout master
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ghcr.io/${{ github.repository }}
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
# This is the user that triggered the Workflow. In this case, it will
|
||||
# either be the user whom created the Release or manually triggered
|
||||
# the workflow_dispatch.
|
||||
username: ${{ github.actor }}
|
||||
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
|
||||
# GitHub Actions at the start of a workflow run to identify the job.
|
||||
# This is used to authenticate against GitHub Container Registry.
|
||||
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
|
||||
# for more detailed information.
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Show disk usage
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
||||
- name: Build and publish image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: Dockerfile
|
||||
context: .
|
||||
push: true # push the image to ghcr
|
||||
tags: |
|
||||
ghcr.io/rclone/rclone:beta
|
||||
rclone/rclone:beta
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||
cache-from: type=gha, scope=${{ github.workflow }}
|
||||
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
|
||||
provenance: false
|
||||
# Eventually cache will need to be cleared if builds more frequent than once a week
|
||||
# https://github.com/docker/build-push-action/issues/252
|
||||
- name: Show disk usage
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
294
.github/workflows/build_publish_docker_image.yml
vendored
Normal file
294
.github/workflows/build_publish_docker_image.yml
vendored
Normal file
@ -0,0 +1,294 @@
|
||||
---
|
||||
# Github Actions release for rclone
|
||||
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
|
||||
|
||||
name: Build & Push Docker Images
|
||||
|
||||
# Trigger the workflow on push or pull request
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- '**'
|
||||
tags:
|
||||
- '**'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
manual:
|
||||
description: Manual run (bypass default conditions)
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
build-image:
|
||||
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runs-on: ubuntu-24.04
|
||||
- platform: linux/386
|
||||
runs-on: ubuntu-24.04
|
||||
- platform: linux/arm64
|
||||
runs-on: ubuntu-24.04-arm
|
||||
- platform: linux/arm/v7
|
||||
runs-on: ubuntu-24.04-arm
|
||||
- platform: linux/arm/v6
|
||||
runs-on: ubuntu-24.04-arm
|
||||
|
||||
name: Build Docker Image for ${{ matrix.platform }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
|
||||
steps:
|
||||
- name: Free Space
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
||||
# Remove android SDK
|
||||
sudo rm -rf /usr/local/lib/android || true
|
||||
# Remove .net runtime
|
||||
sudo rm -rf /usr/share/dotnet || true
|
||||
df -h .
|
||||
|
||||
- name: Checkout Repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set REPO_NAME Variable
|
||||
run: |
|
||||
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Set PLATFORM Variable
|
||||
run: |
|
||||
platform=${{ matrix.platform }}
|
||||
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set CACHE_NAME Variable
|
||||
shell: python
|
||||
run: |
|
||||
import os, re
|
||||
|
||||
def slugify(input_string, max_length=63):
|
||||
slug = input_string.lower()
|
||||
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
|
||||
slug = slug.strip()
|
||||
slug = re.sub(r'\s+', '-', slug)
|
||||
slug = re.sub(r'-+', '-', slug)
|
||||
slug = slug[:max_length]
|
||||
slug = re.sub(r'[-]+$', '', slug)
|
||||
return slug
|
||||
|
||||
ref_name_slug = "cache"
|
||||
|
||||
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
|
||||
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
|
||||
|
||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
||||
env.write(f"CACHE_NAME={ref_name_slug}\n")
|
||||
|
||||
- name: Get ImageOS
|
||||
# There's no way around this, because "ImageOS" is only available to
|
||||
# processes, but the setup-go action uses it in its key.
|
||||
id: imageos
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
result-encoding: string
|
||||
script: |
|
||||
return process.env.ImageOS
|
||||
|
||||
- name: Extract Metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
env:
|
||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
|
||||
with:
|
||||
images: |
|
||||
ghcr.io/${{ env.REPO_NAME }}
|
||||
labels: |
|
||||
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
|
||||
org.opencontainers.image.vendor=${{ github.repository_owner }}
|
||||
org.opencontainers.image.authors=rclone <https://github.com/rclone>
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
tags: |
|
||||
type=sha
|
||||
type=ref,event=pr
|
||||
type=ref,event=branch
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=beta,enable={{is_default_branch}}
|
||||
|
||||
- name: Setup QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Load Go Build Cache for Docker
|
||||
id: go-cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
|
||||
# Cache only the go builds, the module download is cached via the docker layer caching
|
||||
path: |
|
||||
go-build-cache
|
||||
|
||||
- name: Inject Go Build Cache into Docker
|
||||
uses: reproducible-containers/buildkit-cache-dance@v3
|
||||
with:
|
||||
cache-map: |
|
||||
{
|
||||
"go-build-cache": "/root/.cache/go-build"
|
||||
}
|
||||
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
# This is the user that triggered the Workflow. In this case, it will
|
||||
# either be the user whom created the Release or manually triggered
|
||||
# the workflow_dispatch.
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and Publish Image Digest
|
||||
id: build
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: Dockerfile
|
||||
context: .
|
||||
provenance: false
|
||||
# don't specify 'tags' here (error "get can't push tagged ref by digest")
|
||||
# tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
annotations: ${{ steps.meta.outputs.annotations }}
|
||||
platforms: ${{ matrix.platform }}
|
||||
outputs: |
|
||||
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
|
||||
cache-from: |
|
||||
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
|
||||
cache-to: |
|
||||
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
|
||||
|
||||
- name: Export Image Digest
|
||||
run: |
|
||||
mkdir -p /tmp/digests
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "/tmp/digests/${digest#sha256:}"
|
||||
|
||||
- name: Upload Image Digest
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: digests-${{ env.PLATFORM }}
|
||||
path: /tmp/digests/*
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
merge-image:
|
||||
name: Merge & Push Final Docker Image
|
||||
runs-on: ubuntu-24.04
|
||||
needs:
|
||||
- build-image
|
||||
|
||||
steps:
|
||||
- name: Download Image Digests
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: /tmp/digests
|
||||
pattern: digests-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set REPO_NAME Variable
|
||||
run: |
|
||||
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Extract Metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
env:
|
||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
|
||||
with:
|
||||
images: |
|
||||
${{ env.REPO_NAME }}
|
||||
ghcr.io/${{ env.REPO_NAME }}
|
||||
labels: |
|
||||
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
|
||||
org.opencontainers.image.vendor=${{ github.repository_owner }}
|
||||
org.opencontainers.image.authors=rclone <https://github.com/rclone>
|
||||
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
tags: |
|
||||
type=sha
|
||||
type=ref,event=pr
|
||||
type=ref,event=branch
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=beta,enable={{is_default_branch}}
|
||||
|
||||
- name: Extract Tags
|
||||
shell: python
|
||||
run: |
|
||||
import json, os
|
||||
|
||||
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
|
||||
metadata = json.loads(metadata_json)
|
||||
|
||||
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
|
||||
tags_string = " ".join(tags)
|
||||
|
||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
||||
env.write(f"TAGS={tags_string}\n")
|
||||
|
||||
- name: Extract Annotations
|
||||
shell: python
|
||||
run: |
|
||||
import json, os
|
||||
|
||||
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
|
||||
metadata = json.loads(metadata_json)
|
||||
|
||||
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
|
||||
annotations_string = " ".join(annotations)
|
||||
|
||||
with open(os.environ['GITHUB_ENV'], 'a') as env:
|
||||
env.write(f"ANNOTATIONS={annotations_string}\n")
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
# This is the user that triggered the Workflow. In this case, it will
|
||||
# either be the user whom created the Release or manually triggered
|
||||
# the workflow_dispatch.
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create & Push Manifest List
|
||||
working-directory: /tmp/digests
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
${{ env.TAGS }} \
|
||||
${{ env.ANNOTATIONS }} \
|
||||
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
|
||||
|
||||
- name: Inspect and Run Multi-Platform Image
|
||||
run: |
|
||||
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
|
||||
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
|
||||
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
|
49
.github/workflows/build_publish_docker_plugin.yml
vendored
Normal file
49
.github/workflows/build_publish_docker_plugin.yml
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
---
|
||||
# Github Actions release for rclone
|
||||
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
|
||||
|
||||
name: Release Build for Docker Plugin
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
manual:
|
||||
description: Manual run (bypass default conditions)
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
build_docker_volume_plugin:
|
||||
if: inputs.manual || github.repository == 'rclone/rclone'
|
||||
name: Build docker plugin job
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Free some space
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
||||
# Remove android SDK
|
||||
sudo rm -rf /usr/local/lib/android || true
|
||||
# Remove .net runtime
|
||||
sudo rm -rf /usr/share/dotnet || true
|
||||
df -h .
|
||||
- name: Checkout master
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Build and publish docker plugin
|
||||
shell: bash
|
||||
run: |
|
||||
VER=${GITHUB_REF#refs/tags/}
|
||||
PLUGIN_USER=rclone
|
||||
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
|
||||
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
|
||||
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
|
||||
export PLUGIN_USER PLUGIN_ARCH
|
||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
|
||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
|
||||
done
|
||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
|
||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
|
@ -1,89 +0,0 @@
|
||||
name: Docker release build
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.repository == 'rclone/rclone'
|
||||
runs-on: ubuntu-latest
|
||||
name: Build image job
|
||||
steps:
|
||||
- name: Free some space
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
||||
# Remove android SDK
|
||||
sudo rm -rf /usr/local/lib/android || true
|
||||
# Remove .net runtime
|
||||
sudo rm -rf /usr/share/dotnet || true
|
||||
df -h .
|
||||
- name: Checkout master
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Get actual patch version
|
||||
id: actual_patch_version
|
||||
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
|
||||
- name: Get actual minor version
|
||||
id: actual_minor_version
|
||||
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
|
||||
- name: Get actual major version
|
||||
id: actual_major_version
|
||||
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USER }}
|
||||
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
|
||||
- name: Build and publish image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: Dockerfile
|
||||
context: .
|
||||
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
|
||||
push: true
|
||||
tags: |
|
||||
rclone/rclone:latest
|
||||
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
|
||||
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
|
||||
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
|
||||
|
||||
build_docker_volume_plugin:
|
||||
if: github.repository == 'rclone/rclone'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
name: Build docker plugin job
|
||||
steps:
|
||||
- name: Free some space
|
||||
shell: bash
|
||||
run: |
|
||||
df -h .
|
||||
# Remove android SDK
|
||||
sudo rm -rf /usr/local/lib/android || true
|
||||
# Remove .net runtime
|
||||
sudo rm -rf /usr/share/dotnet || true
|
||||
df -h .
|
||||
- name: Checkout master
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Build and publish docker plugin
|
||||
shell: bash
|
||||
run: |
|
||||
VER=${GITHUB_REF#refs/tags/}
|
||||
PLUGIN_USER=rclone
|
||||
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
|
||||
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
|
||||
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
|
||||
export PLUGIN_USER PLUGIN_ARCH
|
||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
|
||||
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
|
||||
done
|
||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
|
||||
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
|
46
Dockerfile
46
Dockerfile
@ -1,21 +1,47 @@
|
||||
FROM golang:alpine AS builder
|
||||
|
||||
COPY . /go/src/github.com/rclone/rclone/
|
||||
ARG CGO_ENABLED=0
|
||||
|
||||
WORKDIR /go/src/github.com/rclone/rclone/
|
||||
|
||||
RUN apk add --no-cache make bash gawk git
|
||||
RUN \
|
||||
CGO_ENABLED=0 \
|
||||
make
|
||||
RUN ./rclone version
|
||||
RUN echo "**** Set Go Environment Variables ****" && \
|
||||
go env -w GOCACHE=/root/.cache/go-build
|
||||
|
||||
RUN echo "**** Install Dependencies ****" && \
|
||||
apk add --no-cache \
|
||||
make \
|
||||
bash \
|
||||
gawk \
|
||||
git
|
||||
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
|
||||
RUN echo "**** Download Go Dependencies ****" && \
|
||||
go mod download -x
|
||||
|
||||
RUN echo "**** Verify Go Dependencies ****" && \
|
||||
go mod verify
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
|
||||
echo "**** Build Binary ****" && \
|
||||
make
|
||||
|
||||
RUN echo "**** Print Version Binary ****" && \
|
||||
./rclone version
|
||||
|
||||
# Begin final image
|
||||
FROM alpine:latest
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"
|
||||
|
||||
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
|
||||
echo "user_allow_other" >> /etc/fuse.conf
|
||||
RUN echo "**** Install Dependencies ****" && \
|
||||
apk add --no-cache \
|
||||
ca-certificates \
|
||||
fuse3 \
|
||||
tzdata && \
|
||||
echo "Enable user_allow_other in fuse" && \
|
||||
echo "user_allow_other" >> /etc/fuse.conf
|
||||
|
||||
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
|
||||
|
||||
|
@ -131,8 +131,8 @@ Now
|
||||
|
||||
* git co ${BASE_TAG}-stable
|
||||
* git cherry-pick any fixes
|
||||
* Do the steps as above
|
||||
* make startstable
|
||||
* Do the steps as above
|
||||
* git co master
|
||||
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
|
||||
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
|
||||
|
@ -4,8 +4,10 @@
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
@ -42,11 +44,13 @@ import (
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -310,6 +314,47 @@ Note that chunks are stored in memory and there may be up to
|
||||
in memory.`,
|
||||
Default: 16,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_cutoff",
|
||||
Help: `Cutoff for switching to multipart copy.
|
||||
|
||||
Any files larger than this that need to be server-side copied will be
|
||||
copied in chunks of chunk_size using the put block list API.
|
||||
|
||||
Files smaller than this limit will be copied with the Copy Blob API.`,
|
||||
Default: 8 * fs.Mebi,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_concurrency",
|
||||
Help: `Concurrency for multipart copy.
|
||||
|
||||
This is the number of chunks of the same file that are copied
|
||||
concurrently.
|
||||
|
||||
These chunks are not buffered in memory and Microsoft recommends
|
||||
setting this value to greater than 1000 in the azcopy documentation.
|
||||
|
||||
https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-optimize#increase-concurrency
|
||||
|
||||
In tests, copy speed increases almost linearly with copy
|
||||
concurrency.`,
|
||||
Default: 512,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_copy_blob",
|
||||
Help: `Whether to use the Copy Blob API when copying to the same storage account.
|
||||
|
||||
If true (the default) then rclone will use the Copy Blob API for
|
||||
copies to the same storage account even when the size is above the
|
||||
copy_cutoff.
|
||||
|
||||
Rclone assumes that the same storage account means the same config
|
||||
and does not check for the same storage account in different configs.
|
||||
|
||||
There should be no need to change this value.
|
||||
`,
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Help: `Size of blob list.
|
||||
@ -476,6 +521,9 @@ type Options struct {
|
||||
UseAZ bool `config:"use_az"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyConcurrency int `config:"copy_concurrency"`
|
||||
UseCopyBlob bool `config:"use_copy_blob"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ListChunkSize uint `config:"list_chunk"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
@ -500,6 +548,9 @@ type Fs struct {
|
||||
cntSVCcacheMu sync.Mutex // mutex to protect cntSVCcache
|
||||
cntSVCcache map[string]*container.Client // reference to containerClient per container
|
||||
svc *service.Client // client to access azblob
|
||||
cred azcore.TokenCredential // how to generate tokens (may be nil)
|
||||
sharedKeyCred *service.SharedKeyCredential // shared key credentials (may be nil)
|
||||
anonymous bool // if this is anonymous access
|
||||
rootContainer string // container part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
isLimited bool // if limited to one container
|
||||
@ -639,6 +690,14 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type servicePrincipalCredentials struct {
|
||||
AppID string `json:"appId"`
|
||||
Password string `json:"password"`
|
||||
@ -724,12 +783,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.publicAccess = container.PublicAccessType(opt.PublicAccess)
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ServerSideAcrossConfigs: true,
|
||||
DoubleSlash: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
@ -744,12 +805,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ClientOptions: policyClientOptions,
|
||||
}
|
||||
|
||||
// Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
|
||||
var (
|
||||
cred azcore.TokenCredential
|
||||
sharedKeyCred *service.SharedKeyCredential
|
||||
anonymous = false
|
||||
)
|
||||
// Here we auth by setting one of f.cred, f.sharedKeyCred, f.svc or f.anonymous
|
||||
switch {
|
||||
case opt.EnvAuth:
|
||||
// Read account from environment if needed
|
||||
@ -761,7 +817,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ClientOptions: policyClientOptions,
|
||||
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
f.cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
|
||||
}
|
||||
@ -775,12 +831,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = emulatorBlobEndpoint
|
||||
}
|
||||
sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
f.sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create new shared key credential for emulator failed: %w", err)
|
||||
}
|
||||
case opt.Account != "" && opt.Key != "":
|
||||
sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
f.sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
|
||||
}
|
||||
@ -815,7 +871,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
options := azidentity.ClientSecretCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
}
|
||||
cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options)
|
||||
f.cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating a client secret credential: %w", err)
|
||||
}
|
||||
@ -849,7 +905,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ClientOptions: policyClientOptions,
|
||||
SendCertificateChain: opt.ClientSendCertificateChain,
|
||||
}
|
||||
cred, err = azidentity.NewClientCertificateCredential(
|
||||
f.cred, err = azidentity.NewClientCertificateCredential(
|
||||
opt.Tenant, opt.ClientID, certs, key, &options,
|
||||
)
|
||||
if err != nil {
|
||||
@ -864,7 +920,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("user password decode failed - did you obscure it?: %w", err)
|
||||
}
|
||||
cred, err = azidentity.NewUsernamePasswordCredential(
|
||||
f.cred, err = azidentity.NewUsernamePasswordCredential(
|
||||
opt.Tenant, opt.ClientID, opt.Username, password, &options,
|
||||
)
|
||||
if err != nil {
|
||||
@ -883,7 +939,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
options := azidentity.ClientSecretCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
}
|
||||
cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
|
||||
f.cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating a client secret credential: %w", err)
|
||||
}
|
||||
@ -905,19 +961,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
case opt.MSIResourceID != "":
|
||||
options.ID = azidentity.ResourceID(opt.MSIResourceID)
|
||||
}
|
||||
cred, err = azidentity.NewManagedIdentityCredential(&options)
|
||||
f.cred, err = azidentity.NewManagedIdentityCredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
case opt.UseAZ:
|
||||
var options = azidentity.AzureCLICredentialOptions{}
|
||||
cred, err = azidentity.NewAzureCLICredential(&options)
|
||||
f.cred, err = azidentity.NewAzureCLICredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||
}
|
||||
case opt.Account != "":
|
||||
// Anonymous access
|
||||
anonymous = true
|
||||
f.anonymous = true
|
||||
default:
|
||||
return nil, errors.New("no authentication method configured")
|
||||
}
|
||||
@ -935,19 +991,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
opt.Endpoint = u.String()
|
||||
}
|
||||
if sharedKeyCred != nil {
|
||||
if f.sharedKeyCred != nil {
|
||||
// Shared key cred
|
||||
f.svc, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, sharedKeyCred, &clientOpt)
|
||||
f.svc, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, f.sharedKeyCred, &clientOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create client with shared key failed: %w", err)
|
||||
}
|
||||
} else if cred != nil {
|
||||
} else if f.cred != nil {
|
||||
// Azidentity cred
|
||||
f.svc, err = service.NewClient(opt.Endpoint, cred, &clientOpt)
|
||||
f.svc, err = service.NewClient(opt.Endpoint, f.cred, &clientOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create client failed: %w", err)
|
||||
}
|
||||
} else if anonymous {
|
||||
} else if f.anonymous {
|
||||
// Anonymous public access
|
||||
f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
|
||||
if err != nil {
|
||||
@ -1080,7 +1136,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
|
||||
if prefix != "" {
|
||||
prefix += "/"
|
||||
}
|
||||
if directory != "" {
|
||||
if directory != "" && (prefix == "" && !bucket.IsAllSlashes(directory) || prefix != "" && !strings.HasSuffix(directory, "/")) {
|
||||
directory += "/"
|
||||
}
|
||||
delimiter := ""
|
||||
@ -1156,18 +1212,22 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
|
||||
}
|
||||
// Send the subdirectories
|
||||
foundItems += len(response.Segment.BlobPrefixes)
|
||||
for _, remote := range response.Segment.BlobPrefixes {
|
||||
if remote.Name == nil {
|
||||
for _, blobPrefix := range response.Segment.BlobPrefixes {
|
||||
if blobPrefix.Name == nil {
|
||||
fs.Debugf(f, "Nil prefix received")
|
||||
continue
|
||||
}
|
||||
remote := strings.TrimRight(*remote.Name, "/")
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
remote := f.opt.Enc.ToStandardPath(*blobPrefix.Name)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
fs.Debugf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
// Trim one slash off the remote name
|
||||
remote, _ = strings.CutSuffix(remote, "/")
|
||||
if remote == "" || bucket.IsAllSlashes(remote) {
|
||||
remote += "/"
|
||||
}
|
||||
if addContainer {
|
||||
remote = path.Join(containerName, remote)
|
||||
}
|
||||
@ -1501,7 +1561,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
|
||||
// When a container is deleted, a container with the same name cannot be created
|
||||
// for at least 30 seconds; the container may not be available for more than 30
|
||||
// seconds if the service is still processing the request.
|
||||
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
|
||||
time.Sleep(12 * time.Second) // default 10 retries will be 120 seconds
|
||||
f.cache.MarkDeleted(container)
|
||||
return true, err
|
||||
case bloberror.AuthorizationFailure:
|
||||
@ -1609,6 +1669,220 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.deleteContainer(ctx, container)
|
||||
}
|
||||
|
||||
// getAuth gets auth to copy o.
|
||||
//
|
||||
// tokenOK is used to signal that token based auth (Microsoft Entra
|
||||
// ID) is acceptable.
|
||||
//
|
||||
// This will return srcURL to read the object, which may be a SAS URL.
|
||||
//
|
||||
// If noAuth is set then the srcURL returned will be a plain object
|
||||
// URL (not a SAS) and token will be empty.
|
||||
//
|
||||
// If tokenOK is true it may also return a token for the auth.
|
||||
func (o *Object) getAuth(ctx context.Context, tokenOK bool, noAuth bool) (srcURL string, token *string, err error) {
|
||||
f := o.fs
|
||||
srcBlobSVC := o.getBlobSVC()
|
||||
srcURL = srcBlobSVC.URL()
|
||||
|
||||
switch {
|
||||
case noAuth:
|
||||
// If same storage account then no auth needed
|
||||
case f.cred != nil:
|
||||
if !tokenOK {
|
||||
return srcURL, token, errors.New("not supported: Microsoft Entra ID")
|
||||
}
|
||||
options := policy.TokenRequestOptions{}
|
||||
accessToken, err := f.cred.GetToken(ctx, options)
|
||||
if err != nil {
|
||||
return srcURL, token, fmt.Errorf("failed to create access token: %w", err)
|
||||
}
|
||||
token = &accessToken.Token
|
||||
case f.sharedKeyCred != nil:
|
||||
// Generate a short lived SAS URL if using shared key credentials
|
||||
expiry := time.Now().Add(time.Hour)
|
||||
sasOptions := blob.GetSASURLOptions{}
|
||||
srcURL, err = srcBlobSVC.GetSASURL(sas.BlobPermissions{Read: true}, expiry, &sasOptions)
|
||||
if err != nil {
|
||||
return srcURL, token, fmt.Errorf("failed to create SAS URL: %w", err)
|
||||
}
|
||||
case f.anonymous || f.opt.SASURL != "":
|
||||
// If using a SASURL or anonymous, no need for any extra auth
|
||||
default:
|
||||
return srcURL, token, errors.New("unknown authentication type")
|
||||
}
|
||||
return srcURL, token, nil
|
||||
}
|
||||
|
||||
// Do multipart parallel copy.
|
||||
//
|
||||
// This uses these APIs:
|
||||
//
|
||||
// - PutBlockFromURL - https://learn.microsoft.com/en-us/rest/api/storageservices/put-block-from-url
|
||||
// - PutBlockList - https://learn.microsoft.com/en-us/rest/api/storageservices/put-block-list
|
||||
func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath string, src *Object) (dst fs.Object, err error) {
|
||||
srcProperties, err := src.readMetaDataAlways(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("multipart copy: failed to read source object: %w", err)
|
||||
}
|
||||
|
||||
// Create the dst object by altering a copy of the src object
|
||||
obj := *src
|
||||
o := &obj
|
||||
o.fs = f
|
||||
o.remote = remote
|
||||
|
||||
srcURL, token, err := src.getAuth(ctx, true, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("multipart copy: %w", err)
|
||||
}
|
||||
|
||||
bic, err := newBlockIDCreator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dstBlockBlobSVC := f.getBlockBlobSVC(dstContainer, dstPath)
|
||||
|
||||
defer atexit.OnError(&err, func() {
|
||||
// Try to abort the upload, but ignore the error.
|
||||
fs.Debugf(o, "Cancelling multipart copy")
|
||||
_ = o.clearUncommittedBlocks(ctx)
|
||||
})()
|
||||
|
||||
var (
|
||||
srcSize = src.size
|
||||
partSize = int64(chunksize.Calculator(o, src.size, blockblob.MaxBlocks, f.opt.ChunkSize))
|
||||
numParts = (srcSize-1)/partSize + 1
|
||||
blockIDs = make([]string, numParts) // list of blocks for finalize
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
checker = newCheckForInvalidBlockOrBlob("copy", o)
|
||||
)
|
||||
g.SetLimit(f.opt.CopyConcurrency)
|
||||
|
||||
fs.Debugf(o, "Starting multipart copy with %d parts of size %v", numParts, fs.SizeSuffix(partSize))
|
||||
for partNum := uint64(0); partNum < uint64(numParts); partNum++ {
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
break
|
||||
}
|
||||
partNum := partNum // for closure
|
||||
g.Go(func() error {
|
||||
blockID := bic.newBlockID(partNum)
|
||||
options := blockblob.StageBlockFromURLOptions{
|
||||
Range: blob.HTTPRange{
|
||||
Offset: int64(partNum) * partSize,
|
||||
Count: partSize,
|
||||
},
|
||||
// Specifies the authorization scheme and signature for the copy source.
|
||||
CopySourceAuthorization: token,
|
||||
// CPKInfo *blob.CPKInfo
|
||||
// CPKScopeInfo *blob.CPKScopeInfo
|
||||
}
|
||||
// Partial last block
|
||||
if remaining := srcSize - options.Range.Offset; remaining < options.Range.Count {
|
||||
options.Range.Count = remaining
|
||||
}
|
||||
fs.Debugf(o, "multipart copy: starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(options.Range.Count), fs.SizeSuffix(options.Range.Offset), fs.SizeSuffix(srcSize))
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
checker.start()
|
||||
_, err := dstBlockBlobSVC.StageBlockFromURL(ctx, blockID, srcURL, &options)
|
||||
checker.stop()
|
||||
if err != nil {
|
||||
if checker.checkErr(ctx, err) {
|
||||
return true, err
|
||||
}
|
||||
return f.shouldRetry(ctx, err)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("multipart copy: failed to copy chunk %d with %v bytes: %w", partNum+1, -1, err)
|
||||
}
|
||||
blockIDs[partNum] = blockID
|
||||
return nil
|
||||
})
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert metadata from source object
|
||||
options := blockblob.CommitBlockListOptions{
|
||||
Metadata: srcProperties.Metadata,
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
HTTPHeaders: &blob.HTTPHeaders{
|
||||
BlobCacheControl: srcProperties.CacheControl,
|
||||
BlobContentDisposition: srcProperties.ContentDisposition,
|
||||
BlobContentEncoding: srcProperties.ContentEncoding,
|
||||
BlobContentLanguage: srcProperties.ContentLanguage,
|
||||
BlobContentMD5: srcProperties.ContentMD5,
|
||||
BlobContentType: srcProperties.ContentType,
|
||||
},
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err := dstBlockBlobSVC.CommitBlockList(ctx, blockIDs, &options)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to complete multipart copy: %w", err)
|
||||
}
|
||||
|
||||
fs.Debugf(o, "multipart copy finished")
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// Do single part copy.
|
||||
//
|
||||
// This uses these APIs:
|
||||
//
|
||||
// - Copy Blob - https://docs.microsoft.com/rest/api/storageservices/copy-blob
|
||||
// - Get Blob Properties - https://docs.microsoft.com/rest/api/storageservices/get-blob-properties
|
||||
func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath string, src *Object) (dst fs.Object, err error) {
|
||||
dstBlobSVC := f.getBlobSVC(dstContainer, dstPath)
|
||||
|
||||
// Get the source auth - none needed for same storage account
|
||||
srcURL, _, err := src.getAuth(ctx, false, f == src.fs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("single part copy: source auth: %w", err)
|
||||
}
|
||||
|
||||
// Start the copy
|
||||
options := blob.StartCopyFromURLOptions{
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
}
|
||||
var startCopy blob.StartCopyFromURLResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("single part copy: copy blob: %w", err)
|
||||
}
|
||||
|
||||
// Poll for completion if necessary
|
||||
//
|
||||
// The for loop is never executed for same storage account copies.
|
||||
copyStatus := startCopy.CopyStatus
|
||||
getOptions := blob.GetPropertiesOptions{}
|
||||
pollTime := 100 * time.Millisecond
|
||||
for copyStatus != nil && string(*copyStatus) == string(container.CopyStatusTypePending) {
|
||||
time.Sleep(pollTime)
|
||||
getMetadata, err := dstBlobSVC.GetProperties(ctx, &getOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = getMetadata.CopyStatus
|
||||
pollTime = min(2*pollTime, time.Second)
|
||||
}
|
||||
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
@ -1629,34 +1903,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
dstBlobSVC := f.getBlobSVC(dstContainer, dstPath)
|
||||
srcBlobSVC := srcObj.getBlobSVC()
|
||||
srcURL := srcBlobSVC.URL()
|
||||
|
||||
options := blob.StartCopyFromURLOptions{
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
}
|
||||
var startCopy blob.StartCopyFromURLResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Assume we are copying to a different storage account if we
|
||||
// are copying across configs.
|
||||
sameStorageAccount := f == srcObj.fs
|
||||
|
||||
// If we are using Microsoft Entra ID token based auth then
|
||||
// copySinglepart does not work
|
||||
usingEntraID := f.cred != nil
|
||||
|
||||
// Use multipart copy if size > cutoff
|
||||
// or using Entra ID and we are not using the same storage account
|
||||
useMultiPart := srcObj.size >= int64(f.opt.CopyCutoff) || (usingEntraID && !sameStorageAccount)
|
||||
|
||||
// Force the use of copy blob if on the same storage account
|
||||
// and the user hasn't forbidden it.
|
||||
if f.opt.UseCopyBlob && sameStorageAccount {
|
||||
useMultiPart = false
|
||||
}
|
||||
|
||||
copyStatus := startCopy.CopyStatus
|
||||
getOptions := blob.GetPropertiesOptions{}
|
||||
for copyStatus != nil && string(*copyStatus) == string(container.CopyStatusTypePending) {
|
||||
time.Sleep(1 * time.Second)
|
||||
getMetadata, err := dstBlobSVC.GetProperties(ctx, &getOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = getMetadata.CopyStatus
|
||||
if useMultiPart {
|
||||
return f.copyMultipart(ctx, remote, dstContainer, dstPath, srcObj)
|
||||
}
|
||||
|
||||
return f.NewObject(ctx, remote)
|
||||
return f.copySinglepart(ctx, remote, dstContainer, dstPath, srcObj)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@ -1888,33 +2157,66 @@ func (o *Object) getTags() (tags map[string]string) {
|
||||
// getBlobSVC creates a blob client
|
||||
func (o *Object) getBlobSVC() *blob.Client {
|
||||
container, directory := o.split()
|
||||
// If we are trying to remove an all / directory marker then
|
||||
// this will have one / too many now.
|
||||
if bucket.IsAllSlashes(o.remote) {
|
||||
directory = strings.TrimSuffix(directory, "/")
|
||||
}
|
||||
return o.fs.getBlobSVC(container, directory)
|
||||
}
|
||||
|
||||
// getBlockBlobSVC creates a block blob client
|
||||
func (o *Object) getBlockBlobSVC() *blockblob.Client {
|
||||
container, directory := o.split()
|
||||
return o.fs.getBlockBlobSVC(container, directory)
|
||||
}
|
||||
|
||||
// clearMetaData clears enough metadata so readMetaData will re-read it
|
||||
func (o *Object) clearMetaData() {
|
||||
o.modTime = time.Time{}
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
func (f *Fs) readMetaData(ctx context.Context, container, containerPath string) (blobProperties blob.GetPropertiesResponse, err error) {
|
||||
func (f *Fs) readMetaData(ctx context.Context, container, containerPath string) (blobProperties *blob.GetPropertiesResponse, err error) {
|
||||
if !f.containerOK(container) {
|
||||
return blobProperties, fs.ErrorObjectNotFound
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
blb := f.getBlobSVC(container, containerPath)
|
||||
|
||||
// Read metadata (this includes metadata)
|
||||
options := blob.GetPropertiesOptions{}
|
||||
var resp blob.GetPropertiesResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
blobProperties, err = blb.GetProperties(ctx, &options)
|
||||
resp, err = blb.GetProperties(ctx, &options)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
|
||||
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
|
||||
return blobProperties, fs.ErrorObjectNotFound
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return blobProperties, err
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// readMetaDataAlways gets the metadata unconditionally and also the blob properties.
|
||||
//
|
||||
// Sets
|
||||
//
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.md5
|
||||
func (o *Object) readMetaDataAlways(ctx context.Context) (blobProperties *blob.GetPropertiesResponse, err error) {
|
||||
container, containerPath := o.split()
|
||||
blobProperties, err = o.fs.readMetaData(ctx, container, containerPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blobProperties, nil
|
||||
}
|
||||
@ -1931,12 +2233,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if !o.modTime.IsZero() {
|
||||
return nil
|
||||
}
|
||||
container, containerPath := o.split()
|
||||
blobProperties, err := o.fs.readMetaData(ctx, container, containerPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.decodeMetaDataFromPropertiesResponse(&blobProperties)
|
||||
_, err = o.readMetaDataAlways(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
@ -2043,6 +2341,58 @@ func (rs *readSeekCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// a creator for blockIDs with an incrementing part and a random part
|
||||
//
|
||||
// The random part is to make sure that blockIDs don't collide between
|
||||
// uploads. We need block IDs not to be shared between upload attempts
|
||||
// so we can remove the uncommitted blocks properly on errors.
|
||||
type blockIDCreator struct {
|
||||
random [8]byte // randomness to make sure blocks don't collide
|
||||
}
|
||||
|
||||
// create a new blockID creator with a random suffix
|
||||
func newBlockIDCreator() (bic *blockIDCreator, err error) {
|
||||
bic = &blockIDCreator{}
|
||||
n, err := rand.Read(bic.random[:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("crypto rand failed: %w", err)
|
||||
}
|
||||
if n != len(bic.random) {
|
||||
return nil, errors.New("crypto rand failed: short read")
|
||||
}
|
||||
return bic, nil
|
||||
}
|
||||
|
||||
// create a new block ID for chunkNumber
|
||||
func (bic *blockIDCreator) newBlockID(chunkNumber uint64) string {
|
||||
var binaryBlockID [16]byte
|
||||
// block counter as LSB first 8 bytes
|
||||
binary.BigEndian.PutUint64(binaryBlockID[:8], chunkNumber)
|
||||
// random bits at the end
|
||||
copy(binaryBlockID[8:], bic.random[:])
|
||||
// return base64 encoded value
|
||||
return base64.StdEncoding.EncodeToString(binaryBlockID[:])
|
||||
}
|
||||
|
||||
// Check the chunkNumber is correct in the id
|
||||
func (bic *blockIDCreator) checkID(chunkNumber uint64, id string) error {
|
||||
binaryBlockID, err := base64.StdEncoding.DecodeString(id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("internal error: bad block ID: %w", err)
|
||||
}
|
||||
if len(binaryBlockID) != 16 {
|
||||
return errors.New("internal error: bad block ID length")
|
||||
}
|
||||
gotChunkNumber := binary.BigEndian.Uint64(binaryBlockID[:8])
|
||||
if chunkNumber != gotChunkNumber {
|
||||
return fmt.Errorf("internal error: expecting decoded chunkNumber %d but got %d", chunkNumber, gotChunkNumber)
|
||||
}
|
||||
if !bytes.Equal(binaryBlockID[8:], bic.random[:]) {
|
||||
return fmt.Errorf("internal error: random bytes are incorrect")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// record chunk number and id for Close
|
||||
type azBlock struct {
|
||||
chunkNumber uint64
|
||||
@ -2058,6 +2408,8 @@ type azChunkWriter struct {
|
||||
blocksMu sync.Mutex // protects the below
|
||||
blocks []azBlock // list of blocks for finalize
|
||||
o *Object
|
||||
bic *blockIDCreator
|
||||
checker *checkForInvalidBlockOrBlob
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
@ -2114,16 +2466,96 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
f: f,
|
||||
ui: ui,
|
||||
o: o,
|
||||
checker: newCheckForInvalidBlockOrBlob("upload", o),
|
||||
}
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(partSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
chunkWriter.bic, err = newBlockIDCreator()
|
||||
if err != nil {
|
||||
return info, nil, err
|
||||
}
|
||||
fs.Debugf(o, "open chunk writer: started multipart upload")
|
||||
return info, chunkWriter, nil
|
||||
}
|
||||
|
||||
// isInvalidBlockOrBlob looks for the InvalidBlockOrBlob error in err
|
||||
// returning true if it is found
|
||||
func isInvalidBlockOrBlob(err error) bool {
|
||||
var storageErr *azcore.ResponseError
|
||||
if errors.As(err, &storageErr) {
|
||||
return storageErr.ErrorCode == string(bloberror.InvalidBlobOrBlock)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Struct to hold state for checking for InvalidBlockOrBlob
|
||||
type checkForInvalidBlockOrBlob struct {
|
||||
startMu sync.Mutex // hold when starting transactions
|
||||
inFlight sync.WaitGroup // transactions in flight
|
||||
what string // "copy" or "upload"
|
||||
o *Object // object we are working on
|
||||
cleared bool // set if we have cleared the uncommitted blocks - we only do this once
|
||||
}
|
||||
|
||||
// Make InvalidBlockOrBlob checker
|
||||
func newCheckForInvalidBlockOrBlob(what string, o *Object) *checkForInvalidBlockOrBlob {
|
||||
return &checkForInvalidBlockOrBlob{
|
||||
what: what,
|
||||
o: o,
|
||||
}
|
||||
}
|
||||
|
||||
// start marks that there is a transaction in progress
|
||||
func (c *checkForInvalidBlockOrBlob) start() {
|
||||
c.startMu.Lock()
|
||||
defer c.startMu.Unlock()
|
||||
c.inFlight.Add(1)
|
||||
}
|
||||
|
||||
// stop marks that this transaction has finished
|
||||
func (c *checkForInvalidBlockOrBlob) stop() {
|
||||
c.inFlight.Done()
|
||||
}
|
||||
|
||||
// checkErr looks for the InvalidBlockOrBlob error in err, and if it
|
||||
// is found, it clears uncommitted blocks in o to clear the error.
|
||||
//
|
||||
// It returns a bool indicating whether the error was found or not.
|
||||
//
|
||||
// See https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
|
||||
func (c *checkForInvalidBlockOrBlob) checkErr(ctx context.Context, err error) (result bool) {
|
||||
// defer log.Trace(c.o, "err=%#v, what=%q", err, c.what)("result=%v", &result)
|
||||
if !isInvalidBlockOrBlob(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Prevent more transactions starting
|
||||
c.startMu.Lock()
|
||||
defer c.startMu.Unlock()
|
||||
|
||||
if c.cleared {
|
||||
fs.Debugf(c.o, "multipart %s: received %s error: already cleared", c.what, bloberror.InvalidBlobOrBlock)
|
||||
return true
|
||||
}
|
||||
|
||||
// Wait for any other outstanding transactions to finish
|
||||
c.inFlight.Wait()
|
||||
|
||||
// Clear uncommitted blocks
|
||||
fs.Debugf(c.o, "multipart %s: received %s error: clearing uncommitted blocks and retrying", c.what, bloberror.InvalidBlobOrBlock)
|
||||
clearErr := c.o.clearUncommittedBlocks(ctx)
|
||||
if clearErr != nil {
|
||||
fs.Debugf(c.o, "multipart %s: error fixing %s: %v", c.what, bloberror.InvalidBlobOrBlock, clearErr)
|
||||
}
|
||||
fs.Debugf(c.o, "multipart %s: fixed %s", c.what, bloberror.InvalidBlobOrBlock)
|
||||
c.cleared = true
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
|
||||
if chunkNumber < 0 {
|
||||
@ -2143,10 +2575,8 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
}
|
||||
md5sum := m.Sum(nil)
|
||||
|
||||
// increment the blockID and save the blocks for finalize
|
||||
var binaryBlockID [8]byte // block counter as LSB first 8 bytes
|
||||
binary.LittleEndian.PutUint64(binaryBlockID[:], uint64(chunkNumber))
|
||||
blockID := base64.StdEncoding.EncodeToString(binaryBlockID[:])
|
||||
// Create a new blockID
|
||||
blockID := w.bic.newBlockID(uint64(chunkNumber))
|
||||
|
||||
// Save the blockID for the commit
|
||||
w.blocksMu.Lock()
|
||||
@ -2166,8 +2596,13 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
// Specify the transactional md5 for the body, to be validated by the service.
|
||||
TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
|
||||
}
|
||||
w.checker.start()
|
||||
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
|
||||
w.checker.stop()
|
||||
if err != nil {
|
||||
if w.checker.checkErr(ctx, err) {
|
||||
return true, err
|
||||
}
|
||||
if chunkNumber <= 8 {
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
}
|
||||
@ -2187,31 +2622,111 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
return currentChunkSize, err
|
||||
}
|
||||
|
||||
// Abort the multipart upload.
|
||||
// Clear uncommitted blocks
|
||||
//
|
||||
// FIXME it would be nice to delete uncommitted blocks.
|
||||
// There isn't an API to clear uncommitted blocks.
|
||||
//
|
||||
// See: https://github.com/rclone/rclone/issues/5583
|
||||
// However they are released when a Commit is called. Doing this will
|
||||
// instantiate the object so we don't want to overwrite an existing
|
||||
// object.
|
||||
//
|
||||
// However there doesn't seem to be an easy way of doing this other than
|
||||
// by deleting the target.
|
||||
// We will use this algorithm:
|
||||
//
|
||||
// This means that a failed upload deletes the target which isn't ideal.
|
||||
//
|
||||
// Uploading a zero length blob and deleting it will remove the
|
||||
// uncommitted blocks I think.
|
||||
//
|
||||
// Could check to see if a file exists already and if it doesn't then
|
||||
// create a 0 length file and delete it to flush the uncommitted
|
||||
// blocks.
|
||||
//
|
||||
// This is what azcopy does
|
||||
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
|
||||
func (w *azChunkWriter) Abort(ctx context.Context) error {
|
||||
fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
|
||||
// Attempt to read committed blocks from the object
|
||||
// If the object exists
|
||||
// - Commit the existing blocks again
|
||||
// - This should get rid of the uncommitted blocks without changing the existing object
|
||||
// If the object does not exist then
|
||||
// - Commit an empty block list
|
||||
// - This will get rid of the uncommitted blocks
|
||||
// - This will also create a 0 length blob
|
||||
// - So delete the 0 length blob
|
||||
func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
|
||||
fs.Debugf(o, "Clearing uncommitted blocks")
|
||||
var (
|
||||
blockBlobSVC = o.getBlockBlobSVC()
|
||||
objectExists = true
|
||||
blockIDs []string
|
||||
blockList blockblob.GetBlockListResponse
|
||||
properties *blob.GetPropertiesResponse
|
||||
options *blockblob.CommitBlockListOptions
|
||||
)
|
||||
|
||||
properties, err = o.readMetaDataAlways(ctx)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
objectExists = false
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("clear uncommitted blocks: failed to read metadata: %w", err)
|
||||
}
|
||||
|
||||
if objectExists {
|
||||
// Get the committed block list
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("clear uncommitted blocks: failed to read uncommitted block list: %w", err)
|
||||
}
|
||||
if len(blockList.UncommittedBlocks) == 0 {
|
||||
fs.Debugf(o, "No uncommitted blocks - exiting")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(o, "%d Uncommitted blocks found", len(blockList.UncommittedBlocks))
|
||||
objectExists = true
|
||||
uncommittedBlocks := make(map[string]struct{}, len(blockList.UncommittedBlocks))
|
||||
for _, block := range blockList.UncommittedBlocks {
|
||||
uncommittedBlocks[*block.Name] = struct{}{}
|
||||
}
|
||||
for _, block := range blockList.CommittedBlocks {
|
||||
name := *block.Name
|
||||
if _, found := uncommittedBlocks[name]; found {
|
||||
return fmt.Errorf("clear uncommitted blocks: can't safely clear uncommitted blocks as committed and uncommitted IDs overlap. Delete the existing object to clear the uncommitted blocks")
|
||||
}
|
||||
blockIDs = append(blockIDs, name)
|
||||
}
|
||||
|
||||
// Reconstruct metadata from existing object as CommitBlockList overwrites it
|
||||
options = &blockblob.CommitBlockListOptions{
|
||||
Metadata: properties.Metadata,
|
||||
Tier: (*blob.AccessTier)(properties.AccessTier),
|
||||
HTTPHeaders: &blob.HTTPHeaders{
|
||||
BlobCacheControl: properties.CacheControl,
|
||||
BlobContentDisposition: properties.ContentDisposition,
|
||||
BlobContentEncoding: properties.ContentEncoding,
|
||||
BlobContentLanguage: properties.ContentLanguage,
|
||||
BlobContentMD5: properties.ContentMD5,
|
||||
BlobContentType: properties.ContentType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Commit only the committed blocks
|
||||
fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("clear uncommitted blocks: failed to commit block list: %w", err)
|
||||
}
|
||||
|
||||
// If object didn't exist before, then delete it
|
||||
if !objectExists {
|
||||
fs.Debugf(o, "Removing empty object")
|
||||
err = o.Remove(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("clear uncommitted blocks: failed to remove empty object: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Abort the multipart upload.
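// It delegates to clearUncommittedBlocks, so a failed or cancelled multipart
// upload does not leave stale uncommitted blocks behind on the target blob
// (see issue #5583).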
|
||||
func (w *azChunkWriter) Abort(ctx context.Context) error {
|
||||
return w.o.clearUncommittedBlocks(ctx)
|
||||
}
|
||||
|
||||
// Close and finalise the multipart upload
|
||||
func (w *azChunkWriter) Close(ctx context.Context) (err error) {
|
||||
// sort the completed parts by part number
|
||||
@ -2225,13 +2740,9 @@ func (w *azChunkWriter) Close(ctx context.Context) (err error) {
|
||||
if w.blocks[i].chunkNumber != uint64(i) {
|
||||
return fmt.Errorf("internal error: expecting chunkNumber %d but got %d", i, w.blocks[i].chunkNumber)
|
||||
}
|
||||
chunkBytes, err := base64.StdEncoding.DecodeString(w.blocks[i].id)
|
||||
err := w.bic.checkID(w.blocks[i].chunkNumber, w.blocks[i].id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("internal error: bad block ID: %w", err)
|
||||
}
|
||||
chunkNumber := binary.LittleEndian.Uint64(chunkBytes)
|
||||
if w.blocks[i].chunkNumber != chunkNumber {
|
||||
return fmt.Errorf("internal error: expecting decoded chunkNumber %d but got %d", w.blocks[i].chunkNumber, chunkNumber)
|
||||
return err
|
||||
}
|
||||
blockIDs[i] = w.blocks[i].id
|
||||
}
|
||||
|
@ -3,16 +3,149 @@
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// Check first feature flags are set on this
|
||||
// remote
|
||||
func TestBlockIDCreator(t *testing.T) {
|
||||
// Check creation and random number
|
||||
bic, err := newBlockIDCreator()
|
||||
require.NoError(t, err)
|
||||
bic2, err := newBlockIDCreator()
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, bic.random, bic2.random)
|
||||
assert.NotEqual(t, bic.random, [8]byte{})
|
||||
|
||||
// Set random to known value for tests
|
||||
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
chunkNumber := uint64(0xFEDCBA9876543210)
|
||||
|
||||
// Check creation of ID
|
||||
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
|
||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
|
||||
got := bic.newBlockID(chunkNumber)
|
||||
assert.Equal(t, want, got)
|
||||
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
|
||||
|
||||
// Test checkID is working
|
||||
assert.NoError(t, bic.checkID(chunkNumber, got))
|
||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
|
||||
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
|
||||
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
|
||||
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
|
||||
}
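A minimal sketch, for reference, of a block ID creator consistent with the behaviour TestBlockIDCreator exercises above; the type, field and method names mirror the test, but the real implementation in the backend may differ in detail:

package azureblob

import (
	"bytes"
	"crypto/rand"
	"encoding/base64"
	"encoding/binary"
	"errors"
	"fmt"
)

// blockIDCreator (sketch) builds 16 byte block IDs: 8 bytes of big-endian
// chunk number followed by 8 instance-specific random bytes, base64 encoded.
type blockIDCreator struct {
	random [8]byte // instance specific random bytes
}

// newBlockIDCreator seeds the random suffix from crypto/rand.
func newBlockIDCreator() (*blockIDCreator, error) {
	bic := &blockIDCreator{}
	if _, err := rand.Read(bic.random[:]); err != nil {
		return nil, fmt.Errorf("crypto rand failed: %w", err)
	}
	return bic, nil
}

// newBlockID encodes chunkNumber plus the random suffix as a base64 block ID.
func (bic *blockIDCreator) newBlockID(chunkNumber uint64) string {
	var binaryBlockID [16]byte
	binary.BigEndian.PutUint64(binaryBlockID[:8], chunkNumber)
	copy(binaryBlockID[8:], bic.random[:])
	return base64.StdEncoding.EncodeToString(binaryBlockID[:])
}

// checkID verifies that blockID decodes back to chunkNumber and carries this
// creator's random suffix.
func (bic *blockIDCreator) checkID(chunkNumber uint64, blockID string) error {
	got, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return fmt.Errorf("bad block ID: %w", err)
	}
	if len(got) != 16 {
		return errors.New("bad block ID length")
	}
	if decoded := binary.BigEndian.Uint64(got[:8]); decoded != chunkNumber {
		return fmt.Errorf("expecting decoded chunkNumber %d but got %d", chunkNumber, decoded)
	}
	if !bytes.Equal(got[8:], bic.random[:]) {
		return errors.New("block ID random bytes do not match")
	}
	return nil
}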
|
||||
|
||||
func (f *Fs) testFeatures(t *testing.T) {
|
||||
// Check first feature flags are set on this remote
|
||||
enabled := f.Features().SetTier
|
||||
assert.True(t, enabled)
|
||||
enabled = f.Features().GetTier
|
||||
assert.True(t, enabled)
|
||||
}
|
||||
|
||||
type ReadSeekCloser struct {
|
||||
*strings.Reader
|
||||
}
|
||||
|
||||
func (r *ReadSeekCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stage a block at remote but don't commit it
|
||||
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
|
||||
var (
|
||||
containerName, blobPath = f.split(remote)
|
||||
containerClient = f.cntSVC(containerName)
|
||||
blobClient = containerClient.NewBlockBlobClient(blobPath)
|
||||
data = "uncommitted data"
|
||||
blockID = "1"
|
||||
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
|
||||
)
|
||||
r := &ReadSeekCloser{strings.NewReader(data)}
|
||||
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the block is staged but not committed
|
||||
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
|
||||
require.NoError(t, err)
|
||||
found := false
|
||||
for _, block := range blockList.UncommittedBlocks {
|
||||
if *block.Name == blockIDBase64 {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.True(t, found, "Block ID not found in uncommitted blocks")
|
||||
}
|
||||
|
||||
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
|
||||
//
|
||||
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
|
||||
//
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
|
||||
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
|
||||
var (
|
||||
ctx = context.Background()
|
||||
remote = "testBlob"
|
||||
)
|
||||
|
||||
// Force multipart copy of the blob by lowering the copy cutoff
|
||||
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
|
||||
f.opt.UseCopyBlob = false
|
||||
f.opt.CopyCutoff = f.opt.ChunkSize
|
||||
defer func() {
|
||||
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
|
||||
}()
|
||||
|
||||
// Create a blob with uncommitted blocks
|
||||
f.stageBlockWithoutCommit(ctx, t, remote)
|
||||
|
||||
// Now attempt to overwrite the block with a different sized block ID to provoke this error
|
||||
|
||||
// Check the object does not exist
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Upload a multipart file over the block with uncommitted chunks of a different ID size
|
||||
size := 4*int(f.opt.ChunkSize) - 1
|
||||
contents := random.String(size)
|
||||
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
|
||||
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
|
||||
// Check size
|
||||
assert.Equal(t, int64(size), o.Size())
|
||||
|
||||
// Create a new blob with uncommitted blocks
|
||||
newRemote := "testBlob2"
|
||||
f.stageBlockWithoutCommit(ctx, t, newRemote)
|
||||
|
||||
// Copy over that block
|
||||
dst, err := f.Copy(ctx, o, newRemote)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check basics
|
||||
assert.Equal(t, int64(size), dst.Size())
|
||||
assert.Equal(t, newRemote, dst.Remote())
|
||||
|
||||
// Check contents
|
||||
gotContents := fstests.ReadObject(ctx, t, dst, -1)
|
||||
assert.Equal(t, contents, gotContents)
|
||||
|
||||
// Remove the object
|
||||
require.NoError(t, dst.Remove(ctx))
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Features", f.testFeatures)
|
||||
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
|
||||
}
|
||||
|
@ -15,13 +15,17 @@ import (
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
name := "TestAzureBlob"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: defaultChunkSize,
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "directory_markers", Value: "true"},
|
||||
{Name: name, Key: "use_copy_blob", Value: "false"},
|
||||
},
|
||||
})
|
||||
}
|
||||
@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setCopyCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
func TestValidateAccessTier(t *testing.T) {
|
||||
|
@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "disable_instance_discovery",
|
||||
Help: `Skip requesting Microsoft Entra instance metadata
|
||||
This should be set true only by applications authenticating in
|
||||
disconnected clouds, or private clouds such as Azure Stack.
|
||||
It determines whether rclone requests Microsoft Entra instance
|
||||
metadata from ` + "`https://login.microsoft.com/`" + ` before
|
||||
authenticating.
|
||||
Setting this to true will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_az",
|
||||
Help: `Use Azure CLI tool az for authentication
|
||||
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
Setting this can be useful if you wish to use the az CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
Don't set env_auth at the same time.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
@ -319,10 +343,12 @@ type Options struct {
|
||||
Username string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
ServicePrincipalFile string `config:"service_principal_file"`
|
||||
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
|
||||
UseMSI bool `config:"use_msi"`
|
||||
MSIObjectID string `config:"msi_object_id"`
|
||||
MSIClientID string `config:"msi_client_id"`
|
||||
MSIResourceID string `config:"msi_mi_res_id"`
|
||||
UseAZ bool `config:"use_az"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
|
||||
@ -414,7 +440,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
}
|
||||
// Read credentials from the environment
|
||||
options := azidentity.DefaultAzureCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
ClientOptions: policyClientOptions,
|
||||
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
@ -425,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
|
||||
}
|
||||
case opt.UseAZ:
|
||||
var options = azidentity.AzureCLICredentialOptions{}
|
||||
cred, err = azidentity.NewAzureCLICredential(&options)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||
}
|
||||
case opt.SASURL != "":
|
||||
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
|
||||
if err != nil {
|
||||
|
@ -300,14 +300,13 @@ type Fs struct {
|
||||
|
||||
// Object describes a b2 object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // b2 id of the file
|
||||
modTime time.Time // The modified time of the object if known
|
||||
sha1 string // SHA-1 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
meta map[string]string // The object metadata if known - may be nil - with lower case keys
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // b2 id of the file
|
||||
modTime time.Time // The modified time of the object if known
|
||||
sha1 string // SHA-1 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@ -1605,9 +1604,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// For now, just set "mtime" in metadata
|
||||
o.meta = make(map[string]string, 1)
|
||||
o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1887,13 +1883,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||
Info: Info,
|
||||
}
|
||||
|
||||
// Embryonic metadata support - just mtime
|
||||
o.meta = make(map[string]string, 1)
|
||||
modTime, err := parseTimeStringHelper(info.Info[timeKey])
|
||||
if err == nil {
|
||||
o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// When reading files from B2 via cloudflare using
|
||||
// --b2-download-url cloudflare strips the Content-Length
|
||||
// headers (presumably so it can inject stuff) so use the old
|
||||
|
@ -258,12 +258,6 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
|
||||
assert.Equal(t, v, got, k)
|
||||
}
|
||||
|
||||
// mtime
|
||||
for k, v := range metadata {
|
||||
got := o.meta[k]
|
||||
assert.Equal(t, v, got, k)
|
||||
}
|
||||
|
||||
assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
|
||||
|
||||
// Modification time from the x-bz-info-src_last_modified_millis header
|
||||
|
@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
|
||||
if len(data) > maxMetadataSizeWritten {
|
||||
return nil, false, ErrMetaTooBig
|
||||
}
|
||||
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||
if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||
return nil, false, errors.New("invalid json")
|
||||
}
|
||||
var metadata metaSimpleJSON
|
||||
|
@ -203,7 +203,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
|
||||
if scope == scopePrefix+"drive.appfolder" {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -1212,6 +1211,7 @@ func fixMimeType(mimeTypeIn string) string {
|
||||
}
|
||||
return mimeTypeOut
|
||||
}
|
||||
|
||||
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
|
||||
out = make(map[string][]string, len(in))
|
||||
for k, v := range in {
|
||||
@ -1222,9 +1222,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func isInternalMimeType(mimeType string) bool {
|
||||
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
|
||||
}
|
||||
|
||||
func isLinkMimeType(mimeType string) bool {
|
||||
return strings.HasPrefix(mimeType, "application/x-link-")
|
||||
}
|
||||
@ -1657,7 +1659,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
|
||||
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
|
||||
func (f *Fs) newObjectWithExportInfo(
|
||||
ctx context.Context, remote string, info *drive.File,
|
||||
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
|
||||
extension, exportName, exportMimeType string, isDocument bool,
|
||||
) (o fs.Object, err error) {
|
||||
// Note that resolveShortcut will have been called already if
|
||||
// we are being called from a listing. However the drive.Item
|
||||
// will have been resolved so this will do nothing.
|
||||
@ -1848,6 +1851,7 @@ func linkTemplate(mt string) *template.Template {
|
||||
})
|
||||
return _linkTemplates[mt]
|
||||
}
|
||||
|
||||
func (f *Fs) fetchFormats(ctx context.Context) {
|
||||
fetchFormatsOnce.Do(func() {
|
||||
var about *drive.About
|
||||
@ -1893,7 +1897,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
|
||||
// Look through the exportExtensions and find the first format that can be
|
||||
// converted. If none found then return ("", "", false)
|
||||
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
|
||||
extension, mimeType string, isDocument bool) {
|
||||
extension, mimeType string, isDocument bool,
|
||||
) {
|
||||
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
|
||||
if isDocument {
|
||||
for _, _extension := range f.exportExtensions {
|
||||
@ -2689,7 +2694,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
if shortcutID != "" {
|
||||
return f.delete(ctx, shortcutID, f.opt.UseTrash)
|
||||
}
|
||||
var trashedFiles = false
|
||||
trashedFiles := false
|
||||
if check {
|
||||
found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
|
||||
if !item.Trashed {
|
||||
@ -2926,7 +2931,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -3187,6 +3191,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
|
||||
var startPageToken *drive.StartPageToken
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@ -4018,14 +4023,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
case "query":
|
||||
if len(arg) == 1 {
|
||||
query := arg[0]
|
||||
var results, err = f.query(ctx, query)
|
||||
results, err := f.query(ctx, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
|
||||
}
|
||||
return results, nil
|
||||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
return nil, errors.New("need a query argument")
|
||||
case "rescue":
|
||||
dirID := ""
|
||||
_, delete := opt["delete"]
|
||||
@ -4085,6 +4089,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
|
||||
return "", hash.ErrUnsupported
|
||||
@ -4099,7 +4104,8 @@ func (o *baseObject) Size() int64 {
|
||||
|
||||
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
|
||||
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
|
||||
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
|
||||
) {
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
@ -4312,12 +4318,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
return o.baseObject.open(ctx, o.url, options...)
|
||||
}
|
||||
|
||||
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Update the size with what we are reading as it can change from
|
||||
// the HEAD in the listing to this GET. This stops rclone marking
|
||||
// the transfer as corrupted.
|
||||
var offset, end int64 = 0, -1
|
||||
var newOptions = options[:0]
|
||||
newOptions := options[:0]
|
||||
for _, o := range options {
|
||||
// Note that Range requests don't work on Google docs:
|
||||
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
|
||||
@ -4344,9 +4351,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
var data = o.content
|
||||
data := o.content
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
@ -4371,7 +4379,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
|
||||
}
|
||||
|
||||
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
|
||||
src fs.ObjectInfo) (info *drive.File, err error) {
|
||||
src fs.ObjectInfo,
|
||||
) (info *drive.File, err error) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
size := src.Size()
|
||||
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
|
||||
@ -4449,6 +4458,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
srcMimeType := fs.MimeType(ctx, src)
|
||||
importMimeType := ""
|
||||
@ -4544,6 +4554,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
|
||||
func (o *documentObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
func (o *linkObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
@ -92,6 +92,9 @@ const (
|
||||
maxFileNameLength = 255
|
||||
)
|
||||
|
||||
type exportAPIFormat string
|
||||
type exportExtension string // dotless
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauthutil.Config{
|
||||
@ -132,6 +135,16 @@ var (
|
||||
DefaultTimeoutAsync: 10 * time.Second,
|
||||
DefaultBatchSizeAsync: 100,
|
||||
}
|
||||
|
||||
exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
|
||||
"markdown": "md",
|
||||
"html": "html",
|
||||
}
|
||||
// Populated based on exportKnownAPIFormats
|
||||
exportKnownExtensions = map[exportExtension]exportAPIFormat{}
|
||||
|
||||
paperExtension = ".paper"
|
||||
paperTemplateExtension = ".papert"
|
||||
)
|
||||
|
||||
// Gets an oauth config with the right scopes
|
||||
@ -247,23 +260,61 @@ folders.`,
|
||||
Help: "Specify a different Dropbox namespace ID to use as the root for all paths.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
|
||||
}, {
|
||||
Name: "export_formats",
|
||||
Help: `Comma separated list of preferred formats for exporting files
|
||||
|
||||
Certain Dropbox files can only be accessed by exporting them to another format.
|
||||
These include Dropbox Paper documents.
|
||||
|
||||
For each such file, rclone will choose the first format on this list that Dropbox
|
||||
considers valid. If none is valid, it will choose Dropbox's default format.
|
||||
|
||||
Known formats include: "html", "md" (markdown)`,
|
||||
Default: fs.CommaSepList{"html", "md"},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_exports",
|
||||
Help: "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "show_all_exports",
|
||||
Default: false,
|
||||
Help: `Show all exportable files in listings.
|
||||
|
||||
Adding this flag will allow all exportable files to be server side copied.
|
||||
Note that rclone doesn't add extensions to the exportable file names in this mode.
|
||||
|
||||
Do **not** use this flag when trying to download exportable files - rclone
|
||||
will fail to download them.
|
||||
`,
|
||||
Advanced: true,
|
||||
},
|
||||
}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
|
||||
})
|
||||
|
||||
for apiFormat, ext := range exportKnownAPIFormats {
|
||||
exportKnownExtensions[ext] = apiFormat
|
||||
}
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
RootNsid string `config:"root_namespace"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
SharedFiles bool `config:"shared_files"`
|
||||
SharedFolders bool `config:"shared_folders"`
|
||||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
AsyncBatch bool `config:"async_batch"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
RootNsid string `config:"root_namespace"`
|
||||
ExportFormats fs.CommaSepList `config:"export_formats"`
|
||||
SkipExports bool `config:"skip_exports"`
|
||||
ShowAllExports bool `config:"show_all_exports"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
@ -283,8 +334,18 @@ type Fs struct {
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
ns string // The namespace we are using or "" for none
|
||||
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
|
||||
exportExts []exportExtension
|
||||
}
|
||||
|
||||
type exportType int
|
||||
|
||||
const (
|
||||
notExport exportType = iota // a regular file
|
||||
exportHide // should be hidden
|
||||
exportListOnly // listable, but can't export
|
||||
exportExportable // can export
|
||||
)
|
||||
|
||||
// Object describes a dropbox object
|
||||
//
|
||||
// Dropbox Objects always have full metadata
|
||||
@ -296,6 +357,9 @@ type Object struct {
|
||||
bytes int64 // size of the object
|
||||
modTime time.Time // time it was last modified
|
||||
hash string // content_hash of the object
|
||||
|
||||
exportType exportType
|
||||
exportAPIFormat exportAPIFormat
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
@ -436,6 +500,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
HeaderGenerator: f.headerGenerator,
|
||||
}
|
||||
|
||||
for _, e := range opt.ExportFormats {
|
||||
ext := exportExtension(e)
|
||||
if exportKnownExtensions[ext] == "" {
|
||||
return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
|
||||
}
|
||||
f.exportExts = append(f.exportExts, ext)
|
||||
}
|
||||
|
||||
// unauthorized config for endpoints that fail with auth
|
||||
ucfg := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
@ -588,38 +660,126 @@ func (f *Fs) setRoot(root string) {
|
||||
}
|
||||
}
|
||||
|
||||
type getMetadataResult struct {
|
||||
entry files.IsMetadata
|
||||
notFound bool
|
||||
err error
|
||||
}
|
||||
|
||||
// getMetadata gets the metadata for a file or directory
|
||||
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
|
||||
func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
|
||||
res.err = f.pacer.Call(func() (bool, error) {
|
||||
res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
|
||||
Path: f.opt.Enc.FromStandardPath(objPath),
|
||||
})
|
||||
return shouldRetry(ctx, err)
|
||||
return shouldRetry(ctx, res.err)
|
||||
})
|
||||
if err != nil {
|
||||
switch e := err.(type) {
|
||||
if res.err != nil {
|
||||
switch e := res.err.(type) {
|
||||
case files.GetMetadataAPIError:
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
|
||||
notFound = true
|
||||
err = nil
|
||||
res.notFound = true
|
||||
res.err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getFileMetadata gets the metadata for a file
|
||||
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
|
||||
entry, notFound, err := f.getMetadata(ctx, filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Get metadata such that the result would be exported with the given extension
|
||||
// Return a channel that will eventually receive the metadata
|
||||
func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
|
||||
ch := make(chan getMetadataResult, 1)
|
||||
wantDownloadable := (wantExportExtension == "")
|
||||
go func() {
|
||||
defer close(ch)
|
||||
|
||||
res := f.getMetadata(ctx, filePath)
|
||||
info, ok := res.entry.(*files.FileMetadata)
|
||||
if !ok { // Can't check anything about file, just return what we have
|
||||
ch <- res
|
||||
return
|
||||
}
|
||||
|
||||
// Return notFound if downloadability or extension doesn't match
|
||||
if wantDownloadable != info.IsDownloadable {
|
||||
ch <- getMetadataResult{notFound: true}
|
||||
return
|
||||
}
|
||||
if !info.IsDownloadable {
|
||||
_, ext := f.chooseExportFormat(info)
|
||||
if ext != wantExportExtension {
|
||||
ch <- getMetadataResult{notFound: true}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Return our real result or error
|
||||
ch <- res
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
|
||||
// Multiple paths might be plausible, due to export path munging.
|
||||
func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
|
||||
ret = []<-chan getMetadataResult{}
|
||||
|
||||
// Prefer an exact match
|
||||
ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
|
||||
|
||||
// Check if we're plausibly an export path, otherwise we're done
|
||||
if f.opt.SkipExports || f.opt.ShowAllExports {
|
||||
return
|
||||
}
|
||||
if notFound {
|
||||
dotted := path.Ext(filePath)
|
||||
if dotted == "" {
|
||||
return
|
||||
}
|
||||
ext := exportExtension(dotted[1:])
|
||||
if exportKnownExtensions[ext] == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// We might be an export path! Try all possibilities
|
||||
base := strings.TrimSuffix(filePath, dotted)
|
||||
|
||||
// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
|
||||
if strings.HasSuffix(base, paperTemplateExtension) {
|
||||
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
|
||||
ret = append(ret, f.getMetadataForExt(ctx, base, ext))
|
||||
ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
|
||||
return
|
||||
}
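// Example (assumed, tracing the logic above): for the rclone path
// "docs/notes.md" the candidates are probed in this order:
//
//  1. "docs/notes.md"    - an ordinary, downloadable file
//  2. "docs/notes"       - an export-only doc rendered as md
//  3. "docs/notes.paper" - a legacy .paper doc rendered as md
//
// A template path such as "tpl.papert.md" is only tried as an export of
// "tpl.papert" (besides the exact match).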
|
||||
|
||||
// getFileMetadata gets the metadata for a file
|
||||
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
|
||||
var res getMetadataResult
|
||||
|
||||
// Try all possible metadatas
|
||||
possibleMetadatas := f.possibleMetadatas(ctx, filePath)
|
||||
for _, ch := range possibleMetadatas {
|
||||
res = <-ch
|
||||
|
||||
if res.err != nil {
|
||||
return nil, res.err
|
||||
}
|
||||
if !res.notFound {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if res.notFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
fileInfo, ok := entry.(*files.FileMetadata)
|
||||
|
||||
fileInfo, ok := res.entry.(*files.FileMetadata)
|
||||
if !ok {
|
||||
if _, ok = entry.(*files.FolderMetadata); ok {
|
||||
if _, ok = res.entry.(*files.FolderMetadata); ok {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
return nil, fs.ErrorNotAFile
|
||||
@ -628,15 +788,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
|
||||
}
|
||||
|
||||
// getDirMetadata gets the metadata for a directory
|
||||
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
|
||||
entry, notFound, err := f.getMetadata(ctx, dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
|
||||
res := f.getMetadata(ctx, dirPath)
|
||||
if res.err != nil {
|
||||
return nil, res.err
|
||||
}
|
||||
if notFound {
|
||||
if res.notFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
dirInfo, ok := entry.(*files.FolderMetadata)
|
||||
dirInfo, ok := res.entry.(*files.FolderMetadata)
|
||||
if !ok {
|
||||
return nil, fs.ErrorIsFile
|
||||
}
|
||||
@ -836,16 +996,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
var res *files.ListFolderResult
|
||||
for {
|
||||
if !started {
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(root),
|
||||
Recursive: false,
|
||||
Limit: 1000,
|
||||
}
|
||||
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
|
||||
arg.Recursive = false
|
||||
arg.Limit = 1000
|
||||
|
||||
if root == "/" {
|
||||
arg.Path = "" // Specify root folder as empty string
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
res, err = f.srv.ListFolder(&arg)
|
||||
res, err = f.srv.ListFolder(arg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@ -898,7 +1057,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, o)
|
||||
if o.(*Object).exportType.listable() {
|
||||
entries = append(entries, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !res.HasMore {
|
||||
@ -984,16 +1145,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
}
|
||||
|
||||
// check directory empty
|
||||
arg := files.ListFolderArg{
|
||||
Path: encRoot,
|
||||
Recursive: false,
|
||||
}
|
||||
arg := files.NewListFolderArg(encRoot)
|
||||
arg.Recursive = false
|
||||
if root == "/" {
|
||||
arg.Path = "" // Specify root folder as empty string
|
||||
}
|
||||
var res *files.ListFolderResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
res, err = f.srv.ListFolder(&arg)
|
||||
res, err = f.srv.ListFolder(arg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@ -1174,6 +1333,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
|
||||
// Some plans can't create links with expiry
|
||||
fs.Debugf(absPath, "can't create link with expiry, trying without")
|
||||
createArg.Settings.Expires = nil
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil && strings.Contains(err.Error(),
|
||||
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
|
||||
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
|
||||
@ -1338,16 +1507,14 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
|
||||
var startCursor *files.ListFolderGetLatestCursorResult
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
arg := files.ListFolderArg{
|
||||
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
|
||||
Recursive: true,
|
||||
}
|
||||
arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
|
||||
arg.Recursive = true
|
||||
|
||||
if arg.Path == "/" {
|
||||
arg.Path = ""
|
||||
}
|
||||
|
||||
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
|
||||
startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
|
||||
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
@ -1451,8 +1618,50 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
|
||||
// Find API export formats Dropbox supports for this file
|
||||
// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
|
||||
ei := info.ExportInfo
|
||||
dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
|
||||
|
||||
// Find which extensions these correspond to
|
||||
exportExtensions := map[exportExtension]exportAPIFormat{}
|
||||
var dropboxPreferredAPIFormat exportAPIFormat
|
||||
var dropboxPreferredExtension exportExtension
|
||||
for _, format := range dropboxFormatStrings {
|
||||
apiFormat := exportAPIFormat(format)
|
||||
// Only consider formats we know about
|
||||
if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
|
||||
if dropboxPreferredAPIFormat == "" {
|
||||
dropboxPreferredAPIFormat = apiFormat
|
||||
dropboxPreferredExtension = ext
|
||||
}
|
||||
exportExtensions[ext] = apiFormat
|
||||
}
|
||||
}
|
||||
|
||||
// See if the user picked a valid extension
|
||||
for _, ext := range f.exportExts {
|
||||
if apiFormat, ok := exportExtensions[ext]; ok {
|
||||
return apiFormat, ext
|
||||
}
|
||||
}
|
||||
|
||||
// If no matches, prefer the first valid format Dropbox lists
|
||||
return dropboxPreferredAPIFormat, dropboxPreferredExtension
|
||||
}
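// Worked example (assumed, tracing the logic above): with
// exportKnownAPIFormats = {"markdown": "md", "html": "html"},
// f.exportExts = ["html", "md"], and a file whose ExportInfo is
// {ExportAs: "markdown", ExportOptions: ["html"]}, Dropbox's own preference
// resolves to markdown/md, but the user's first choice "html" is available,
// so chooseExportFormat returns ("html", "html").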
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func (et exportType) listable() bool {
|
||||
return et != exportHide
|
||||
}
|
||||
|
||||
// something we should _try_ to export
|
||||
func (et exportType) exportable() bool {
|
||||
return et == exportExportable || et == exportListOnly
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
@ -1496,6 +1705,32 @@ func (o *Object) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
func (o *Object) setMetadataForExport(info *files.FileMetadata) {
|
||||
o.bytes = -1
|
||||
o.hash = ""
|
||||
|
||||
if o.fs.opt.SkipExports {
|
||||
o.exportType = exportHide
|
||||
return
|
||||
}
|
||||
if o.fs.opt.ShowAllExports {
|
||||
o.exportType = exportListOnly
|
||||
return
|
||||
}
|
||||
|
||||
var exportExt exportExtension
|
||||
o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
|
||||
if o.exportAPIFormat == "" {
|
||||
o.exportType = exportHide
|
||||
} else {
|
||||
o.exportType = exportExportable
|
||||
// get rid of any paper extension, if present
|
||||
o.remote = strings.TrimSuffix(o.remote, paperExtension)
|
||||
// add the export extension
|
||||
o.remote += "." + string(exportExt)
|
||||
}
|
||||
}
|
||||
|
||||
// setMetadataFromEntry sets the fs data from a files.FileMetadata
|
||||
//
|
||||
// This isn't a complete set of metadata and has an inaccurate date
|
||||
@ -1504,6 +1739,10 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
|
||||
o.bytes = int64(info.Size)
|
||||
o.modTime = info.ClientModified
|
||||
o.hash = info.ContentHash
|
||||
|
||||
if !info.IsDownloadable {
|
||||
o.setMetadataForExport(info)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1567,6 +1806,27 @@ func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
|
||||
if o.exportType == exportListOnly || o.exportAPIFormat == "" {
|
||||
fs.Debugf(o.remote, "No export format found")
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
|
||||
var exportResult *files.ExportResult
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
exportResult, in, err = o.fs.srv.Export(&arg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.bytes = int64(exportResult.ExportMetadata.Size)
|
||||
o.hash = exportResult.ExportMetadata.ExportHash
|
||||
return
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.SharedFiles {
|
||||
@ -1586,6 +1846,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return
|
||||
}
|
||||
|
||||
if o.exportType.exportable() {
|
||||
return o.export(ctx)
|
||||
}
|
||||
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
headers := fs.OpenOptionHeaders(options)
|
||||
arg := files.DownloadArg{
|
||||
|
@ -1,9 +1,16 @@
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalCheckPathLength(t *testing.T) {
|
||||
@ -42,3 +49,54 @@ func TestInternalCheckPathLength(t *testing.T) {
|
||||
assert.Equal(t, test.ok, err == nil, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) importPaperForTest(t *testing.T) {
|
||||
content := `# test doc
|
||||
|
||||
Lorem ipsum __dolor__ sit amet
|
||||
[link](http://google.com)
|
||||
`
|
||||
|
||||
arg := files.PaperCreateArg{
|
||||
Path: f.slashRootSlash + "export.paper",
|
||||
ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
|
||||
}
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
reader := strings.NewReader(content)
|
||||
_, err = f.srv.PaperCreate(&arg, reader)
|
||||
return shouldRetry(context.Background(), err)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestPaperExport(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f.importPaperForTest(t)
|
||||
|
||||
f.exportExts = []exportExtension{"html"}
|
||||
|
||||
obj, err := f.NewObject(ctx, "export.html")
|
||||
require.NoError(t, err)
|
||||
|
||||
rc, err := obj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
|
||||
buf, err := io.ReadAll(rc)
|
||||
require.NoError(t, err)
|
||||
text := string(buf)
|
||||
|
||||
for _, excerpt := range []string{
|
||||
"Lorem ipsum",
|
||||
"<b>dolor</b>",
|
||||
`href="http://google.com"`,
|
||||
} {
|
||||
require.Contains(t, text, excerpt)
|
||||
}
|
||||
}
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PaperExport", f.InternalTestPaperExport)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
@ -476,7 +476,7 @@ func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID st
|
||||
|
||||
// CopyDocByItemID copies a document by its item ID.
|
||||
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
|
||||
// putting name in info doesnt work. extension does work so assume this is a bug in the endpoint
|
||||
// putting name in info doesn't work. extension does work so assume this is a bug in the endpoint
|
||||
values := map[string]any{
|
||||
"info_to_update": map[string]any{},
|
||||
}
|
||||
|
@ -131,7 +131,7 @@ func init() {
|
||||
Help: "Microsoft Cloud for US Government",
|
||||
}, {
|
||||
Value: regionDE,
|
||||
Help: "Microsoft Cloud Germany",
|
||||
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
|
||||
}, {
|
||||
Value: regionCN,
|
||||
Help: "Azure and Office 365 operated by Vnet Group in China",
|
||||
|
@ -92,6 +92,21 @@ Note that these chunks are buffered in memory so increasing them will
|
||||
increase memory use.`,
|
||||
Default: 10 * fs.Mebi,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "access",
|
||||
Help: "Files and folders will be uploaded with this access permission (default private)",
|
||||
Default: "private",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
Help: "The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them.",
|
||||
}, {
|
||||
Value: "public",
|
||||
Help: "The file or folder can be downloaded by anyone from a web browser. The link can be shared in any way,",
|
||||
}, {
|
||||
Value: "hidden",
|
||||
Help: "The file or folder can be accessed has the same restrictions as Public if the user knows the URL of the file or folder link in order to access the contents",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
}
|
||||
@ -102,6 +117,7 @@ type Options struct {
|
||||
Password string `config:"password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Access string `config:"access"`
|
||||
}
|
||||
|
||||
// Fs represents a remote server
|
||||
@ -735,6 +751,23 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// getAccessLevel is a helper function to determine access level integer
|
||||
func getAccessLevel(access string) int64 {
|
||||
var accessLevel int64
|
||||
switch access {
|
||||
case "private":
|
||||
accessLevel = 0
|
||||
case "public":
|
||||
accessLevel = 1
|
||||
case "hidden":
|
||||
accessLevel = 2
|
||||
default:
|
||||
accessLevel = 0
|
||||
fs.Errorf(nil, "Invalid access: %s, defaulting to private", access)
|
||||
}
|
||||
return accessLevel
|
||||
}
|
||||
|
||||
// DirCacher methods
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
@ -747,7 +780,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
||||
SessionID: f.session.SessionID,
|
||||
FolderName: f.opt.Enc.FromStandardName(leaf),
|
||||
FolderSubParent: pathID,
|
||||
FolderIsPublic: 0,
|
||||
FolderIsPublic: getAccessLevel(f.opt.Access),
|
||||
FolderPublicUpl: 0,
|
||||
FolderPublicDisplay: 0,
|
||||
FolderPublicDnl: 0,
|
||||
@ -1080,7 +1113,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Set permissions
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
|
||||
update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
|
||||
// fs.Debugf(nil, "Permissions : %#v", update)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
59
backend/s3/ibm_signer.go
Normal file
@ -0,0 +1,59 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/IBM/go-sdk-core/v5/core"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
)
|
||||
|
||||
// Authenticator defines an interface for obtaining an IAM token.
|
||||
type Authenticator interface {
|
||||
GetToken() (string, error)
|
||||
}
|
||||
|
||||
// IbmIamSigner is a structure for signing requests using IBM IAM.
|
||||
// Requires APIKey and resource InstanceID.
|
||||
type IbmIamSigner struct {
|
||||
APIKey string
|
||||
InstanceID string
|
||||
Auth Authenticator
|
||||
}
|
||||
|
||||
// SignHTTP signs requests using an IBM IAM token.
|
||||
func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
|
||||
var authenticator Authenticator
|
||||
if signer.Auth != nil {
|
||||
authenticator = signer.Auth
|
||||
} else {
|
||||
authenticator = &core.IamAuthenticator{ApiKey: signer.APIKey}
|
||||
}
|
||||
token, err := authenticator.GetToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
req.Header.Set("ibm-service-instance-id", signer.InstanceID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NoOpCredentialsProvider is needed since the S3 SDK requires credentials, even though authentication happens via IBM IAM.
|
||||
type NoOpCredentialsProvider struct{}
|
||||
|
||||
// Retrieve returns mock credentials for the NoOpCredentialsProvider.
|
||||
func (n *NoOpCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
|
||||
return aws.Credentials{
|
||||
AccessKeyID: "NoOpAccessKey",
|
||||
SecretAccessKey: "NoOpSecretKey",
|
||||
SessionToken: "",
|
||||
Source: "NoOpCredentialsProvider",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired always returns false
|
||||
func (n *NoOpCredentialsProvider) IsExpired() bool {
|
||||
return false
|
||||
}
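A minimal sketch of how a signer and credentials provider like these could be wired into an aws-sdk-go-v2 S3 client. The s3.Options fields used here are part of the SDK, but the helper name, region and endpoint are illustrative assumptions, not necessarily how rclone itself wires things up:

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	s3sdk "github.com/aws/aws-sdk-go-v2/service/s3"
)

// newIBMCOSClient (hypothetical helper) plugs the IAM token signer into the
// SDK: the no-op credentials satisfy the SDK's requirement for a credentials
// provider, while every request is actually authenticated by the Bearer token
// that IbmIamSigner.SignHTTP sets.
func newIBMCOSClient(endpoint, apiKey, instanceID string) *s3sdk.Client {
	cfg := aws.Config{
		Region:      "us-standard", // illustrative region name
		Credentials: &NoOpCredentialsProvider{},
	}
	return s3sdk.NewFromConfig(cfg, func(o *s3sdk.Options) {
		o.BaseEndpoint = aws.String(endpoint)
		o.HTTPSignerV4 = &IbmIamSigner{APIKey: apiKey, InstanceID: instanceID}
	})
}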
|
47
backend/s3/ibm_signer_test.go
Normal file
@ -0,0 +1,47 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type MockAuthenticator struct {
|
||||
Token string
|
||||
Error error
|
||||
}
|
||||
|
||||
func (m *MockAuthenticator) GetToken() (string, error) {
|
||||
return m.Token, m.Error
|
||||
}
|
||||
|
||||
func TestSignHTTP(t *testing.T) {
|
||||
apiKey := "mock-api-key"
|
||||
instanceID := "mock-instance-id"
|
||||
token := "mock-iam-token"
|
||||
mockAuth := &MockAuthenticator{
|
||||
Token: token,
|
||||
Error: nil,
|
||||
}
|
||||
signer := &IbmIamSigner{
|
||||
APIKey: apiKey,
|
||||
InstanceID: instanceID,
|
||||
Auth: mockAuth,
|
||||
}
|
||||
req, err := http.NewRequest("GET", "https://example.com", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create HTTP request: %v", err)
|
||||
}
|
||||
credentials := aws.Credentials{
|
||||
AccessKeyID: "mock-access-key",
|
||||
SecretAccessKey: "mock-secret-key",
|
||||
}
|
||||
err = signer.SignHTTP(context.TODO(), credentials, req, "payload-hash", "service", "region", time.Now())
|
||||
assert.NoError(t, err, "Expected no error")
|
||||
assert.Equal(t, "Bearer "+token, req.Header.Get("Authorization"), "Authorization header should be set correctly")
|
||||
assert.Equal(t, instanceID, req.Header.Get("ibm-service-instance-id"), "ibm-service-instance-id header should be set correctly")
|
||||
}
|
203
backend/s3/s3.go
@ -36,8 +36,8 @@ import (
|
||||
"github.com/aws/smithy-go/logging"
|
||||
"github.com/aws/smithy-go/middleware"
|
||||
smithyhttp "github.com/aws/smithy-go/transport/http"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
@ -934,34 +934,67 @@ func init() {
|
||||
Help: "The default endpoint\nIran",
|
||||
}},
|
||||
}, {
|
||||
// Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
|
||||
// Linode endpoints: https://techdocs.akamai.com/cloud-computing/docs/object-storage-product-limits#supported-endpoint-types-by-region
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Linode Object Storage API.",
|
||||
Provider: "Linode",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "nl-ams-1.linodeobjects.com",
|
||||
Help: "Amsterdam (Netherlands), nl-ams-1",
|
||||
}, {
|
||||
Value: "us-southeast-1.linodeobjects.com",
|
||||
Help: "Atlanta, GA (USA), us-southeast-1",
|
||||
}, {
|
||||
Value: "in-maa-1.linodeobjects.com",
|
||||
Help: "Chennai (India), in-maa-1",
|
||||
}, {
|
||||
Value: "us-ord-1.linodeobjects.com",
|
||||
Help: "Chicago, IL (USA), us-ord-1",
|
||||
}, {
|
||||
Value: "eu-central-1.linodeobjects.com",
|
||||
Help: "Frankfurt (Germany), eu-central-1",
|
||||
}, {
|
||||
Value: "id-cgk-1.linodeobjects.com",
|
||||
Help: "Jakarta (Indonesia), id-cgk-1",
|
||||
}, {
|
||||
Value: "gb-lon-1.linodeobjects.com",
|
||||
Help: "London 2 (Great Britain), gb-lon-1",
|
||||
}, {
|
||||
Value: "us-lax-1.linodeobjects.com",
|
||||
Help: "Los Angeles, CA (USA), us-lax-1",
|
||||
}, {
|
||||
Value: "es-mad-1.linodeobjects.com",
|
||||
Help: "Madrid (Spain), es-mad-1",
|
||||
}, {
|
||||
Value: "au-mel-1.linodeobjects.com",
|
||||
Help: "Melbourne (Australia), au-mel-1",
|
||||
}, {
|
||||
Value: "us-mia-1.linodeobjects.com",
|
||||
Help: "Miami, FL (USA), us-mia-1",
|
||||
}, {
|
||||
Value: "it-mil-1.linodeobjects.com",
|
||||
Help: "Milan (Italy), it-mil-1",
|
||||
}, {
|
||||
Value: "us-east-1.linodeobjects.com",
|
||||
Help: "Newark, NJ (USA), us-east-1",
|
||||
}, {
|
||||
Value: "jp-osa-1.linodeobjects.com",
|
||||
Help: "Osaka (Japan), jp-osa-1",
|
||||
}, {
|
||||
Value: "fr-par-1.linodeobjects.com",
|
||||
Help: "Paris (France), fr-par-1",
|
||||
}, {
|
||||
Value: "br-gru-1.linodeobjects.com",
|
||||
Help: "São Paulo (Brazil), br-gru-1",
|
||||
}, {
|
||||
Value: "us-sea-1.linodeobjects.com",
|
||||
Help: "Seattle, WA (USA), us-sea-1",
|
||||
}, {
|
||||
Value: "ap-south-1.linodeobjects.com",
|
||||
Help: "Singapore ap-south-1",
|
||||
Help: "Singapore, ap-south-1",
|
||||
}, {
|
||||
Value: "sg-sin-1.linodeobjects.com",
|
||||
Help: "Singapore 2, sg-sin-1",
|
||||
}, {
|
||||
Value: "se-sto-1.linodeobjects.com",
|
||||
Help: "Stockholm (Sweden), se-sto-1",
|
||||
@ -1356,6 +1389,10 @@ func init() {
|
||||
Value: "sfo3.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces San Francisco 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "sfo2.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces San Francisco 2",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "fra1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Frankfurt 1",
|
||||
@ -1372,6 +1409,18 @@ func init() {
|
||||
Value: "sgp1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Singapore 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "lon1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces London 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "tor1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Toronto 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "blr1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Bangalore 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "localhost:8333",
|
||||
Help: "SeaweedFS S3 localhost",
|
||||
@ -2664,6 +2713,34 @@ knows about - please make a bug report if not.
|
||||
You can change this if you want to disable the use of multipart uploads.
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_x_id",
|
||||
Help: `Set if rclone should add x-id URL parameters.
|
||||
|
||||
You can change this if you want to disable the AWS SDK from
|
||||
adding x-id URL parameters.
|
||||
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "sign_accept_encoding",
|
||||
Help: `Set if rclone should include Accept-Encoding as part of the signature.
|
||||
|
||||
You can change this if you want to stop rclone including
|
||||
Accept-Encoding as part of the signature.
|
||||
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
@ -2720,6 +2797,16 @@ use |-vv| to see the debug level logs.
|
||||
Default: sdkLogMode(0),
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "ibm_api_key",
|
||||
Help: "IBM API Key to be used to obtain IAM token",
|
||||
Provider: "IBMCOS",
|
||||
},
|
||||
{
|
||||
Name: "ibm_resource_instance_id",
|
||||
Help: "IBM service instance id",
|
||||
Provider: "IBMCOS",
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
@ -2873,6 +2960,10 @@ type Options struct {
|
||||
UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
|
||||
SDKLogMode sdkLogMode `config:"sdk_log_mode"`
|
||||
DirectoryBucket bool `config:"directory_bucket"`
|
||||
IBMAPIKey string `config:"ibm_api_key"`
|
||||
IBMInstanceID string `config:"ibm_resource_instance_id"`
|
||||
UseXID fs.Tristate `config:"use_x_id"`
|
||||
SignAcceptEncoding fs.Tristate `config:"sign_accept_encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@ -3057,59 +3148,67 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
}
|
||||
}
|
||||
|
||||
// Fixup the request if needed.
|
||||
//
|
||||
// Google Cloud Storage alters the Accept-Encoding header, which
|
||||
// breaks the v2 request signature
|
||||
// breaks the v2 request signature. This is set with opt.SignAcceptEncoding.
|
||||
//
|
||||
// It also doesn't like the x-id URL parameter SDKv2 puts in so we
|
||||
// remove that too.
|
||||
// remove that too. This is set with opt.UseXID.Value.
|
||||
//
|
||||
// See https://github.com/aws/aws-sdk-go-v2/issues/1816.
|
||||
// Adapted from: https://github.com/aws/aws-sdk-go-v2/issues/1816#issuecomment-1927281540
|
||||
func fixupGCS(o *s3.Options) {
|
||||
func fixupRequest(o *s3.Options, opt *Options) {
|
||||
type ignoredHeadersKey struct{}
|
||||
headers := []string{"Accept-Encoding"}
|
||||
|
||||
fixup := middleware.FinalizeMiddlewareFunc(
|
||||
"FixupGCS",
|
||||
"FixupRequest",
|
||||
func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
|
||||
req, ok := in.Request.(*smithyhttp.Request)
|
||||
if !ok {
|
||||
return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
|
||||
return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
|
||||
}
|
||||
|
||||
// Delete headers from being signed - will restore later
|
||||
ignored := make(map[string]string, len(headers))
|
||||
for _, h := range headers {
|
||||
ignored[h] = req.Header.Get(h)
|
||||
req.Header.Del(h)
|
||||
if !opt.SignAcceptEncoding.Value {
|
||||
// Delete headers from being signed - will restore later
|
||||
ignored := make(map[string]string, len(headers))
|
||||
for _, h := range headers {
|
||||
ignored[h] = req.Header.Get(h)
|
||||
req.Header.Del(h)
|
||||
}
|
||||
|
||||
// Store ignored on context
|
||||
ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
|
||||
}
|
||||
|
||||
// Remove x-id because Google doesn't like them
|
||||
if query := req.URL.Query(); query.Has("x-id") {
|
||||
query.Del("x-id")
|
||||
req.URL.RawQuery = query.Encode()
|
||||
if !opt.UseXID.Value {
|
||||
// Remove x-id
|
||||
if query := req.URL.Query(); query.Has("x-id") {
|
||||
query.Del("x-id")
|
||||
req.URL.RawQuery = query.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
// Store ignored on context
|
||||
ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
|
||||
|
||||
return next.HandleFinalize(ctx, in)
|
||||
},
|
||||
)
|
||||
|
||||
// Restore headers if necessary
|
||||
restore := middleware.FinalizeMiddlewareFunc(
|
||||
"FixupGCSRestoreHeaders",
|
||||
"FixupRequestRestoreHeaders",
|
||||
func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
|
||||
req, ok := in.Request.(*smithyhttp.Request)
|
||||
if !ok {
|
||||
return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
|
||||
return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
|
||||
}
|
||||
|
||||
// Restore ignored from ctx
|
||||
ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
|
||||
for k, v := range ignored {
|
||||
req.Header.Set(k, v)
|
||||
if !opt.SignAcceptEncoding.Value {
|
||||
// Restore ignored from ctx
|
||||
ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
|
||||
for k, v := range ignored {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return next.HandleFinalize(ctx, in)
|
||||
@ -3155,6 +3254,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
|
||||
// Try to fill in the config from the environment if env_auth=true
|
||||
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
|
||||
|
||||
configOpts := []func(*awsconfig.LoadOptions) error{}
|
||||
// Set the name of the profile if supplied
|
||||
if opt.Profile != "" {
|
||||
@ -3168,8 +3268,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't load configuration with env_auth=true: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
switch {
|
||||
case opt.Provider == "IBMCOS" && opt.V2Auth:
|
||||
awsConfig.Credentials = &NoOpCredentialsProvider{}
|
||||
fs.Debugf(nil, "Using IBM IAM")
|
||||
case opt.AccessKeyID == "" && opt.SecretAccessKey == "":
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
awsConfig.Credentials = aws.AnonymousCredentials{}
|
||||
@ -3223,14 +3327,21 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
|
||||
if opt.V2Auth || opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
options = append(options, func(s3Opt *s3.Options) {
|
||||
s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
|
||||
})
|
||||
if opt.Provider == "IBMCOS" && opt.IBMAPIKey != "" && opt.IBMInstanceID != "" {
|
||||
options = append(options, func(s3Opt *s3.Options) {
|
||||
s3Opt.HTTPSignerV4 = &IbmIamSigner{APIKey: opt.IBMAPIKey, InstanceID: opt.IBMInstanceID}
|
||||
})
|
||||
} else {
|
||||
options = append(options, func(s3Opt *s3.Options) {
|
||||
s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if opt.Provider == "GCS" {
|
||||
// Fixup the request if needed
|
||||
if !opt.UseXID.Value || !opt.SignAcceptEncoding.Value {
|
||||
options = append(options, func(o *s3.Options) {
|
||||
fixupGCS(o)
|
||||
fixupRequest(o, opt)
|
||||
})
|
||||
}
|
||||
|
||||
@ -3345,6 +3456,8 @@ func setQuirks(opt *Options) {
|
||||
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
|
||||
useMultipartUploads = true // Set if provider supports multipart uploads
|
||||
useUnsignedPayload = true // Do we need to use unsigned payloads to avoid seeking in PutObject
|
||||
useXID = true // Add x-id URL parameter into requests
|
||||
signAcceptEncoding = true // If we should include AcceptEncoding in the signature
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
@ -3489,11 +3602,14 @@ func setQuirks(opt *Options) {
|
||||
// Google break request Signature by mutating accept-encoding HTTP header
|
||||
// https://github.com/rclone/rclone/issues/6670
|
||||
useAcceptEncodingGzip = false
|
||||
signAcceptEncoding = false
|
||||
useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
|
||||
// GCS S3 doesn't support multi-part server side copy:
|
||||
// See: https://issuetracker.google.com/issues/323465186
|
||||
// So make cutoff very large which it does seem to support
|
||||
opt.CopyCutoff = math.MaxInt64
|
||||
// GCS doesn't like the x-id URL parameter the SDKv2 inserts
|
||||
useXID = false
|
||||
default: //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid defaultCaseOrder: consider to make `default` case as first or as last case
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
fallthrough
|
||||
@ -3563,6 +3679,18 @@ func setQuirks(opt *Options) {
|
||||
opt.UseUnsignedPayload.Valid = true
|
||||
opt.UseUnsignedPayload.Value = useUnsignedPayload
|
||||
}
|
||||
|
||||
// Set the correct use UseXID if not manually set
|
||||
if !opt.UseXID.Valid {
|
||||
opt.UseXID.Valid = true
|
||||
opt.UseXID.Value = useXID
|
||||
}
|
||||
|
||||
// Set the correct SignAcceptEncoding if not manually set
|
||||
if !opt.SignAcceptEncoding.Valid {
|
||||
opt.SignAcceptEncoding.Valid = true
|
||||
opt.SignAcceptEncoding.Value = signAcceptEncoding
|
||||
}
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
@ -3677,6 +3805,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
if opt.Provider == "AWS" {
|
||||
f.features.DoubleSlash = true
|
||||
}
|
||||
if opt.DirectoryMarkers {
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
@ -4148,7 +4279,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if !opt.findFile {
|
||||
if opt.directory != "" {
|
||||
if opt.directory != "" && (opt.prefix == "" && !bucket.IsAllSlashes(opt.directory) || opt.prefix != "" && !strings.HasSuffix(opt.directory, "/")) {
|
||||
opt.directory += "/"
|
||||
}
|
||||
}
|
||||
@ -4245,14 +4376,18 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
}
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, opt.prefix) {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
fs.Logf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
// Trim one slash off the remote name
|
||||
remote, _ = strings.CutSuffix(remote, "/")
|
||||
if remote == "" || bucket.IsAllSlashes(remote) {
|
||||
remote += "/"
|
||||
}
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &types.Object{Key: &remote}, nil, true)
|
||||
if err != nil {
|
||||
if err == errEndList {
|
||||
|
@ -2,8 +2,10 @@ package smb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/cloudsoda/go-smb2"
|
||||
@ -11,14 +13,17 @@ import (
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// dial starts a client connection to the given SMB server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SMB handshake, and then sets up a Client.
|
||||
//
|
||||
// The context is only used for establishing the connection, not after.
|
||||
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
dialer := fshttp.NewDialer(ctx)
|
||||
tconn, err := dialer.Dial(network, addr)
|
||||
tconn, err := dialer.DialContext(ctx, network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -89,15 +94,7 @@ func (c *conn) close() (err error) {
|
||||
|
||||
// True if it's closed
|
||||
func (c *conn) closed() bool {
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
return nopErr != nil
|
||||
return c.smbSession.Echo() != nil
|
||||
}
|
||||
|
||||
// Show that we are using a SMB session
|
||||
@ -118,23 +115,20 @@ func (f *Fs) getSessions() int32 {
|
||||
}
|
||||
|
||||
// Open a new connection to the SMB server.
|
||||
//
|
||||
// The context is only used for establishing the connection, not after.
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
bgCtx := context.Background()
|
||||
|
||||
c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
}
|
||||
if share != "" {
|
||||
// mount the specified share as well if user requested
|
||||
c.smbShare, err = c.smbSession.Mount(share)
|
||||
err = c.mountShare(share)
|
||||
if err != nil {
|
||||
_ = c.smbSession.Logoff()
|
||||
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
|
||||
}
|
||||
c.smbShare = c.smbShare.WithContext(bgCtx)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
@ -192,23 +186,30 @@ func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err erro
|
||||
// Return a SMB connection to the pool
|
||||
//
|
||||
// It nils the pointed to connection out so it can't be reused
|
||||
func (f *Fs) putConnection(pc **conn) {
|
||||
c := *pc
|
||||
*pc = nil
|
||||
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
_ = c.close()
|
||||
//
|
||||
// if err is not nil then it checks the connection is alive using an
|
||||
// ECHO request
|
||||
func (f *Fs) putConnection(pc **conn, err error) {
|
||||
if pc == nil {
|
||||
return
|
||||
}
|
||||
c := *pc
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular SMB error then check the connection
|
||||
if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrExist) || errors.Is(err, os.ErrPermission)) {
|
||||
echoErr := c.smbSession.Echo()
|
||||
if echoErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", echoErr)
|
||||
_ = c.close()
|
||||
return
|
||||
}
|
||||
fs.Debugf(f, "Connection OK after error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
f.poolMu.Lock()
|
||||
f.pool = append(f.pool, c)
|
||||
@ -235,15 +236,18 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
|
||||
if len(f.pool) != 0 {
|
||||
fs.Debugf(f, "Closing %d unused connections", len(f.pool))
|
||||
}
|
||||
|
||||
g, _ := errgroup.WithContext(ctx)
|
||||
for i, c := range f.pool {
|
||||
if !c.closed() {
|
||||
cErr := c.close()
|
||||
if cErr != nil {
|
||||
err = cErr
|
||||
g.Go(func() (err error) {
|
||||
if !c.closed() {
|
||||
err = c.close()
|
||||
}
|
||||
}
|
||||
f.pool[i] = nil
|
||||
f.pool[i] = nil
|
||||
return err
|
||||
})
|
||||
}
|
||||
err = g.Wait()
|
||||
f.pool = nil
|
||||
return err
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 100 * time.Millisecond
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
@ -207,7 +207,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
// ignore stat error here
|
||||
return f, nil
|
||||
@ -268,7 +268,7 @@ func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Obj
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
@ -290,7 +290,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -305,7 +305,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.Remove(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -375,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
|
||||
return nil, err
|
||||
}
|
||||
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
@ -412,7 +412,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.putConnection(&cn)
|
||||
defer f.putConnection(&cn, err)
|
||||
|
||||
_, err = cn.smbShare.Stat(dstPath)
|
||||
if os.IsNotExist(err) {
|
||||
@ -430,7 +430,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.putConnection(&cn)
|
||||
defer f.putConnection(&cn, err)
|
||||
|
||||
if share == "" {
|
||||
shares, err := cn.smbSession.ListSharenames()
|
||||
@ -474,7 +474,7 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Statfs(dir)
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -556,7 +556,7 @@ func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
|
||||
f.putConnection(&cn)
|
||||
f.putConnection(&cn, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -604,7 +604,7 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer o.fs.putConnection(&cn)
|
||||
defer o.fs.putConnection(&cn, err)
|
||||
|
||||
err = cn.smbShare.Chtimes(reqDir, t, t)
|
||||
if err != nil {
|
||||
@ -650,24 +650,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
return nil, fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
pos, err := fl.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
return nil, fmt.Errorf("failed to seek: %w", err)
|
||||
}
|
||||
if pos != offset {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
|
||||
err = fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
|
||||
o.fs.putConnection(&cn, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
in = readers.NewLimitedReadCloser(fl, limit)
|
||||
in = &boundReadCloser{
|
||||
rc: in,
|
||||
close: func() error {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, nil)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -697,7 +698,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
}()
|
||||
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||
@ -757,7 +758,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
}
|
||||
|
||||
err = cn.smbShare.Remove(filename)
|
||||
o.fs.putConnection(&cn)
|
||||
o.fs.putConnection(&cn, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -120,7 +120,7 @@ func init() {
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
|
||||
|
||||
// FIXME
|
||||
//err = f.pacer.Call(func() (bool, error) {
|
||||
// err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
|
||||
// return shouldRetry(ctx, resp, err)
|
||||
//})
|
||||
@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
|
||||
func (f *Fs) getAuthToken(ctx context.Context) error {
|
||||
fs.Debugf(f, "Renewing token")
|
||||
|
||||
var authRequest = api.TokenAuthRequest{
|
||||
authRequest := api.TokenAuthRequest{
|
||||
AccessKeyID: withDefault(f.opt.AccessKeyID, accessKeyID),
|
||||
PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
|
||||
RefreshToken: f.opt.RefreshToken,
|
||||
@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
|
||||
return fmt.Errorf("error reading error out of body: %w", err)
|
||||
}
|
||||
match := findError.FindSubmatch(body)
|
||||
if match == nil || len(match) < 2 || len(match[1]) == 0 {
|
||||
if len(match) < 2 || len(match[1]) == 0 {
|
||||
return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
|
||||
}
|
||||
return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
|
||||
@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
// Find the leaf in pathID
|
||||
found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
|
||||
if strings.EqualFold(item.Name, leaf) {
|
||||
|
@ -11,4 +11,5 @@
|
||||
<services+github@simjo.st>
|
||||
<seb•ɑƬ•chezwam•ɖɵʈ•org>
|
||||
<allllaboutyou@gmail.com>
|
||||
<psycho@feltzv.fr>
|
||||
<psycho@feltzv.fr>
|
||||
<afw5059@gmail.com>
|
@ -108,6 +108,8 @@ var logReplacements = []string{
|
||||
`^.*?Can't compare hashes, so using check --download.*?$`, dropMe,
|
||||
// ignore timestamps in directory time updates
|
||||
`^(INFO : .*?: (Made directory with|Set directory) (metadata|modification time)).*$`, dropMe,
|
||||
// ignore equivalent log for backends lacking dir modtime support
|
||||
`^(INFO : .*?: Making directory).*$`, dropMe,
|
||||
// ignore sizes in directory time updates
|
||||
`^(NOTICE: .*?: Skipped set directory modification time as --dry-run is set).*$`, dropMe,
|
||||
// ignore sizes in directory metadata updates
|
||||
@ -746,6 +748,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
case "test-func":
|
||||
b.TestFn = testFunc
|
||||
return
|
||||
case "concurrent-func":
|
||||
b.TestFn = func() {
|
||||
src := filepath.Join(b.dataDir, "file7.txt")
|
||||
dst := "file1.txt"
|
||||
err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "error copying file: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
case "fix-names":
|
||||
// in case the local os converted any filenames
|
||||
ci.NoUnicodeNormalization = true
|
||||
@ -871,10 +883,9 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
if !ok || err != nil {
|
||||
fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
|
||||
return
|
||||
} else {
|
||||
// include hash of filename to make unicode form differences easier to see in logs
|
||||
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
|
||||
}
|
||||
// include hash of filename to make unicode form differences easier to see in logs
|
||||
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
|
||||
return
|
||||
default:
|
||||
return fmt.Errorf("unknown command: %q", args[0])
|
||||
|
@ -161,9 +161,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
|
||||
return
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = b.checkListing(now, newListing, "current "+msg)
|
||||
}
|
||||
err = b.checkListing(now, newListing, "current "+msg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -286,7 +284,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
|
||||
}
|
||||
|
||||
// applyDeltas
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
|
||||
@ -367,7 +365,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
}
|
||||
}
|
||||
|
||||
//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
|
||||
// if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
|
||||
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
@ -392,7 +390,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
} else if d2.is(deltaOther) {
|
||||
b.indent("!WARNING", file, "New or changed in both paths")
|
||||
|
||||
//if files are identical, leave them alone instead of renaming
|
||||
// if files are identical, leave them alone instead of renaming
|
||||
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
|
||||
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
ls1.getPut(file, skippedDirs1)
|
||||
@ -486,7 +484,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
|
||||
// Do the batch operation
|
||||
if copy2to1.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes1 = true
|
||||
b.indent("Path2", "Path1", "Do queued copies to")
|
||||
ctx = b.setBackupDir(ctx, 1)
|
||||
results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
|
||||
@ -498,12 +495,11 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
|
||||
}
|
||||
|
||||
if copy1to2.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes2 = true
|
||||
b.indent("Path1", "Path2", "Do queued copies to")
|
||||
ctx = b.setBackupDir(ctx, 2)
|
||||
results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
|
||||
@ -515,7 +511,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
|
||||
}
|
||||
|
||||
@ -523,7 +519,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err = b.saveQueue(delete1, "delete1"); err != nil {
|
||||
return
|
||||
}
|
||||
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
|
||||
}
|
||||
|
||||
@ -531,7 +527,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err = b.saveQueue(delete2, "delete2"); err != nil {
|
||||
return
|
||||
}
|
||||
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
|
||||
}
|
||||
|
||||
|
@ -359,8 +359,6 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
|
||||
// Determine and apply changes to Path1 and Path2
|
||||
noChanges := ds1.empty() && ds2.empty()
|
||||
changes1 := false // 2to1
|
||||
changes2 := false // 1to2
|
||||
results2to1 := []Results{}
|
||||
results1to2 := []Results{}
|
||||
|
||||
@ -370,7 +368,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
fs.Infof(nil, "No changes found")
|
||||
} else {
|
||||
fs.Infof(nil, "Applying changes")
|
||||
changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
|
||||
results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
|
||||
if err != nil {
|
||||
if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
|
||||
fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
|
||||
@ -395,21 +393,11 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
}
|
||||
b.saveOldListings()
|
||||
// save new listings
|
||||
// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
|
||||
// i.e. whether we can use the March lst-new as this side's lst without modifying it.
|
||||
if noChanges {
|
||||
b.replaceCurrentListings()
|
||||
} else {
|
||||
if changes1 || b.InGracefulShutdown { // 2to1
|
||||
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
|
||||
} else {
|
||||
err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
|
||||
}
|
||||
if changes2 || b.InGracefulShutdown { // 1to2
|
||||
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
|
||||
} else {
|
||||
err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
|
||||
}
|
||||
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
|
||||
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true) // 1to2
|
||||
}
|
||||
if b.DebugName != "" {
|
||||
l1, _ := b.loadListing(b.listing1)
|
||||
|
1
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.copy2to1.que
vendored
Normal file
@ -0,0 +1 @@
|
||||
"file1.txt"
|
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-new
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-old
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-new
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
10
cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-old
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
|
||||
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
|
73
cmd/bisync/testdata/test_concurrent/golden/test.log
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
[36m(01) :[0m [34mtest concurrent[0m
|
||||
|
||||
[36m(02) :[0m [34mtest initial bisync[0m
|
||||
[36m(03) :[0m [34mbisync resync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
[36m(04) :[0m [34mtest changed on one path - file1[0m
|
||||
[36m(05) :[0m [34mtouch-glob 2001-01-02 {datadir/} file5R.txt[0m
|
||||
[36m(06) :[0m [34mtouch-glob 2023-08-26 {datadir/} file7.txt[0m
|
||||
[36m(07) :[0m [34mcopy-as {datadir/}file5R.txt {path2/} file1.txt[0m
|
||||
|
||||
[36m(08) :[0m [34mtest bisync with file changed during[0m
|
||||
[36m(09) :[0m [34mconcurrent-func[0m
|
||||
[36m(10) :[0m [34mbisync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : - [34mPath2[0m [35m[33mFile changed: [35msize (larger)[0m, [35mtime (newer)[0m[0m[0m - [36mfile1.txt[0m
|
||||
INFO : Path2: 1 changes: [32m 0 new[0m, [33m 1 modified[0m, [31m 0 deleted[0m
|
||||
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 0 older[0m, [36m 1 larger[0m, [34m 0 smaller[0m)
|
||||
INFO : Applying changes
|
||||
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}file1.txt[0m
|
||||
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
[36m(11) :[0m [34mbisync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : No changes found
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
1
cmd/bisync/testdata/test_concurrent/initial/RCLONE_TEST
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.
|
0
cmd/bisync/testdata/test_concurrent/initial/file1.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file2.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file3.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file4.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file5.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file6.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file7.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/initial/file8.txt
vendored
Normal file
0
cmd/bisync/testdata/test_concurrent/modfiles/dummy.txt
vendored
Normal file
1
cmd/bisync/testdata/test_concurrent/modfiles/file1.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file10.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file11.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file2.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
Newer version
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file5L.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer and not equal to 5R
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file5R.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer and not equal to 5L
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file6.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer
|
1
cmd/bisync/testdata/test_concurrent/modfiles/file7.txt
vendored
Normal file
@ -0,0 +1 @@
|
||||
This file is newer
|
15
cmd/bisync/testdata/test_concurrent/scenario.txt
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
test concurrent
|
||||
|
||||
test initial bisync
|
||||
bisync resync
|
||||
|
||||
test changed on one path - file1
|
||||
touch-glob 2001-01-02 {datadir/} file5R.txt
|
||||
touch-glob 2023-08-26 {datadir/} file7.txt
|
||||
copy-as {datadir/}file5R.txt {path2/} file1.txt
|
||||
|
||||
test bisync with file changed during
|
||||
concurrent-func
|
||||
bisync
|
||||
|
||||
bisync
|
@ -430,7 +430,7 @@ func initConfig() {
|
||||
}
|
||||
|
||||
// Start the metrics server if configured and not running the "rc" command
|
||||
if os.Args[1] != "rc" {
|
||||
if len(os.Args) >= 2 && os.Args[1] != "rc" {
|
||||
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
|
||||
|
@ -303,6 +303,9 @@ func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.Co
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if updateRemoteOpt.NoOutput {
|
||||
return nil
|
||||
}
|
||||
if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
|
||||
config.ShowRemote(name)
|
||||
} else {
|
||||
@ -323,6 +326,7 @@ func init() {
|
||||
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
|
||||
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured", "Config")
|
||||
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured", "Config")
|
||||
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoOutput, "no-output", "", false, "Don't provide any output", "Config")
|
||||
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions", "Config")
|
||||
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer", "Config")
|
||||
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions", "Config")
|
||||
|
@ -22,6 +22,9 @@ include/exclude filters - everything will be removed. Use the
|
||||
delete files. To delete empty directories only, use command
|
||||
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).
|
||||
|
||||
The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
|
||||
implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
|
||||
`,
|
||||
|
@ -51,6 +51,7 @@ unless ` + "`--no-create`" + ` or ` + "`--recursive`" + ` is provided.
|
||||
If ` + "`--recursive`" + ` is used then recursively sets the modification
|
||||
time on all existing files that are found under the path. Filters are supported,
|
||||
and you can test with the ` + "`--dry-run`" + ` or the ` + "`--interactive`/`-i`" + ` flag.
|
||||
This will touch ` + "`--transfers`" + ` files concurrently.
|
||||
|
||||
If ` + "`--timestamp`" + ` is used then sets the modification time to that
|
||||
time instead of the current time. Times may be specified as one of:
|
||||
|
@ -809,7 +809,6 @@ put them back in again.` >}}
|
||||
* ben-ba <benjamin.brauner@gmx.de>
|
||||
* Eli Orzitzer <e_orz@yahoo.com>
|
||||
* Anthony Metzidis <anthony.metzidis@gmail.com>
|
||||
* emyarod <afw5059@gmail.com>
|
||||
* keongalvin <keongalvin@gmail.com>
|
||||
* rarspace01 <rarspace01@users.noreply.github.com>
|
||||
* Paul Stern <paulstern45@gmail.com>
|
||||
@ -928,3 +927,23 @@ put them back in again.` >}}
|
||||
* Matt Ickstadt <mattico8@gmail.com> <matt@beckenterprises.com>
|
||||
* Spencer McCullough <mccullough.spencer@gmail.com>
|
||||
* Jonathan Giannuzzi <jonathan@giannuzzi.me>
|
||||
* Christoph Berger <github@christophberger.com>
|
||||
* Tim White <tim.white@su.org.au>
|
||||
* Robin Schneider <robin.schneider@stackit.cloud>
|
||||
* izouxv <izouxv@users.noreply.github.com>
|
||||
* Moises Lima <mozlima@users.noreply.github.com>
|
||||
* Bruno Fernandes <bruno.fernandes1996@hotmail.com>
|
||||
* Corentin Barreau <corentin@archive.org>
|
||||
* hiddenmarten <hiddenmarten@gmail.com>
|
||||
* Trevor Starick <trevor.starick@gmail.com>
|
||||
* b-wimmer <132347192+b-wimmer@users.noreply.github.com>
|
||||
* Jess <jess@jessie.cafe>
|
||||
* Zachary Vorhies <zachvorhies@protonmail.com>
|
||||
* Alexander Minbaev <minbaev@gmail.com>
|
||||
* Joel K Biju <joelkbiju18@gmail.com>
|
||||
* ll3006 <doublel3006@gmail.com>
|
||||
* jbagwell-akamai <113531113+jbagwell-akamai@users.noreply.github.com>
|
||||
* Michael Kebe <michael.kebe@gmail.com>
|
||||
* Lorenz Brun <lorenz@brun.one>
|
||||
* Dave Vasilevsky <djvasi@gmail.com> <dave@vasilevsky.ca>
|
||||
* luzpaz <luzpaz@users.noreply.github.com>
|
||||
|
@ -206,6 +206,13 @@ If the resource has multiple user-assigned identities you will need to
|
||||
unset `env_auth` and set `use_msi` instead. See the [`use_msi`
|
||||
section](#use_msi).
|
||||
|
||||
If you are operating in disconnected clouds, or private clouds such as
|
||||
Azure Stack, you may want to set `disable_instance_discovery = true`.
|
||||
This determines whether rclone requests Microsoft Entra instance
|
||||
metadata from `https://login.microsoft.com/` before authenticating.
|
||||
Setting this to `true` will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
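For example, a remote for such an environment might be configured along these lines (the remote name is illustrative, and `azurefiles` is assumed as the backend type for this page):

```
[private-cloud]
type = azurefiles
env_auth = true
disable_instance_discovery = true
```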
|
||||
|
||||
##### Env Auth: 3. Azure CLI credentials (as used by the az tool)
|
||||
|
||||
Credentials created with the `az` tool can be picked up using `env_auth`.
|
||||
@ -288,6 +295,13 @@ be explicitly specified using exactly one of the `msi_object_id`,
|
||||
|
||||
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
|
||||
set, this is equivalent to using `env_auth`.
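As an illustrative sketch, selecting a user-assigned identity by client ID might look like this (the remote name and client ID are placeholders, and `azurefiles` is assumed as the backend type for this page):

```
[msi-remote]
type = azurefiles
use_msi = true
msi_client_id = 00000000-0000-0000-0000-000000000000
```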
|
||||
|
||||
#### Azure CLI tool `az` {#use_az}
|
||||
Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
Setting this can be useful if you wish to use the `az` CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
Don't set `env_auth` at the same time.
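A minimal sketch of such a remote, assuming you have already run `az login` on the host (the remote name is illustrative):

```
[az-cli-remote]
type = azurefiles
use_az = true
```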
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azurefiles/azurefiles.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general.
|
||||
|
||||
## Changelog
|
||||
|
||||
### `v1.69.1`
|
||||
* Fixed an issue causing listings to not capture concurrent modifications under certain conditions
|
||||
|
||||
### `v1.68`
|
||||
* Fixed an issue affecting backends that round modtimes to a lower precision.
|
||||
|
||||
|
@ -5,6 +5,32 @@ description: "Rclone Changelog"
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.69.1 - 2025-02-14
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
|
||||
|
||||
* Bug Fixes
|
||||
* lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
|
||||
* bisync: Fix listings missing concurrent modifications (nielash)
|
||||
* serve s3: Fix list objects encoding-type (Nick Craig-Wood)
|
||||
* fs: Fix confusing "didn't find section in config file" error (Nick Craig-Wood)
|
||||
* doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
|
||||
* build: Added parallel docker builds and caching for go build in the container (Anagh Kumar Baranwal)
|
||||
* VFS
|
||||
* Fix the cache failing to upload symlinks when `--links` was specified (Nick Craig-Wood)
|
||||
* Fix race detected by race detector (Nick Craig-Wood)
|
||||
* Close the change notify channel on Shutdown (izouxv)
|
||||
* B2
|
||||
* Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
|
||||
* Iclouddrive
|
||||
* Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
|
||||
* Onedrive
|
||||
* Mark German (de) region as deprecated (Nick Craig-Wood)
|
||||
* S3
|
||||
* Added new storage class to magalu provider (Bruno Fernandes)
|
||||
* Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
|
||||
* Add latest Linode Object Storage endpoints (jbagwell-akamai)
|
||||
|
||||
## v1.69.0 - 2025-01-12
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)
|
||||
|
@ -7,6 +7,8 @@ versionIntroduced: v1.65
|
||||
---
|
||||
# rclone serve nfs
|
||||
|
||||
*Not available in Windows.*
|
||||
|
||||
Serve the remote as an NFS mount
|
||||
|
||||
## Synopsis
|
||||
|
@ -190,6 +190,42 @@ with `--dropbox-batch-mode async` then do a final transfer with
|
||||
Note that there may be a pause when quitting rclone while rclone
|
||||
finishes up the last batch using this mode.
|
||||
|
||||
### Exporting files
|
||||
|
||||
Certain files in Dropbox are "exportable", such as Dropbox Paper
|
||||
documents. These files need to be converted to another format in
|
||||
order to be downloaded. Often multiple formats are available for
|
||||
conversion.
|
||||
|
||||
When rclone downloads an exportable file, it chooses the format to
|
||||
download based on the `--dropbox-export-formats` setting. By
|
||||
default, the export formats are `html,md`, which are sensible
|
||||
defaults for Dropbox Paper.
|
||||
|
||||
Rclone chooses the first format ID in the export formats list that
|
||||
Dropbox supports for a given file. If no format in the list is
|
||||
usable, rclone will choose the default format that Dropbox suggests.
|
||||
|
||||
Rclone will change the extension to correspond to the export format.
|
||||
Here are some examples of how extensions are mapped:
|
||||
|
||||
| File type | Filename in Dropbox | Filename in rclone |
|
||||
|----------------|---------------------|--------------------|
|
||||
| Paper | mydoc.paper | mydoc.html |
|
||||
| Paper template | mydoc.papert | mydoc.papert.html |
|
||||
| other | mydoc | mydoc.html |
|
||||
|
||||
_Importing_ exportable files is not yet supported by rclone.
|
||||
|
||||
Here are the supported export extensions known by rclone. Note that
|
||||
rclone does not currently support other formats not on this list,
|
||||
even if Dropbox supports them. Also, Dropbox could change the list
|
||||
of supported formats at any time.
|
||||
|
||||
| Format ID | Name | Description |
|
||||
|-----------|----------|----------------------|
|
||||
| html | HTML | HTML document |
|
||||
| md | Markdown | Markdown text format |
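For instance, to prefer the Markdown export when copying Paper documents, you could override the default formats (the remote and paths here are only examples):

```
rclone copy dropbox:work/notes /tmp/notes --dropbox-export-formats md
```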
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/dropbox/dropbox.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
@ -522,6 +558,11 @@ non-personal account otherwise the visibility may not be correct.
[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211) and the
[dropbox SDK issue](https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75).

Modification times for Dropbox Paper documents are not exact, and
may not change for some period after the document is edited.
To make sure you get recent changes in a sync, either wait an hour
or so, or use `--ignore-times` to force a full sync.

## Get your own Dropbox App ID

When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.
@ -2958,7 +2958,7 @@ Choose a number from below, or type in your own value
|
||||
location_constraint>1
|
||||
```
|
||||
|
||||
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
|
||||
8. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
|
||||
```
|
||||
Canned ACL used when creating buckets and/or storing objects in S3.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
@ -2974,8 +2974,7 @@ Choose a number from below, or type in your own value
|
||||
acl> 1
|
||||
```
|
||||
|
||||
|
||||
12. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
|
||||
9. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
|
||||
```
|
||||
[xxx]
|
||||
type = s3
|
||||
@ -2987,7 +2986,7 @@ acl> 1
|
||||
acl = private
|
||||
```
|
||||
|
||||
13. Execute rclone commands
|
||||
10. Execute rclone commands
|
||||
```
|
||||
1) Create a bucket.
|
||||
rclone mkdir IBM-COS-XREGION:newbucket
|
||||
@ -3006,6 +3005,35 @@ acl> 1
|
||||
rclone delete IBM-COS-XREGION:newbucket/file.txt
|
||||
```
|
||||
|
||||
#### IBM IAM authentication
|
||||
If using IBM IAM authentication with IBM API KEY you need to fill in these additional parameters
|
||||
1. Select false for env_auth
|
||||
2. Leave `access_key_id` and `secret_access_key` blank
|
||||
3. Paste your `ibm_api_key`
|
||||
```
|
||||
Option ibm_api_key.
|
||||
IBM API Key to be used to obtain IAM token
|
||||
Enter a value of type string. Press Enter for the default (1).
|
||||
ibm_api_key>
|
||||
```
|
||||
4. Paste your `ibm_resource_instance_id`
|
||||
```
|
||||
Option ibm_resource_instance_id.
|
||||
IBM service instance id
|
||||
Enter a value of type string. Press Enter for the default (2).
|
||||
ibm_resource_instance_id>
|
||||
```
|
||||
5. In advanced settings type true for `v2_auth`
|
||||
```
|
||||
Option v2_auth.
|
||||
If true use v2 authentication.
|
||||
If this is false (the default) then rclone will use v4 authentication.
|
||||
If it is set then rclone will use v2 authentication.
|
||||
Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.
|
||||
Enter a boolean value (true or false). Press Enter for the default (true).
|
||||
v2_auth>
|
||||
```
|
||||
|
||||
### IDrive e2 {#idrive-e2}
|
||||
|
||||
Here is an example of making an [IDrive e2](https://www.idrive.com/e2/)
|
||||
@ -4845,27 +4873,49 @@ Option endpoint.
|
||||
Endpoint for Linode Object Storage API.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Atlanta, GA (USA), us-southeast-1
|
||||
1 / Amsterdam (Netherlands), nl-ams-1
|
||||
\ (nl-ams-1.linodeobjects.com)
|
||||
2 / Atlanta, GA (USA), us-southeast-1
|
||||
\ (us-southeast-1.linodeobjects.com)
|
||||
2 / Chicago, IL (USA), us-ord-1
|
||||
3 / Chennai (India), in-maa-1
|
||||
\ (in-maa-1.linodeobjects.com)
|
||||
4 / Chicago, IL (USA), us-ord-1
|
||||
\ (us-ord-1.linodeobjects.com)
|
||||
3 / Frankfurt (Germany), eu-central-1
|
||||
5 / Frankfurt (Germany), eu-central-1
|
||||
\ (eu-central-1.linodeobjects.com)
|
||||
4 / Milan (Italy), it-mil-1
|
||||
6 / Jakarta (Indonesia), id-cgk-1
|
||||
\ (id-cgk-1.linodeobjects.com)
|
||||
7 / London 2 (Great Britain), gb-lon-1
|
||||
\ (gb-lon-1.linodeobjects.com)
|
||||
8 / Los Angeles, CA (USA), us-lax-1
|
||||
\ (us-lax-1.linodeobjects.com)
|
||||
9 / Madrid (Spain), es-mad-1
|
||||
\ (es-mad-1.linodeobjects.com)
|
||||
10 / Melbourne (Australia), au-mel-1
|
||||
\ (au-mel-1.linodeobjects.com)
|
||||
11 / Miami, FL (USA), us-mia-1
|
||||
\ (us-mia-1.linodeobjects.com)
|
||||
12 / Milan (Italy), it-mil-1
|
||||
\ (it-mil-1.linodeobjects.com)
|
||||
5 / Newark, NJ (USA), us-east-1
|
||||
13 / Newark, NJ (USA), us-east-1
|
||||
\ (us-east-1.linodeobjects.com)
|
||||
6 / Paris (France), fr-par-1
|
||||
14 / Osaka (Japan), jp-osa-1
|
||||
\ (jp-osa-1.linodeobjects.com)
|
||||
15 / Paris (France), fr-par-1
|
||||
\ (fr-par-1.linodeobjects.com)
|
||||
7 / Seattle, WA (USA), us-sea-1
|
||||
16 / São Paulo (Brazil), br-gru-1
|
||||
\ (br-gru-1.linodeobjects.com)
|
||||
17 / Seattle, WA (USA), us-sea-1
|
||||
\ (us-sea-1.linodeobjects.com)
|
||||
8 / Singapore ap-south-1
|
||||
18 / Singapore, ap-south-1
|
||||
\ (ap-south-1.linodeobjects.com)
|
||||
9 / Stockholm (Sweden), se-sto-1
|
||||
19 / Singapore 2, sg-sin-1
|
||||
\ (sg-sin-1.linodeobjects.com)
|
||||
20 / Stockholm (Sweden), se-sto-1
|
||||
\ (se-sto-1.linodeobjects.com)
|
||||
10 / Washington, DC, (USA), us-iad-1
|
||||
21 / Washington, DC, (USA), us-iad-1
|
||||
\ (us-iad-1.linodeobjects.com)
|
||||
endpoint> 3
|
||||
endpoint> 5
|
||||
|
||||
Option acl.
|
||||
Canned ACL used when creating buckets and storing or copying objects.
|
||||
|
@ -1108,7 +1108,8 @@ Properties:

On some SFTP servers (e.g. Synology) the paths are different
for SSH and SFTP so the hashes can't be calculated properly.
For them using `disable_hashcheck` is a good idea.
You can either use [`--sftp-path-override`](#--sftp-path-override)
or [`disable_hashcheck`](#--sftp-disable-hashcheck).

The only ssh agent supported under Windows is Putty's pageant.
@ -57,7 +57,8 @@ off donation.
Thank you very much to our sponsors:

{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/warp.svg" width="300" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/warp.svg" width="285" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
@ -1,3 +1,3 @@
<a href="{{ .Get "link" }}" target="_blank" >
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" style="{{ .Get "style" | safeCSS }}">
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" styleX="{{ .Get "style" | safeCSS }}" style="margin: 2px; padding: 1px; border: 1px solid #ddd; border-radius: 4px;">
</a>
@ -65,28 +65,29 @@ type StatsInfo struct {
|
||||
}
|
||||
|
||||
type averageValues struct {
|
||||
mu sync.Mutex
|
||||
lpBytes int64
|
||||
lpTime time.Time
|
||||
speed float64
|
||||
stop chan bool
|
||||
stopped sync.WaitGroup
|
||||
startOnce sync.Once
|
||||
stopOnce sync.Once
|
||||
mu sync.Mutex
|
||||
lpBytes int64
|
||||
lpTime time.Time
|
||||
speed float64
|
||||
stop chan bool
|
||||
stopped sync.WaitGroup
|
||||
started bool
|
||||
}
|
||||
|
||||
// NewStats creates an initialised StatsInfo
|
||||
func NewStats(ctx context.Context) *StatsInfo {
|
||||
ci := fs.GetConfig(ctx)
|
||||
return &StatsInfo{
|
||||
s := &StatsInfo{
|
||||
ctx: ctx,
|
||||
ci: ci,
|
||||
checking: newTransferMap(ci.Checkers, "checking"),
|
||||
transferring: newTransferMap(ci.Transfers, "transferring"),
|
||||
inProgress: newInProgress(ctx),
|
||||
startTime: time.Now(),
|
||||
average: averageValues{stop: make(chan bool)},
|
||||
average: averageValues{},
|
||||
}
|
||||
s.startAverageLoop()
|
||||
return s
|
||||
}
|
||||
|
||||
// RemoteStats returns stats for rc
|
||||
@ -328,61 +329,96 @@ func (s *StatsInfo) averageLoop() {
|
||||
ticker := time.NewTicker(averagePeriodLength)
|
||||
defer ticker.Stop()
|
||||
|
||||
startTime := time.Now()
|
||||
a := &s.average
|
||||
defer a.stopped.Done()
|
||||
|
||||
shouldRun := false
|
||||
|
||||
for {
|
||||
select {
|
||||
case now := <-ticker.C:
|
||||
a.mu.Lock()
|
||||
var elapsed float64
|
||||
if a.lpTime.IsZero() {
|
||||
elapsed = now.Sub(startTime).Seconds()
|
||||
} else {
|
||||
elapsed = now.Sub(a.lpTime).Seconds()
|
||||
|
||||
if !shouldRun {
|
||||
a.mu.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
avg := 0.0
|
||||
elapsed := now.Sub(a.lpTime).Seconds()
|
||||
if elapsed > 0 {
|
||||
avg = float64(a.lpBytes) / elapsed
|
||||
}
|
||||
|
||||
if period < averagePeriod {
|
||||
period++
|
||||
}
|
||||
|
||||
a.speed = (avg + a.speed*(period-1)) / period
|
||||
a.lpBytes = 0
|
||||
a.lpTime = now
|
||||
|
||||
a.mu.Unlock()
|
||||
|
||||
case stop, ok := <-a.stop:
|
||||
if !ok {
|
||||
return // Channel closed, exit the loop
|
||||
}
|
||||
|
||||
a.mu.Lock()
|
||||
|
||||
// If we are resuming, store the current time
|
||||
if !shouldRun && !stop {
|
||||
a.lpTime = time.Now()
|
||||
}
|
||||
shouldRun = !stop
|
||||
|
||||
a.mu.Unlock()
|
||||
case <-a.stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Resume the average loop
|
||||
func (s *StatsInfo) resumeAverageLoop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.average.stop <- false
|
||||
}
|
||||
|
||||
// Pause the average loop
|
||||
func (s *StatsInfo) pauseAverageLoop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.average.stop <- true
|
||||
}
|
||||
|
||||
// Start the average loop
|
||||
//
|
||||
// Call with the mutex held
|
||||
func (s *StatsInfo) _startAverageLoop() {
|
||||
if !s.average.started {
|
||||
s.average.stop = make(chan bool)
|
||||
s.average.started = true
|
||||
s.average.stopped.Add(1)
|
||||
go s.averageLoop()
|
||||
}
|
||||
}
|
||||
|
||||
// Start the average loop
|
||||
func (s *StatsInfo) startAverageLoop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.average.startOnce.Do(func() {
|
||||
s.average.stopped.Add(1)
|
||||
go s.averageLoop()
|
||||
})
|
||||
s._startAverageLoop()
|
||||
}
|
||||
|
||||
// Stop the average loop
|
||||
//
|
||||
// Call with the mutex held
|
||||
func (s *StatsInfo) _stopAverageLoop() {
|
||||
s.average.stopOnce.Do(func() {
|
||||
if s.average.started {
|
||||
close(s.average.stop)
|
||||
s.average.stopped.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
// Stop the average loop
|
||||
func (s *StatsInfo) stopAverageLoop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s._stopAverageLoop()
|
||||
}
|
||||
}
|
||||
|
||||
// String convert the StatsInfo to a string for printing
|
||||
@ -564,9 +600,9 @@ func (s *StatsInfo) GetBytesWithPending() int64 {
|
||||
pending := int64(0)
|
||||
for _, tr := range s.startedTransfers {
|
||||
if tr.acc != nil {
|
||||
bytes, size := tr.acc.progress()
|
||||
if bytes < size {
|
||||
pending += size - bytes
|
||||
bytesRead, size := tr.acc.progress()
|
||||
if bytesRead < size {
|
||||
pending += size - bytesRead
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -699,7 +735,8 @@ func (s *StatsInfo) ResetCounters() {
|
||||
s.oldDuration = 0
|
||||
|
||||
s._stopAverageLoop()
|
||||
s.average = averageValues{stop: make(chan bool)}
|
||||
s.average = averageValues{}
|
||||
s._startAverageLoop()
|
||||
}
|
||||
|
||||
// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
|
||||
@ -788,7 +825,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
|
||||
}
|
||||
tr := newTransfer(s, obj, srcFs, dstFs)
|
||||
s.transferring.add(tr)
|
||||
s.startAverageLoop()
|
||||
s.resumeAverageLoop()
|
||||
return tr
|
||||
}
|
||||
|
||||
@ -796,7 +833,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
|
||||
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer {
|
||||
tr := newTransferRemoteSize(s, remote, size, false, "", srcFs, dstFs)
|
||||
s.transferring.add(tr)
|
||||
s.startAverageLoop()
|
||||
s.resumeAverageLoop()
|
||||
return tr
|
||||
}
|
||||
|
||||
@ -811,7 +848,7 @@ func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
|
||||
s.mu.Unlock()
|
||||
}
|
||||
if s.transferring.empty() && s.checking.empty() {
|
||||
time.AfterFunc(averageStopAfter, s.stopAverageLoop)
|
||||
s.pauseAverageLoop()
|
||||
}
|
||||
}
|
||||
|
||||
|
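For readers skimming the accounting diff above: the core of the change is replacing a start-once/stop-once average loop with one that can be paused and resumed. A minimal, self-contained sketch of that pattern follows; it is illustrative only and does not use rclone's actual types or names.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// loop owns a single ticker goroutine. Sending true on stop pauses the
// periodic work, sending false resumes it, and closing stop shuts the
// goroutine down for good.
type loop struct {
	mu      sync.Mutex
	stop    chan bool
	stopped sync.WaitGroup
	started bool
}

func (l *loop) run() {
	defer l.stopped.Done()
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	running := false
	for {
		select {
		case <-ticker.C:
			if running {
				fmt.Println("tick") // periodic work goes here
			}
		case pause, ok := <-l.stop:
			if !ok {
				return // channel closed - exit for good
			}
			running = !pause
		}
	}
}

func (l *loop) start() {
	l.mu.Lock()
	defer l.mu.Unlock()
	if !l.started {
		l.stop = make(chan bool)
		l.started = true
		l.stopped.Add(1)
		go l.run()
	}
}

// setPaused pauses (true) or resumes (false) the periodic work.
func (l *loop) setPaused(paused bool) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.started {
		l.stop <- paused
	}
}

func (l *loop) shutdown() {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.started {
		close(l.stop)
		l.stopped.Wait()
		l.started = false
	}
}

func main() {
	l := &loop{}
	l.start()
	l.setPaused(false) // resume
	time.Sleep(350 * time.Millisecond)
	l.setPaused(true) // pause
	l.shutdown()
}
```
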
@ -237,5 +237,14 @@ func TestCountError(t *testing.T) {
|
||||
}
|
||||
|
||||
func percentDiff(start, end uint64) uint64 {
|
||||
return (start - end) * 100 / start
|
||||
if start == 0 {
|
||||
return 0 // Handle zero start value to avoid division by zero
|
||||
}
|
||||
var diff uint64
|
||||
if end > start {
|
||||
diff = end - start // Handle case where end is larger than start
|
||||
} else {
|
||||
diff = start - end
|
||||
}
|
||||
return (diff * 100) / start
|
||||
}
|
||||
|
@ -514,6 +514,8 @@ type UpdateRemoteOpt struct {
|
||||
Obscure bool `json:"obscure"`
|
||||
// Treat all passwords as obscured
|
||||
NoObscure bool `json:"noObscure"`
|
||||
// Don't provide any output
|
||||
NoOutput bool `json:"noOutput"`
|
||||
// Don't interact with the user - return questions
|
||||
NonInteractive bool `json:"nonInteractive"`
|
||||
// If set then supply state and result parameters to continue the process
|
||||
|
@ -120,6 +120,7 @@ func init() {
|
||||
extraHelp += `- opt - a dictionary of options to control the configuration
|
||||
- obscure - declare passwords are plain and need obscuring
|
||||
- noObscure - declare passwords are already obscured and don't need obscuring
|
||||
- noOutput - don't print anything to stdout
|
||||
- nonInteractive - don't interact with a user, return questions
|
||||
- continue - continue the config process with an answer
|
||||
- all - ask all the config questions not just the post config ones
|
||||
|
@ -39,6 +39,7 @@ type Features struct {
|
||||
NoMultiThreading bool // set if can't have multiple threads on one download open
|
||||
Overlay bool // this wraps one or more backends to add functionality
|
||||
ChunkWriterDoesntSeek bool // set if the chunk writer doesn't need to read the data more than once
|
||||
DoubleSlash bool // set if backend supports double slashes in paths
|
||||
|
||||
// Purge all files in the directory specified
|
||||
//
|
||||
@ -383,6 +384,8 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
|
||||
ft.PartialUploads = ft.PartialUploads && mask.PartialUploads
|
||||
ft.NoMultiThreading = ft.NoMultiThreading && mask.NoMultiThreading
|
||||
// ft.Overlay = ft.Overlay && mask.Overlay don't propagate Overlay
|
||||
ft.ChunkWriterDoesntSeek = ft.ChunkWriterDoesntSeek && mask.ChunkWriterDoesntSeek
|
||||
ft.DoubleSlash = ft.DoubleSlash && mask.DoubleSlash
|
||||
|
||||
if mask.Purge == nil {
|
||||
ft.Purge = nil
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
)
|
||||
|
||||
// DirSorted reads Object and *Dir into entries for the given Fs.
|
||||
@ -43,7 +44,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
|
||||
newEntries = entries[:0] // in place filter
|
||||
prefix := ""
|
||||
if dir != "" {
|
||||
prefix = dir + "/"
|
||||
prefix = dir
|
||||
if !bucket.IsAllSlashes(dir) {
|
||||
prefix += "/"
|
||||
}
|
||||
}
|
||||
for _, entry := range entries {
|
||||
ok := true
|
||||
@ -77,10 +81,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
|
||||
case !strings.HasPrefix(remote, prefix):
|
||||
ok = false
|
||||
fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
|
||||
case remote == prefix:
|
||||
case remote == dir:
|
||||
ok = false
|
||||
fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
|
||||
case strings.ContainsRune(remote[len(prefix):], '/'):
|
||||
case strings.ContainsRune(remote[len(prefix):], '/') && !bucket.IsAllSlashes(remote[len(prefix):]):
|
||||
ok = false
|
||||
fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
|
||||
default:
|
||||
|
@ -49,7 +49,8 @@ func TestFilterAndSortIncludeAll(t *testing.T) {
|
||||
|
||||
func TestFilterAndSortCheckDir(t *testing.T) {
|
||||
// Check the different kinds of error when listing "dir"
|
||||
da := mockdir.New("dir/")
|
||||
da := mockdir.New("dir")
|
||||
da2 := mockdir.New("dir/") // double slash dir - allowed for bucket based remotes
|
||||
oA := mockobject.Object("diR/a")
|
||||
db := mockdir.New("dir/b")
|
||||
oB := mockobject.Object("dir/B/sub")
|
||||
@ -57,18 +58,19 @@ func TestFilterAndSortCheckDir(t *testing.T) {
|
||||
oC := mockobject.Object("dir/C")
|
||||
dd := mockdir.New("dir/d")
|
||||
oD := mockobject.Object("dir/D")
|
||||
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
|
||||
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
|
||||
newEntries, err := filterAndSortDir(context.Background(), entries, true, "dir", nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
fs.DirEntries{da2, oC, oD, db, dc, dd},
|
||||
newEntries,
|
||||
fs.DirEntries{oC, oD, db, dc, dd},
|
||||
)
|
||||
}
|
||||
|
||||
func TestFilterAndSortCheckDirRoot(t *testing.T) {
|
||||
// Check the different kinds of error when listing the root ""
|
||||
da := mockdir.New("")
|
||||
da2 := mockdir.New("/") // doubleslash dir allowed on bucket based remotes
|
||||
oA := mockobject.Object("A")
|
||||
db := mockdir.New("b")
|
||||
oB := mockobject.Object("B/sub")
|
||||
@ -76,12 +78,12 @@ func TestFilterAndSortCheckDirRoot(t *testing.T) {
|
||||
oC := mockobject.Object("C")
|
||||
dd := mockdir.New("d")
|
||||
oD := mockobject.Object("D")
|
||||
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
|
||||
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
|
||||
newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
fs.DirEntries{da2, oA, oC, oD, db, dc, dd},
|
||||
newEntries,
|
||||
fs.DirEntries{oA, oC, oD, db, dc, dd},
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -2140,20 +2140,28 @@ func SetTierFile(ctx context.Context, o fs.Object, tier string) error {
|
||||
|
||||
// TouchDir touches every file in directory with time t
|
||||
func TouchDir(ctx context.Context, f fs.Fs, remote string, t time.Time, recursive bool) error {
|
||||
return walk.ListR(ctx, f, remote, false, ConfigMaxDepth(ctx, recursive), walk.ListObjects, func(entries fs.DirEntries) error {
|
||||
ci := fs.GetConfig(ctx)
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
g.SetLimit(ci.Transfers)
|
||||
err := walk.ListR(ctx, f, remote, false, ConfigMaxDepth(ctx, recursive), walk.ListObjects, func(entries fs.DirEntries) error {
|
||||
entries.ForObject(func(o fs.Object) {
|
||||
if !SkipDestructive(ctx, o, "touch") {
|
||||
fs.Debugf(f, "Touching %q", o.Remote())
|
||||
err := o.SetModTime(ctx, t)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to touch: %w", err)
|
||||
err = fs.CountError(ctx, err)
|
||||
fs.Errorf(o, "%v", err)
|
||||
}
|
||||
g.Go(func() error {
|
||||
fs.Debugf(f, "Touching %q", o.Remote())
|
||||
err := o.SetModTime(gCtx, t)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to touch: %w", err)
|
||||
err = fs.CountError(gCtx, err)
|
||||
fs.Errorf(o, "%v", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
})
|
||||
return nil
|
||||
})
|
||||
_ = g.Wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// ListFormat defines files information print format
|
||||
|
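The TouchDir change above parallelises the SetModTime calls with an errgroup bounded by the configured number of transfers. The general pattern, shown here as a hedged standalone sketch rather than rclone's actual operations code, is:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// touchAll applies the same modification time to every item, running at
// most "transfers" operations concurrently - the bounded-parallel pattern
// used in the diff above. The item names are stand-ins for real objects.
func touchAll(ctx context.Context, items []string, t time.Time, transfers int) error {
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(transfers)
	for _, item := range items {
		item := item // capture loop variable for the goroutine (needed before Go 1.22)
		g.Go(func() error {
			// Stand-in for o.SetModTime(gCtx, t) on a real fs.Object
			fmt.Printf("touching %s at %s (ctx err: %v)\n", item, t, gCtx.Err())
			return nil
		})
	}
	return g.Wait()
}

func main() {
	items := []string{"a.txt", "b.txt", "c.txt"}
	if err := touchAll(context.Background(), items, time.Now(), 2); err != nil {
		fmt.Println("touch failed:", err)
	}
}
```
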
@ -726,7 +726,7 @@ func (s *syncCopyMove) markParentNotEmpty(entry fs.DirEntry) {
|
||||
parentDir = ""
|
||||
}
|
||||
delete(s.srcEmptyDirs, parentDir)
|
||||
if parentDir == "" {
|
||||
if parentDir == "" || parentDir == "/" {
|
||||
break
|
||||
}
|
||||
parentDir = path.Dir(parentDir)
|
||||
@ -1109,23 +1109,17 @@ func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Dire
|
||||
if !s.setDirModTimeAfter && equal {
|
||||
return nil
|
||||
}
|
||||
if s.setDirModTimeAfter && equal {
|
||||
newDst = dst
|
||||
} else if s.copyEmptySrcDirs {
|
||||
if s.setDirMetadata {
|
||||
newDst = dst
|
||||
if !equal {
|
||||
if s.setDirMetadata && s.copyEmptySrcDirs {
|
||||
newDst, err = operations.CopyDirMetadata(ctx, f, dst, dir, src)
|
||||
} else if s.setDirModTime {
|
||||
if dst == nil {
|
||||
newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
|
||||
} else {
|
||||
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
|
||||
}
|
||||
} else if dst == nil {
|
||||
// Create the directory if it doesn't exist
|
||||
} else if dst == nil && s.setDirModTime && s.copyEmptySrcDirs {
|
||||
newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
|
||||
} else if dst == nil && s.copyEmptySrcDirs {
|
||||
err = operations.Mkdir(ctx, f, dir)
|
||||
} else if dst != nil && s.setDirModTime {
|
||||
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
|
||||
}
|
||||
} else {
|
||||
newDst = dst
|
||||
}
|
||||
// If we need to set modtime after and we created a dir, then save it for later
|
||||
if s.setDirModTime && s.setDirModTimeAfter && err == nil {
|
||||
|
@ -2762,6 +2762,81 @@ func TestSyncConcurrentTruncate(t *testing.T) {
|
||||
testSyncConcurrent(t, "truncate")
|
||||
}
|
||||
|
||||
// Test that sync replaces dir modtimes in dst if they've changed
|
||||
func testSyncReplaceDirModTime(t *testing.T, copyEmptySrcDirs bool) {
|
||||
accounting.GlobalStats().ResetCounters()
|
||||
ctx, _ := fs.AddConfig(context.Background())
|
||||
r := fstest.NewRun(t)
|
||||
|
||||
file1 := r.WriteFile("file1", "file1", t2)
|
||||
file2 := r.WriteFile("test_dir1/file2", "file2", t2)
|
||||
file3 := r.WriteFile("test_dir2/sub_dir/file3", "file3", t2)
|
||||
r.CheckLocalItems(t, file1, file2, file3)
|
||||
|
||||
_, err := operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// A directory that's empty on both src and dst
|
||||
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_on_remote", t2)
|
||||
require.NoError(t, err)
|
||||
_, err = operations.MkdirModTime(ctx, r.Fremote, "empty_on_remote", t2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// set logging
|
||||
// (this checks log output as DirModtime operations do not yet have stats, and r.CheckDirectoryModTimes also does not tell us what actions were taken)
|
||||
oldLogLevel := fs.GetConfig(context.Background()).LogLevel
|
||||
defer func() { fs.GetConfig(context.Background()).LogLevel = oldLogLevel }() // reset to old val after test
|
||||
// need to do this as fs.Infof only respects the globalConfig
|
||||
fs.GetConfig(context.Background()).LogLevel = fs.LogLevelInfo
|
||||
|
||||
// First run
|
||||
accounting.GlobalStats().ResetCounters()
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
output := bilib.CaptureOutput(func() {
|
||||
err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
require.NotNil(t, output)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
|
||||
// Save all dirs
|
||||
dirs := []string{"test_dir1", "test_dir2", "test_dir2/sub_dir", "empty_on_remote"}
|
||||
if copyEmptySrcDirs {
|
||||
dirs = append(dirs, "empty_dir")
|
||||
}
|
||||
|
||||
// Change dir modtimes
|
||||
for _, dir := range dirs {
|
||||
_, err := operations.SetDirModTime(ctx, r.Flocal, nil, dir, t1)
|
||||
if err != nil && !errors.Is(err, fs.ErrorNotImplemented) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run again
|
||||
accounting.GlobalStats().ResetCounters()
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
output = bilib.CaptureOutput(func() {
|
||||
err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
require.NotNil(t, output)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
r.CheckLocalItems(t, file1, file2, file3)
|
||||
r.CheckRemoteItems(t, file1, file2, file3)
|
||||
|
||||
// Check that the modtimes of the directories are as expected
|
||||
r.CheckDirectoryModTimes(t, dirs...)
|
||||
}
|
||||
|
||||
func TestSyncReplaceDirModTime(t *testing.T) {
|
||||
testSyncReplaceDirModTime(t, false)
|
||||
}
|
||||
|
||||
func TestSyncReplaceDirModTimeWithEmptyDirs(t *testing.T) {
|
||||
testSyncReplaceDirModTime(t, true)
|
||||
}
|
||||
|
||||
// Tests that nothing is transferred when src and dst already match
|
||||
// Run the same sync twice, ensure no action is taken the second time
|
||||
func testNothingToTransfer(t *testing.T, copyEmptySrcDirs bool) {
|
||||
|
@ -2121,6 +2121,144 @@ func Run(t *testing.T, opt *Opt) {
|
||||
}
|
||||
})
|
||||
|
||||
// Run tests for bucket based Fs
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Bucket
|
||||
t.Run("Bucket", func(t *testing.T) {
|
||||
// Test if this Fs is bucket based - this test won't work for wrapped bucket based backends.
|
||||
if !f.Features().BucketBased {
|
||||
t.Skip("Not a bucket based backend")
|
||||
}
|
||||
if f.Features().CanHaveEmptyDirectories {
|
||||
t.Skip("Can have empty directories")
|
||||
}
|
||||
if !f.Features().DoubleSlash {
|
||||
t.Skip("Can't have // in paths")
|
||||
}
|
||||
// Create some troublesome file names
|
||||
fileNames := []string{
|
||||
file1.Path,
|
||||
file2.Path,
|
||||
".leadingdot",
|
||||
"/.leadingdot",
|
||||
"///tripleslash",
|
||||
"//doubleslash",
|
||||
"dir/.leadingdot",
|
||||
"dir///tripleslash",
|
||||
"dir//doubleslash",
|
||||
}
|
||||
dirNames := []string{
|
||||
"hello? sausage",
|
||||
"hello? sausage/êé",
|
||||
"hello? sausage/êé/Hello, 世界",
|
||||
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
|
||||
"/",
|
||||
"//",
|
||||
"///",
|
||||
"dir",
|
||||
"dir/",
|
||||
"dir//",
|
||||
}
|
||||
t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
|
||||
var objs []fs.Object
|
||||
for _, fileName := range fileNames[2:] {
|
||||
contents := "bad file name: " + fileName
|
||||
file := fstest.NewItem(fileName, contents, t1)
|
||||
objs = append(objs, PutTestContents(ctx, t, f, &file, contents, true))
|
||||
}
|
||||
|
||||
// Check they arrived
|
||||
// This uses walk.Walk with a max size set to make sure we don't use ListR
|
||||
check := func(f fs.Fs, dir string, wantFileNames, wantDirNames []string) {
|
||||
t.Helper()
|
||||
var gotFileNames, gotDirNames []string
|
||||
require.NoError(t, walk.Walk(ctx, f, dir, true, 100, func(path string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if _, isObj := entry.(fs.Object); isObj {
|
||||
gotFileNames = append(gotFileNames, entry.Remote())
|
||||
} else {
|
||||
gotDirNames = append(gotDirNames, entry.Remote())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
sort.Strings(wantDirNames)
|
||||
sort.Strings(wantFileNames)
|
||||
sort.Strings(gotDirNames)
|
||||
sort.Strings(gotFileNames)
|
||||
assert.Equal(t, wantFileNames, gotFileNames)
|
||||
assert.Equal(t, wantDirNames, gotDirNames)
|
||||
}
|
||||
check(f, "", fileNames, dirNames)
|
||||
check(f, "/", []string{
|
||||
"/.leadingdot",
|
||||
"///tripleslash",
|
||||
"//doubleslash",
|
||||
}, []string{
|
||||
"//",
|
||||
"///",
|
||||
})
|
||||
check(f, "//", []string{
|
||||
"///tripleslash",
|
||||
"//doubleslash",
|
||||
}, []string{
|
||||
"///",
|
||||
})
|
||||
check(f, "dir", []string{
|
||||
"dir/.leadingdot",
|
||||
"dir///tripleslash",
|
||||
"dir//doubleslash",
|
||||
}, []string{
|
||||
"dir/",
|
||||
"dir//",
|
||||
})
|
||||
check(f, "dir/", []string{
|
||||
"dir///tripleslash",
|
||||
"dir//doubleslash",
|
||||
}, []string{
|
||||
"dir//",
|
||||
})
|
||||
check(f, "dir//", []string{
|
||||
"dir///tripleslash",
|
||||
}, nil)
|
||||
|
||||
// Now create a backend not at the root of a bucket
|
||||
f2, err := fs.NewFs(ctx, subRemoteName+"/dir")
|
||||
require.NoError(t, err)
|
||||
check(f2, "", []string{
|
||||
".leadingdot",
|
||||
"//tripleslash",
|
||||
"/doubleslash",
|
||||
}, []string{
|
||||
"/",
|
||||
"//",
|
||||
})
|
||||
check(f2, "/", []string{
|
||||
"//tripleslash",
|
||||
"/doubleslash",
|
||||
}, []string{
|
||||
"//",
|
||||
})
|
||||
check(f2, "//", []string{
|
||||
"//tripleslash",
|
||||
}, []string(nil))
|
||||
|
||||
// Remove the objects
|
||||
for _, obj := range objs {
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
}
|
||||
|
||||
// Check they are gone
|
||||
fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
|
||||
"hello? sausage",
|
||||
"hello? sausage/êé",
|
||||
"hello? sausage/êé/Hello, 世界",
|
||||
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
|
||||
}, fs.GetModifyWindow(ctx, f))
|
||||
})
|
||||
|
||||
// State of remote at the moment the internal tests are called
|
||||
InternalTestFiles = []fstest.Item{file1, file2}
|
||||
|
||||
|
@ -147,10 +147,6 @@ backends:
|
||||
- backend: "dropbox"
|
||||
remote: "TestDropbox:"
|
||||
fastlist: false
|
||||
ignore:
|
||||
# This test doesn't work on a standard dropbox account because it
|
||||
# tries to set the expiry of the link
|
||||
- TestIntegration/FsMkdir/FsPutFiles/PublicLink
|
||||
# - backend: "filefabric"
|
||||
# remote: "TestFileFabric:"
|
||||
# fastlist: false
|
||||
|
go.mod
@ -26,7 +26,7 @@ require (
|
||||
github.com/aws/smithy-go v1.22.1
|
||||
github.com/buengese/sgzip v0.1.1
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.9.0
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed
|
||||
github.com/colinmarc/hdfs/v2 v2.4.0
|
||||
github.com/coreos/go-semver v0.3.1
|
||||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
@ -94,6 +94,7 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
pgregory.net/rapid v1.1.0
|
||||
storj.io/uplink v1.13.1
|
||||
|
||||
)
|
||||
|
||||
require (
|
||||
@ -112,6 +113,7 @@ require (
|
||||
github.com/anacrolix/generics v0.0.1 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
|
||||
@ -149,6 +151,11 @@ require (
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/errors v0.21.0 // indirect
|
||||
github.com/go-openapi/strfmt v0.22.1 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.20.0 // indirect
|
||||
github.com/go-resty/resty/v2 v2.11.0 // indirect
|
||||
github.com/goccy/go-json v0.10.4 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
@ -175,14 +182,16 @@ require (
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lpar/date v1.0.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/xxml v0.0.3 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/panjf2000/ants/v2 v2.9.1 // indirect
|
||||
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
@ -206,6 +215,7 @@ require (
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/errs v1.3.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel v1.31.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.31.0 // indirect
|
||||
@ -226,6 +236,7 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/IBM/go-sdk-core/v5 v5.17.5
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.1.4
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1
|
||||
|
go.sum
@ -63,6 +63,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.107 h1:TRDGQGYANuuMxX8JU++oQKEGitJpKWC1EB0SbNXRVqI=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.107/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
|
||||
github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
|
||||
github.com/IBM/go-sdk-core/v5 v5.17.5/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
@ -108,6 +110,8 @@ github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsVi
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU=
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo=
|
||||
@ -179,8 +183,8 @@ github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vc
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.9.0 h1:8C76QklmuV4qmKAC7cUnu9D68X9kCkFMuLspPikECCo=
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.9.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6 h1:mLY/79N73URZ2J/oRKTxmfhCgxThzBmjQ6XOjX5tYjI=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6/go.mod h1:0aLYPsmguHbok591y6hI5yAqU0drbUzrPEO10ZpgTTw=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed h1:KrdJUJWhJ1UWhvaP6SBsvG356KjqfdDjcS/4xTswAU4=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed/go.mod h1:0aLYPsmguHbok591y6hI5yAqU0drbUzrPEO10ZpgTTw=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
|
||||
@ -235,8 +239,6 @@ github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y=
|
||||
github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
|
||||
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
|
||||
@ -270,6 +272,12 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
|
||||
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
|
||||
github.com/go-openapi/strfmt v0.22.1 h1:5Ky8cybT4576C6Ffc+8gYji/wRXCo6Ozm8RaWjPI6jc=
|
||||
github.com/go-openapi/strfmt v0.22.1/go.mod h1:OfVoytIXJasDkkGvkb1Cceb3BPyMOwk1FgmyyEw7NYg=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
@ -278,7 +286,6 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx
|
||||
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
|
||||
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
@ -385,7 +392,6 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
|
||||
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
|
||||
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@ -468,6 +474,8 @@ github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 h1:cUVxyR+U
|
||||
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75/go.mod h1:pBbZyGwC5i16IBkjVKoy/sznA8jPD/K9iedwe1ESE6w=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
@ -478,17 +486,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
|
||||
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.81.1 h1:JYc47bk8n/MUchA2KHu1ggsCQzlJZQLJ+tTKfOho00E=
|
||||
@ -594,7 +599,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@ -648,6 +652,8 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
@ -738,7 +744,6 @@ golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
@ -760,7 +765,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
@ -807,7 +811,6 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -817,12 +820,9 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -841,7 +841,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -948,7 +947,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
@ -1049,15 +1047,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY=
|
||||
gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
@ -31,7 +31,9 @@ func Split(absPath string) (bucket, bucketPath string) {
|
||||
|
||||
// Join path1 and path2
|
||||
//
|
||||
// Like path.Join but does not clean the path - useful to preserve trailing /
|
||||
// Like path.Join but does not clean the path - useful to preserve trailing /.
|
||||
//
|
||||
// It also does not clean multiple // in the path.
|
||||
func Join(path1, path2 string) string {
|
||||
if path1 == "" {
|
||||
return path2
|
||||
@ -39,7 +41,22 @@ func Join(path1, path2 string) string {
|
||||
if path2 == "" {
|
||||
return path1
|
||||
}
|
||||
return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
|
||||
return path1 + "/" + path2
|
||||
}
|
||||
|
||||
// IsAllSlashes returns true if s is all / characters.
|
||||
//
|
||||
// It returns false if s is "".
|
||||
func IsAllSlashes(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, c := range s {
|
||||
if c != '/' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Cache stores whether buckets are available and their IDs
|
||||
|
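As a quick illustration of the Join doc comment above (assuming the lib/bucket package API exactly as shown in this diff, with the hypothetical inputs below), Join keeps whatever separators it is given, where path.Join would clean them:

```go
package main

import (
	"fmt"
	"path"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	// path.Join cleans the result, collapsing the extra slashes
	fmt.Println(path.Join("dir/", "/file")) // "dir/file"
	// bucket.Join just glues the parts together with a single "/",
	// so existing trailing and leading slashes survive
	fmt.Println(bucket.Join("dir/", "/file")) // "dir///file"
	fmt.Println(bucket.Join("dir", "sub/"))   // "dir/sub/"
}
```
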
@ -34,10 +34,10 @@ func TestJoin(t *testing.T) {
|
||||
{in1: "in1", in2: "", want: "in1"},
|
||||
{in1: "", in2: "in2", want: "in2"},
|
||||
{in1: "in1", in2: "in2", want: "in1/in2"},
|
||||
{in1: "in1/", in2: "in2", want: "in1/in2"},
|
||||
{in1: "in1", in2: "/in2", want: "in1/in2"},
|
||||
{in1: "in1/", in2: "in2", want: "in1//in2"},
|
||||
{in1: "in1", in2: "/in2", want: "in1//in2"},
|
||||
{in1: "in1", in2: "in2/", want: "in1/in2/"},
|
||||
{in1: "/in1", in2: "/in2", want: "/in1/in2"},
|
||||
{in1: "/in1", in2: "/in2", want: "/in1//in2"},
|
||||
{in1: "/in1", in2: "../in2", want: "/in1/../in2"},
|
||||
} {
|
||||
got := Join(test.in1, test.in2)
|
||||
@ -45,6 +45,24 @@ func TestJoin(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsAllSlashes(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want bool
|
||||
}{
|
||||
{in: "", want: false},
|
||||
{in: "/", want: true},
|
||||
{in: "x/", want: false},
|
||||
{in: "/x", want: false},
|
||||
{in: "//", want: true},
|
||||
{in: "/x/", want: false},
|
||||
{in: "///", want: true},
|
||||
} {
|
||||
got := IsAllSlashes(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
c := NewCache()
|
||||
errBoom := errors.New("boom")
|
||||
|
@ -108,7 +108,7 @@ func (conf *Config) MakeOauth2Config() *oauth2.Config {
|
||||
return &oauth2.Config{
|
||||
ClientID: conf.ClientID,
|
||||
ClientSecret: conf.ClientSecret,
|
||||
RedirectURL: RedirectLocalhostURL,
|
||||
RedirectURL: conf.RedirectURL,
|
||||
Scopes: conf.Scopes,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: conf.AuthURL,
|
||||
|
@ -760,6 +760,7 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
|
||||
dir := node.(*Dir)
|
||||
dir.mu.Lock()
|
||||
dir.modTime = item.ModTime(context.TODO())
|
||||
dir.entry = item
|
||||
if dirTree != nil {
|
||||
err = dir._readDirFromDirTree(dirTree, when)
|
||||
if err != nil {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
@ -655,3 +656,34 @@ func TestDirFileOpen(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(12), fi.Size())
|
||||
}
|
||||
|
||||
func TestDirEntryModTimeInvalidation(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("dirent modtime is unreliable on Windows filesystems")
|
||||
}
|
||||
r, vfs := newTestVFS(t)
|
||||
|
||||
// Needs to be less than 2x the wait time below, otherwise the entry
|
||||
// gets cleared out before it had a chance to be updated.
|
||||
vfs.Opt.DirCacheTime = fs.Duration(50 * time.Millisecond)
|
||||
|
||||
r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
|
||||
|
||||
node, err := vfs.Stat("dir")
|
||||
require.NoError(t, err)
|
||||
modTime1 := node.(*Dir).DirEntry().ModTime(context.Background())
|
||||
|
||||
// Wait some time, then write another file which must update ModTime of
|
||||
// the directory.
|
||||
time.Sleep(75 * time.Millisecond)
|
||||
r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2)
|
||||
|
||||
node2, err := vfs.Stat("dir")
|
||||
require.NoError(t, err)
|
||||
modTime2 := node2.(*Dir).DirEntry().ModTime(context.Background())
|
||||
|
||||
// ModTime of directory must be different after second file was written.
|
||||
if modTime1.Equal(modTime2) {
|
||||
t.Error("ModTime not invalidated")
|
||||
}
|
||||
}
|
||||
|
@ -227,7 +227,10 @@ func (c *Cache) createItemDir(name string) (string, error) {
|
||||
|
||||
// getBackend gets a backend for a cache root dir
|
||||
func getBackend(ctx context.Context, parentPath string, name string, relativeDirPath string) (fs.Fs, error) {
|
||||
path := fmt.Sprintf(":local,encoding='%v':%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
|
||||
// Make sure we turn off the global links flag as it overrides the backend specific one
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.Links = false
|
||||
path := fmt.Sprintf(":local,encoding='%v',links=false:%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
|
||||
return fscache.Get(ctx, path)
|
||||
}
|
||||
|
||||
|