mirror of https://github.com/rclone/rclone.git
synced 2025-04-19 18:31:10 +08:00

Merge branch 'master' into feature/8298-http-metadata

commit 877afc3642
@@ -1,77 +0,0 @@ (deleted workflow file)
name: Docker beta build

on:
  push:
    branches:
      - master

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the workflow. In this case, it will
          # either be the user who created the release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually the cache will need to be cleared if builds are more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
.github/workflows/build_publish_docker_image.yml (new file, 294 lines)
@@ -0,0 +1,294 @@
---
# GitHub Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor  # Important for digest annotation (used by GitHub packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the workflow. In this case, it will
          # either be the user who created the release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the workflow. In this case, it will
          # either be the user who created the release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
.github/workflows/build_publish_docker_plugin.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
---
# GitHub Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build_docker_volume_plugin:
    if: inputs.manual || github.repository == 'rclone/rclone'
    name: Build docker plugin job
    runs-on: ubuntu-latest
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
@@ -1,89 +0,0 @@ (deleted workflow file)
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          push: true
          tags: |
            rclone/rclone:latest
            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
Dockerfile
@@ -1,21 +1,47 @@
 FROM golang:alpine AS builder
 
-COPY . /go/src/github.com/rclone/rclone/
+ARG CGO_ENABLED=0
 
 WORKDIR /go/src/github.com/rclone/rclone/
 
-RUN apk add --no-cache make bash gawk git
-RUN \
-  CGO_ENABLED=0 \
-  make
-RUN ./rclone version
+RUN echo "**** Set Go Environment Variables ****" && \
+  go env -w GOCACHE=/root/.cache/go-build
+
+RUN echo "**** Install Dependencies ****" && \
+  apk add --no-cache \
+    make \
+    bash \
+    gawk \
+    git
+
+COPY go.mod .
+COPY go.sum .
+
+RUN echo "**** Download Go Dependencies ****" && \
+  go mod download -x
+
+RUN echo "**** Verify Go Dependencies ****" && \
+  go mod verify
+
+COPY . .
+
+RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
+  echo "**** Build Binary ****" && \
+  make
+
+RUN echo "**** Print Version Binary ****" && \
+  ./rclone version
 
 # Begin final image
 FROM alpine:latest
 
 LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"
 
-RUN apk --no-cache add ca-certificates fuse3 tzdata && \
-    echo "user_allow_other" >> /etc/fuse.conf
+RUN echo "**** Install Dependencies ****" && \
+  apk add --no-cache \
+    ca-certificates \
+    fuse3 \
+    tzdata && \
+  echo "Enable user_allow_other in fuse" && \
+  echo "user_allow_other" >> /etc/fuse.conf
 
 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
@@ -131,8 +131,8 @@ Now

* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* Do the steps as above
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
backend/b2/b2.go
@@ -300,14 +300,13 @@ type Fs struct {

 // Object describes a b2 object
 type Object struct {
-	fs       *Fs               // what this object is part of
-	remote   string            // The remote path
-	id       string            // b2 id of the file
-	modTime  time.Time         // The modified time of the object if known
-	sha1     string            // SHA-1 hash if known
-	size     int64             // Size of the object
-	mimeType string            // Content-Type of the object
-	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
+	fs       *Fs       // what this object is part of
+	remote   string    // The remote path
+	id       string    // b2 id of the file
+	modTime  time.Time // The modified time of the object if known
+	sha1     string    // SHA-1 hash if known
+	size     int64     // Size of the object
+	mimeType string    // Content-Type of the object
 }
 
 // ------------------------------------------------------------
@@ -1605,9 +1604,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	if err != nil {
 		return err
 	}
-	// For now, just set "mtime" in metadata
-	o.meta = make(map[string]string, 1)
-	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
 	return nil
 }
 
@@ -1887,13 +1883,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		Info: Info,
 	}
 
-	// Embryonic metadata support - just mtime
-	o.meta = make(map[string]string, 1)
-	modTime, err := parseTimeStringHelper(info.Info[timeKey])
-	if err == nil {
-		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
-	}
-
 	// When reading files from B2 via cloudflare using
 	// --b2-download-url cloudflare strips the Content-Length
 	// headers (presumably so it can inject stuff) so use the old

backend/b2/b2_internal_test.go
@@ -258,12 +258,6 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
 		assert.Equal(t, v, got, k)
 	}
 
-	// mtime
-	for k, v := range metadata {
-		got := o.meta[k]
-		assert.Equal(t, v, got, k)
-	}
-
 	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
 
 	// Modification time from the x-bz-info-src_last_modified_millis header
backend/chunker/chunker.go
@@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
 	if len(data) > maxMetadataSizeWritten {
 		return nil, false, ErrMetaTooBig
 	}
-	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
 		return nil, false, errors.New("invalid json")
 	}
 	var metadata metaSimpleJSON
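The simplification above is safe because `len` of a nil slice is 0 in Go, so `len(data) < 2` already covers the `data == nil` case. A tiny standalone check:

package main

import "fmt"

func main() {
	var data []byte                     // nil slice
	fmt.Println(data == nil, len(data)) // prints "true 0": len(nil) is 0, so len(data) < 2 subsumes the nil check
}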
backend/drive/drive.go
@@ -203,7 +203,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
 		if scope == scopePrefix+"drive.appfolder" {
 			return true
 		}
-
 	}
 	return false
 }
@@ -1212,6 +1211,7 @@ func fixMimeType(mimeTypeIn string) string {
 	}
 	return mimeTypeOut
 }
+
 func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
 	out = make(map[string][]string, len(in))
 	for k, v := range in {
@@ -1222,9 +1222,11 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
 	}
 	return out
 }
+
 func isInternalMimeType(mimeType string) bool {
 	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
 }
+
 func isLinkMimeType(mimeType string) bool {
 	return strings.HasPrefix(mimeType, "application/x-link-")
 }
@@ -1657,7 +1659,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
 	ctx context.Context, remote string, info *drive.File,
-	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
+	extension, exportName, exportMimeType string, isDocument bool,
+) (o fs.Object, err error) {
 	// Note that resolveShortcut will have been called already if
 	// we are being called from a listing. However the drive.Item
 	// will have been resolved so this will do nothing.
@@ -1848,6 +1851,7 @@ func linkTemplate(mt string) *template.Template {
 	})
 	return _linkTemplates[mt]
 }
+
 func (f *Fs) fetchFormats(ctx context.Context) {
 	fetchFormatsOnce.Do(func() {
 		var about *drive.About
@@ -1893,7 +1897,8 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", false)
 func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
-	extension, mimeType string, isDocument bool) {
+	extension, mimeType string, isDocument bool,
+) {
 	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
 	if isDocument {
 		for _, _extension := range f.exportExtensions {
@@ -2689,7 +2694,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	if shortcutID != "" {
 		return f.delete(ctx, shortcutID, f.opt.UseTrash)
 	}
-	var trashedFiles = false
+	trashedFiles := false
 	if check {
 		found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
 			if !item.Trashed {
@@ -2926,7 +2931,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
 		return f.shouldRetry(ctx, err)
 	})
-
 	if err != nil {
 		return err
 	}
@@ -3187,6 +3191,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 		}
 	}()
 }
+
 func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
 	var startPageToken *drive.StartPageToken
 	err = f.pacer.Call(func() (bool, error) {
@@ -4018,14 +4023,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	case "query":
 		if len(arg) == 1 {
 			query := arg[0]
-			var results, err = f.query(ctx, query)
+			results, err := f.query(ctx, query)
 			if err != nil {
 				return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
 			}
 			return results, nil
-		} else {
-			return nil, errors.New("need a query argument")
 		}
+		return nil, errors.New("need a query argument")
 	case "rescue":
 		dirID := ""
 		_, delete := opt["delete"]
@@ -4085,6 +4089,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	}
 	return "", hash.ErrUnsupported
 }
+
 func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
 		return "", hash.ErrUnsupported
@@ -4099,7 +4104,8 @@ func (o *baseObject) Size() int64 {
 
 // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
 func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
-	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
+	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
+) {
 	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
@@ -4312,12 +4318,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	return o.baseObject.open(ctx, o.url, options...)
 }
 
 func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	// Update the size with what we are reading as it can change from
 	// the HEAD in the listing to this GET. This stops rclone marking
 	// the transfer as corrupted.
 	var offset, end int64 = 0, -1
-	var newOptions = options[:0]
+	newOptions := options[:0]
 	for _, o := range options {
 		// Note that Range requests don't work on Google docs:
 		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4344,9 +4351,10 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
 	}
 	return
 }
 
 func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset, limit int64 = 0, -1
-	var data = o.content
+	data := o.content
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -4371,7 +4379,8 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
 	}
 
 func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
-	src fs.ObjectInfo) (info *drive.File, err error) {
+	src fs.ObjectInfo,
+) (info *drive.File, err error) {
 	// Make the API request to upload metadata and file data.
 	size := src.Size()
 	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4449,6 +4458,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	return nil
 }
+
 func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	srcMimeType := fs.MimeType(ctx, src)
 	importMimeType := ""
@@ -4544,6 +4554,7 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
 func (o *documentObject) ext() string {
 	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
 }
+
 func (o *linkObject) ext() string {
 	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
 }
backend/dropbox/dropbox.go
@@ -92,6 +92,9 @@ const (
 	maxFileNameLength = 255
 )
 
+type exportAPIFormat string
+type exportExtension string // dotless
+
 var (
 	// Description of how to auth for this app
 	dropboxConfig = &oauthutil.Config{
@@ -132,6 +135,16 @@ var (
 		DefaultTimeoutAsync:   10 * time.Second,
 		DefaultBatchSizeAsync: 100,
 	}
+
+	exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
+		"markdown": "md",
+		"html":     "html",
+	}
+	// Populated based on exportKnownAPIFormats
+	exportKnownExtensions = map[exportExtension]exportAPIFormat{}
+
+	paperExtension         = ".paper"
+	paperTemplateExtension = ".papert"
 )
 
 // Gets an oauth config with the right scopes
@@ -247,23 +260,61 @@ folders.`,
 			Help:     "Specify a different Dropbox namespace ID to use as the root for all paths.",
 			Default:  "",
 			Advanced: true,
-		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
+		}, {
+			Name: "export_formats",
+			Help: `Comma separated list of preferred formats for exporting files
+
+Certain Dropbox files can only be accessed by exporting them to another format.
+These include Dropbox Paper documents.
+
+For each such file, rclone will choose the first format on this list that Dropbox
+considers valid. If none is valid, it will choose Dropbox's default format.
+
+Known formats include: "html", "md" (markdown)`,
+			Default:  fs.CommaSepList{"html", "md"},
+			Advanced: true,
+		}, {
+			Name:     "skip_exports",
+			Help:     "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name:    "show_all_exports",
+			Default: false,
+			Help: `Show all exportable files in listings.
+
+Adding this flag will allow all exportable files to be server side copied.
+Note that rclone doesn't add extensions to the exportable file names in this mode.
+
+Do **not** use this flag when trying to download exportable files - rclone
+will fail to download them.
+`,
+			Advanced: true,
+		},
+		}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
 	})
+
+	for apiFormat, ext := range exportKnownAPIFormats {
+		exportKnownExtensions[ext] = apiFormat
+	}
 }
 
 // Options defines the configuration for this backend
 type Options struct {
-	ChunkSize     fs.SizeSuffix        `config:"chunk_size"`
-	Impersonate   string               `config:"impersonate"`
-	SharedFiles   bool                 `config:"shared_files"`
-	SharedFolders bool                 `config:"shared_folders"`
-	BatchMode     string               `config:"batch_mode"`
-	BatchSize     int                  `config:"batch_size"`
-	BatchTimeout  fs.Duration          `config:"batch_timeout"`
-	AsyncBatch    bool                 `config:"async_batch"`
-	PacerMinSleep fs.Duration          `config:"pacer_min_sleep"`
-	Enc           encoder.MultiEncoder `config:"encoding"`
-	RootNsid      string               `config:"root_namespace"`
+	ChunkSize      fs.SizeSuffix        `config:"chunk_size"`
+	Impersonate    string               `config:"impersonate"`
+	SharedFiles    bool                 `config:"shared_files"`
+	SharedFolders  bool                 `config:"shared_folders"`
+	BatchMode      string               `config:"batch_mode"`
+	BatchSize      int                  `config:"batch_size"`
+	BatchTimeout   fs.Duration          `config:"batch_timeout"`
+	AsyncBatch     bool                 `config:"async_batch"`
+	PacerMinSleep  fs.Duration          `config:"pacer_min_sleep"`
+	Enc            encoder.MultiEncoder `config:"encoding"`
+	RootNsid       string               `config:"root_namespace"`
+	ExportFormats  fs.CommaSepList      `config:"export_formats"`
+	SkipExports    bool                 `config:"skip_exports"`
+	ShowAllExports bool                 `config:"show_all_exports"`
 }
 
 // Fs represents a remote dropbox server
@@ -283,8 +334,18 @@ type Fs struct {
 	pacer      *fs.Pacer // To pace the API calls
 	ns         string    // The namespace we are using or "" for none
 	batcher    *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
+	exportExts []exportExtension
 }
 
+type exportType int
+
+const (
+	notExport        exportType = iota // a regular file
+	exportHide                         // should be hidden
+	exportListOnly                     // listable, but can't export
+	exportExportable                   // can export
+)
+
 // Object describes a dropbox object
 //
 // Dropbox Objects always have full metadata
@@ -296,6 +357,9 @@ type Object struct {
 	bytes   int64     // size of the object
 	modTime time.Time // time it was last modified
 	hash    string    // content_hash of the object
+
+	exportType      exportType
+	exportAPIFormat exportAPIFormat
 }
 
 // Name of the remote (as passed into NewFs)
@@ -436,6 +500,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		HeaderGenerator: f.headerGenerator,
 	}
 
+	for _, e := range opt.ExportFormats {
+		ext := exportExtension(e)
+		if exportKnownExtensions[ext] == "" {
+			return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
+		}
+		f.exportExts = append(f.exportExts, ext)
+	}
+
 	// unauthorized config for endpoints that fail with auth
 	ucfg := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -588,38 +660,126 @@ func (f *Fs) setRoot(root string) {
 	}
 }
 
+type getMetadataResult struct {
+	entry    files.IsMetadata
+	notFound bool
+	err      error
+}
+
 // getMetadata gets the metadata for a file or directory
-func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
-	err = f.pacer.Call(func() (bool, error) {
-		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
+func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
+	res.err = f.pacer.Call(func() (bool, error) {
+		res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
 			Path: f.opt.Enc.FromStandardPath(objPath),
 		})
-		return shouldRetry(ctx, err)
+		return shouldRetry(ctx, res.err)
 	})
-	if err != nil {
-		switch e := err.(type) {
+	if res.err != nil {
+		switch e := res.err.(type) {
 		case files.GetMetadataAPIError:
 			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
-				notFound = true
-				err = nil
+				res.notFound = true
+				res.err = nil
 			}
 		}
 	}
 	return
 }
 
-// getFileMetadata gets the metadata for a file
-func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
-	entry, notFound, err := f.getMetadata(ctx, filePath)
-	if err != nil {
-		return nil, err
+// Get metadata such that the result would be exported with the given extension
+// Return a channel that will eventually receive the metadata
+func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
+	ch := make(chan getMetadataResult, 1)
+	wantDownloadable := (wantExportExtension == "")
+	go func() {
+		defer close(ch)
+
+		res := f.getMetadata(ctx, filePath)
+		info, ok := res.entry.(*files.FileMetadata)
+		if !ok { // Can't check anything about file, just return what we have
+			ch <- res
+			return
+		}
+
+		// Return notFound if downloadability or extension doesn't match
+		if wantDownloadable != info.IsDownloadable {
+			ch <- getMetadataResult{notFound: true}
+			return
+		}
+		if !info.IsDownloadable {
+			_, ext := f.chooseExportFormat(info)
+			if ext != wantExportExtension {
+				ch <- getMetadataResult{notFound: true}
+				return
+			}
+		}
+
+		// Return our real result or error
+		ch <- res
+	}()
+	return ch
+}
+
+// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
+// Multiple paths might be plausible, due to export path munging.
+func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
+	ret = []<-chan getMetadataResult{}
+
+	// Prefer an exact match
+	ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
+
+	// Check if we're plausibly an export path, otherwise we're done
+	if f.opt.SkipExports || f.opt.ShowAllExports {
+		return
 	}
-	if notFound {
+	dotted := path.Ext(filePath)
+	if dotted == "" {
+		return
+	}
+	ext := exportExtension(dotted[1:])
+	if exportKnownExtensions[ext] == "" {
+		return
+	}
+
+	// We might be an export path! Try all possibilities
+	base := strings.TrimSuffix(filePath, dotted)
+
+	// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
+	if strings.HasSuffix(base, paperTemplateExtension) {
+		ret = append(ret, f.getMetadataForExt(ctx, base, ext))
+		return
+	}
+
+	// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
+	ret = append(ret, f.getMetadataForExt(ctx, base, ext))
+	ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
+	return
+}
+
+// getFileMetadata gets the metadata for a file
+func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
+	var res getMetadataResult
+
+	// Try all possible metadatas
+	possibleMetadatas := f.possibleMetadatas(ctx, filePath)
+	for _, ch := range possibleMetadatas {
+		res = <-ch
+
+		if res.err != nil {
+			return nil, res.err
+		}
+		if !res.notFound {
+			break
+		}
+	}
+
+	if res.notFound {
 		return nil, fs.ErrorObjectNotFound
 	}
-	fileInfo, ok := entry.(*files.FileMetadata)
+
+	fileInfo, ok := res.entry.(*files.FileMetadata)
 	if !ok {
-		if _, ok = entry.(*files.FolderMetadata); ok {
+		if _, ok = res.entry.(*files.FolderMetadata); ok {
 			return nil, fs.ErrorIsDir
 		}
 		return nil, fs.ErrorNotAFile
@@ -628,15 +788,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
 }
 
 // getDirMetadata gets the metadata for a directory
-func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
-	entry, notFound, err := f.getMetadata(ctx, dirPath)
-	if err != nil {
-		return nil, err
+func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
+	res := f.getMetadata(ctx, dirPath)
+	if res.err != nil {
+		return nil, res.err
 	}
-	if notFound {
+	if res.notFound {
 		return nil, fs.ErrorDirNotFound
 	}
-	dirInfo, ok := entry.(*files.FolderMetadata)
+	dirInfo, ok := res.entry.(*files.FolderMetadata)
 	if !ok {
 		return nil, fs.ErrorIsFile
 	}
@@ -836,16 +996,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	var res *files.ListFolderResult
 	for {
 		if !started {
-			arg := files.ListFolderArg{
-				Path:      f.opt.Enc.FromStandardPath(root),
-				Recursive: false,
-				Limit:     1000,
-			}
+			arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
+			arg.Recursive = false
+			arg.Limit = 1000
+
 			if root == "/" {
 				arg.Path = "" // Specify root folder as empty string
 			}
 			err = f.pacer.Call(func() (bool, error) {
-				res, err = f.srv.ListFolder(&arg)
+				res, err = f.srv.ListFolder(arg)
 				return shouldRetry(ctx, err)
 			})
 			if err != nil {
@@ -898,7 +1057,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if err != nil {
 				return nil, err
 			}
-			entries = append(entries, o)
+			if o.(*Object).exportType.listable() {
+				entries = append(entries, o)
+			}
 		}
 	}
 	if !res.HasMore {
@@ -984,16 +1145,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 	}
 
 	// check directory empty
-	arg := files.ListFolderArg{
-		Path:      encRoot,
-		Recursive: false,
-	}
+	arg := files.NewListFolderArg(encRoot)
+	arg.Recursive = false
 	if root == "/" {
 		arg.Path = "" // Specify root folder as empty string
 	}
 	var res *files.ListFolderResult
 	err = f.pacer.Call(func() (bool, error) {
-		res, err = f.srv.ListFolder(&arg)
+		res, err = f.srv.ListFolder(arg)
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -1174,6 +1333,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, err)
 	})
 
+	if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
+		// Some plans can't create links with expiry
+		fs.Debugf(absPath, "can't create link with expiry, trying without")
+		createArg.Settings.Expires = nil
+		err = f.pacer.Call(func() (bool, error) {
+			linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
+			return shouldRetry(ctx, err)
+		})
+	}
+
 	if err != nil && strings.Contains(err.Error(),
 		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1338,16 +1507,14 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
 	var startCursor *files.ListFolderGetLatestCursorResult
 
 	err = f.pacer.Call(func() (bool, error) {
-		arg := files.ListFolderArg{
-			Path:      f.opt.Enc.FromStandardPath(f.slashRoot),
-			Recursive: true,
-		}
+		arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
+		arg.Recursive = true
 
 		if arg.Path == "/" {
 			arg.Path = ""
 		}
 
-		startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
+		startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
 
 		return shouldRetry(ctx, err)
 	})
@@ -1451,8 +1618,50 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 	return nil
 }
 
+func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
+	// Find API export formats Dropbox supports for this file
+	// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
+	ei := info.ExportInfo
+	dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
+
+	// Find which extensions these correspond to
+	exportExtensions := map[exportExtension]exportAPIFormat{}
+	var dropboxPreferredAPIFormat exportAPIFormat
+	var dropboxPreferredExtension exportExtension
+	for _, format := range dropboxFormatStrings {
+		apiFormat := exportAPIFormat(format)
+		// Only consider formats we know about
+		if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
+			if dropboxPreferredAPIFormat == "" {
+				dropboxPreferredAPIFormat = apiFormat
+				dropboxPreferredExtension = ext
+			}
+			exportExtensions[ext] = apiFormat
+		}
+	}
+
+	// See if the user picked a valid extension
+	for _, ext := range f.exportExts {
+		if apiFormat, ok := exportExtensions[ext]; ok {
+			return apiFormat, ext
+		}
+	}
+
+	// If no matches, prefer the first valid format Dropbox lists
+	return dropboxPreferredAPIFormat, dropboxPreferredExtension
+}
+
 // ------------------------------------------------------------
 
+func (et exportType) listable() bool {
+	return et != exportHide
+}
+
+// something we should _try_ to export
+func (et exportType) exportable() bool {
+	return et == exportExportable || et == exportListOnly
+}
+
 // Fs returns the parent Fs
 func (o *Object) Fs() fs.Info {
 	return o.fs
@@ -1496,6 +1705,32 @@ func (o *Object) Size() int64 {
 	return o.bytes
 }
 
+func (o *Object) setMetadataForExport(info *files.FileMetadata) {
+	o.bytes = -1
+	o.hash = ""
+
+	if o.fs.opt.SkipExports {
+		o.exportType = exportHide
+		return
+	}
+	if o.fs.opt.ShowAllExports {
+		o.exportType = exportListOnly
+		return
+	}
+
+	var exportExt exportExtension
+	o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
+	if o.exportAPIFormat == "" {
+		o.exportType = exportHide
+	} else {
+		o.exportType = exportExportable
+		// get rid of any paper extension, if present
+		o.remote = strings.TrimSuffix(o.remote, paperExtension)
+		// add the export extension
+		o.remote += "." + string(exportExt)
+	}
+}
+
 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
 // This isn't a complete set of metadata and has an inaccurate date
@@ -1504,6 +1739,10 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
 	o.bytes = int64(info.Size)
 	o.modTime = info.ClientModified
 	o.hash = info.ContentHash
+
+	if !info.IsDownloadable {
+		o.setMetadataForExport(info)
+	}
 	return nil
 }
 
@@ -1567,6 +1806,27 @@ func (o *Object) Storable() bool {
 	return true
 }
 
+func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
+	if o.exportType == exportListOnly || o.exportAPIFormat == "" {
+		fs.Debugf(o.remote, "No export format found")
+		return nil, fs.ErrorObjectNotFound
+	}
+
+	arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
+	var exportResult *files.ExportResult
+	err = o.fs.pacer.Call(func() (bool, error) {
+		exportResult, in, err = o.fs.srv.Export(&arg)
+		return shouldRetry(ctx, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	o.bytes = int64(exportResult.ExportMetadata.Size)
+	o.hash = exportResult.ExportMetadata.ExportHash
+	return
+}
+
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if o.fs.opt.SharedFiles {
@@ -1586,6 +1846,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return
 	}
 
+	if o.exportType.exportable() {
+		return o.export(ctx)
+	}
+
 	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
 	arg := files.DownloadArg{
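The new getFileMetadata fans out one lookup per plausible Dropbox path via buffered channels, then reads the channels in preference order, so the lookups run concurrently while the first acceptable result still wins deterministically. The same pattern in isolation (a standalone sketch, not rclone code):

package main

import "fmt"

// lookup starts work concurrently and returns a buffered channel
// that will eventually receive the result.
func lookup(candidate string) <-chan string {
	ch := make(chan string, 1)
	go func() {
		defer close(ch)
		// ... the expensive remote call would go here ...
		ch <- "result for " + candidate
	}()
	return ch
}

func main() {
	// Start all lookups at once, listed in order of preference.
	var chans []<-chan string
	for _, c := range []string{"exact path", "foo.md from foo", "foo.md from foo.paper"} {
		chans = append(chans, lookup(c))
	}
	// Consume in preference order: concurrency without losing determinism.
	for _, ch := range chans {
		if res := <-ch; res != "" {
			fmt.Println("first acceptable:", res)
			break
		}
	}
}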
@@ -1,9 +1,16 @@ (dropbox internal tests)
 package dropbox
 
 import (
+	"context"
+	"io"
 	"strings"
 	"testing"
 
+	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestInternalCheckPathLength(t *testing.T) {
@@ -42,3 +49,54 @@ func TestInternalCheckPathLength(t *testing.T) {
 		assert.Equal(t, test.ok, err == nil, test.in)
 	}
 }
+
+func (f *Fs) importPaperForTest(t *testing.T) {
+	content := `# test doc
+
+Lorem ipsum __dolor__ sit amet
+[link](http://google.com)
+`
+
+	arg := files.PaperCreateArg{
+		Path:         f.slashRootSlash + "export.paper",
+		ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
+	}
+	var err error
+	err = f.pacer.Call(func() (bool, error) {
+		reader := strings.NewReader(content)
+		_, err = f.srv.PaperCreate(&arg, reader)
+		return shouldRetry(context.Background(), err)
+	})
+	require.NoError(t, err)
+}
+
+func (f *Fs) InternalTestPaperExport(t *testing.T) {
+	ctx := context.Background()
+	f.importPaperForTest(t)
+
+	f.exportExts = []exportExtension{"html"}
+
+	obj, err := f.NewObject(ctx, "export.html")
+	require.NoError(t, err)
+
+	rc, err := obj.Open(ctx)
+	require.NoError(t, err)
+	defer func() { require.NoError(t, rc.Close()) }()
+
+	buf, err := io.ReadAll(rc)
+	require.NoError(t, err)
+	text := string(buf)
+
+	for _, excerpt := range []string{
+		"Lorem ipsum",
+		"<b>dolor</b>",
+		`href="http://google.com"`,
+	} {
+		require.Contains(t, text, excerpt)
+	}
+}
+
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("PaperExport", f.InternalTestPaperExport)
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
@@ -476,7 +476,7 @@ func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID st
 
 // CopyDocByItemID copies a document by its item ID.
 func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
-	// putting name in info doesnt work. extension does work so assume this is a bug in the endpoint
+	// putting name in info doesn't work. extension does work so assume this is a bug in the endpoint
 	values := map[string]any{
 		"info_to_update": map[string]any{},
 	}
backend/opendrive/opendrive.go
@@ -92,6 +92,21 @@ Note that these chunks are buffered in memory so increasing them will
 increase memory use.`,
 			Default:  10 * fs.Mebi,
 			Advanced: true,
+		}, {
+			Name:     "access",
+			Help:     "Files and folders will be uploaded with this access permission (default private)",
+			Default:  "private",
+			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: "private",
+				Help:  "The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them.",
+			}, {
+				Value: "public",
+				Help:  "The file or folder can be downloaded by anyone from a web browser. The link can be shared in any way.",
+			}, {
+				Value: "hidden",
+				Help:  "The file or folder has the same restrictions as Public, but can be accessed only by users who know the URL of the file or folder link.",
+			}},
 		}},
 	})
 }
@@ -102,6 +117,7 @@ type Options struct {
 	Password  string               `config:"password"`
 	Enc       encoder.MultiEncoder `config:"encoding"`
 	ChunkSize fs.SizeSuffix        `config:"chunk_size"`
+	Access    string               `config:"access"`
 }
 
 // Fs represents a remote server
@@ -735,6 +751,23 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
+// getAccessLevel is a helper function to determine the access level integer
+func getAccessLevel(access string) int64 {
+	var accessLevel int64
+	switch access {
+	case "private":
+		accessLevel = 0
+	case "public":
+		accessLevel = 1
+	case "hidden":
+		accessLevel = 2
+	default:
+		accessLevel = 0
+		fs.Errorf(nil, "Invalid access: %s, defaulting to private", access)
+	}
+	return accessLevel
+}
+
 // DirCacher methods
 
 // CreateDir makes a directory with pathID as parent and name leaf
@@ -747,7 +780,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		SessionID:       f.session.SessionID,
 		FolderName:      f.opt.Enc.FromStandardName(leaf),
 		FolderSubParent: pathID,
-		FolderIsPublic:  0,
+		FolderIsPublic:  getAccessLevel(f.opt.Access),
 		FolderPublicUpl: 0,
 		FolderPublicDisplay: 0,
 		FolderPublicDnl: 0,
@@ -1080,7 +1113,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Set permissions
 	err = o.fs.pacer.Call(func() (bool, error) {
-		update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
+		update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
 		// fs.Debugf(nil, "Permissions : %#v", update)
 		opts := rest.Opts{
 			Method: "POST",
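getAccessLevel maps the textual access option onto OpenDrive's integer permission field, with anything unrecognised falling back to private (0). A standalone sketch of the same mapping for quick experimentation (illustrative, not the rclone code itself):

package main

import "fmt"

// accessLevel mirrors the switch in getAccessLevel above.
func accessLevel(access string) int64 {
	switch access {
	case "public":
		return 1
	case "hidden":
		return 2
	default: // "private" and anything unrecognised
		return 0
	}
}

func main() {
	for _, a := range []string{"private", "public", "hidden", "bogus"} {
		fmt.Printf("%s -> %d\n", a, accessLevel(a))
	}
}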
backend/s3/ibm_signer.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package s3

import (
	"context"
	"net/http"
	"time"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/aws/aws-sdk-go-v2/aws"
	v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// Authenticator defines an interface for obtaining an IAM token.
type Authenticator interface {
	GetToken() (string, error)
}

// IbmIamSigner is a structure for signing requests using IBM IAM.
// Requires APIKey and Resource InstanceID.
type IbmIamSigner struct {
	APIKey     string
	InstanceID string
	Auth       Authenticator
}

// SignHTTP signs requests using an IBM IAM token.
func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
	var authenticator Authenticator
	if signer.Auth != nil {
		authenticator = signer.Auth
	} else {
		authenticator = &core.IamAuthenticator{ApiKey: signer.APIKey}
	}
	token, err := authenticator.GetToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("ibm-service-instance-id", signer.InstanceID)
	return nil
}

// NoOpCredentialsProvider is needed since the S3 SDK requires having credentials, even though authentication is happening via IBM IAM.
type NoOpCredentialsProvider struct{}

// Retrieve returns mock credentials for the NoOpCredentialsProvider.
func (n *NoOpCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return aws.Credentials{
		AccessKeyID:     "NoOpAccessKey",
		SecretAccessKey: "NoOpSecretKey",
		SessionToken:    "",
		Source:          "NoOpCredentialsProvider",
	}, nil
}

// IsExpired always returns false.
func (n *NoOpCredentialsProvider) IsExpired() bool {
	return false
}
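A minimal sketch of how these two pieces combine, mirroring the wiring shown further down in `s3.go` (this helper is hypothetical, not part of the diff; it assumes it compiles alongside the types above, and the region value is a placeholder):

```
package s3

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newIBMIamClient is a hypothetical helper: it builds an S3 client whose
// requests carry an IBM IAM bearer token instead of a SigV4 signature.
func newIBMIamClient(apiKey, instanceID string) *s3.Client {
	cfg := aws.Config{
		Region: "us-south", // placeholder
		// The SDK insists on a credentials provider even though the
		// real authentication happens in the signer below.
		Credentials: &NoOpCredentialsProvider{},
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.HTTPSignerV4 = &IbmIamSigner{APIKey: apiKey, InstanceID: instanceID}
	})
}
```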
backend/s3/ibm_signer_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package s3

import (
	"context"
	"net/http"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/stretchr/testify/assert"
)

type MockAuthenticator struct {
	Token string
	Error error
}

func (m *MockAuthenticator) GetToken() (string, error) {
	return m.Token, m.Error
}

func TestSignHTTP(t *testing.T) {
	apiKey := "mock-api-key"
	instanceID := "mock-instance-id"
	token := "mock-iam-token"
	mockAuth := &MockAuthenticator{
		Token: token,
		Error: nil,
	}
	signer := &IbmIamSigner{
		APIKey:     apiKey,
		InstanceID: instanceID,
		Auth:       mockAuth,
	}
	req, err := http.NewRequest("GET", "https://example.com", nil)
	if err != nil {
		t.Fatalf("Failed to create HTTP request: %v", err)
	}
	credentials := aws.Credentials{
		AccessKeyID:     "mock-access-key",
		SecretAccessKey: "mock-secret-key",
	}
	err = signer.SignHTTP(context.TODO(), credentials, req, "payload-hash", "service", "region", time.Now())
	assert.NoError(t, err, "Expected no error")
	assert.Equal(t, "Bearer "+token, req.Header.Get("Authorization"), "Authorization header should be set correctly")
	assert.Equal(t, instanceID, req.Header.Get("ibm-service-instance-id"), "ibm-service-instance-id header should be set correctly")
}
backend/s3/s3.go (174 lines changed)
@@ -36,8 +36,8 @@ import (
	"github.com/aws/smithy-go/logging"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"

	"github.com/ncw/swift/v2"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunksize"
@@ -934,34 +934,67 @@ func init() {
	Help: "The default endpoint\nIran",
}},
}, {
-	// Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
+	// Linode endpoints: https://techdocs.akamai.com/cloud-computing/docs/object-storage-product-limits#supported-endpoint-types-by-region
	Name:     "endpoint",
	Help:     "Endpoint for Linode Object Storage API.",
	Provider: "Linode",
	Examples: []fs.OptionExample{{
		Value: "nl-ams-1.linodeobjects.com",
		Help:  "Amsterdam (Netherlands), nl-ams-1",
	}, {
		Value: "us-southeast-1.linodeobjects.com",
		Help:  "Atlanta, GA (USA), us-southeast-1",
	}, {
		Value: "in-maa-1.linodeobjects.com",
		Help:  "Chennai (India), in-maa-1",
	}, {
		Value: "us-ord-1.linodeobjects.com",
		Help:  "Chicago, IL (USA), us-ord-1",
	}, {
		Value: "eu-central-1.linodeobjects.com",
		Help:  "Frankfurt (Germany), eu-central-1",
	}, {
		Value: "id-cgk-1.linodeobjects.com",
		Help:  "Jakarta (Indonesia), id-cgk-1",
	}, {
		Value: "gb-lon-1.linodeobjects.com",
		Help:  "London 2 (Great Britain), gb-lon-1",
	}, {
		Value: "us-lax-1.linodeobjects.com",
		Help:  "Los Angeles, CA (USA), us-lax-1",
	}, {
		Value: "es-mad-1.linodeobjects.com",
		Help:  "Madrid (Spain), es-mad-1",
	}, {
		Value: "au-mel-1.linodeobjects.com",
		Help:  "Melbourne (Australia), au-mel-1",
	}, {
		Value: "us-mia-1.linodeobjects.com",
		Help:  "Miami, FL (USA), us-mia-1",
	}, {
		Value: "it-mil-1.linodeobjects.com",
		Help:  "Milan (Italy), it-mil-1",
	}, {
		Value: "us-east-1.linodeobjects.com",
		Help:  "Newark, NJ (USA), us-east-1",
	}, {
		Value: "jp-osa-1.linodeobjects.com",
		Help:  "Osaka (Japan), jp-osa-1",
	}, {
		Value: "fr-par-1.linodeobjects.com",
		Help:  "Paris (France), fr-par-1",
	}, {
		Value: "br-gru-1.linodeobjects.com",
		Help:  "São Paulo (Brazil), br-gru-1",
	}, {
		Value: "us-sea-1.linodeobjects.com",
		Help:  "Seattle, WA (USA), us-sea-1",
	}, {
		Value: "ap-south-1.linodeobjects.com",
-		Help:  "Singapore ap-south-1",
+		Help:  "Singapore, ap-south-1",
	}, {
		Value: "sg-sin-1.linodeobjects.com",
		Help:  "Singapore 2, sg-sin-1",
	}, {
		Value: "se-sto-1.linodeobjects.com",
		Help:  "Stockholm (Sweden), se-sto-1",
@@ -2680,6 +2713,34 @@ knows about - please make a bug report if not.

You can change this if you want to disable the use of multipart uploads.
This shouldn't be necessary in normal operation.

This should be automatically set correctly for all providers rclone
knows about - please make a bug report if not.
`,
	Default:  fs.Tristate{},
	Advanced: true,
}, {
	Name: "use_x_id",
	Help: `Set if rclone should add x-id URL parameters.

You can change this if you want to disable the AWS SDK from
adding x-id URL parameters.

This shouldn't be necessary in normal operation.

This should be automatically set correctly for all providers rclone
knows about - please make a bug report if not.
`,
	Default:  fs.Tristate{},
	Advanced: true,
}, {
	Name: "sign_accept_encoding",
	Help: `Set if rclone should include Accept-Encoding as part of the signature.

You can change this if you want to stop rclone including
Accept-Encoding as part of the signature.

This shouldn't be necessary in normal operation.

This should be automatically set correctly for all providers rclone
knows about - please make a bug report if not.
`,
@@ -2736,6 +2797,16 @@ use |-vv| to see the debug level logs.
	Default:  sdkLogMode(0),
	Advanced: true,
}, {
	Name:     "ibm_api_key",
	Help:     "IBM API Key to be used to obtain IAM token",
	Provider: "IBMCOS",
}, {
	Name:     "ibm_resource_instance_id",
	Help:     "IBM service instance id",
	Provider: "IBMCOS",
},
}})
}

@@ -2889,6 +2960,10 @@ type Options struct {
	UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
	SDKLogMode         sdkLogMode  `config:"sdk_log_mode"`
	DirectoryBucket    bool        `config:"directory_bucket"`
	IBMAPIKey          string      `config:"ibm_api_key"`
	IBMInstanceID      string      `config:"ibm_resource_instance_id"`
	UseXID             fs.Tristate `config:"use_x_id"`
	SignAcceptEncoding fs.Tristate `config:"sign_accept_encoding"`
}

// Fs represents a remote s3 server
@@ -3073,59 +3148,67 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
	}
}

// Fixup the request if needed.
//
// Google Cloud Storage alters the Accept-Encoding header, which
-// breaks the v2 request signature
+// breaks the v2 request signature. This is set with opt.SignAcceptEncoding.
//
// It also doesn't like the x-id URL parameter SDKv2 puts in so we
-// remove that too.
+// remove that too. This is set with opt.UseXID.Value.
//
-// See https://github.com/aws/aws-sdk-go-v2/issues/1816.
-func fixupGCS(o *s3.Options) {
+// Adapted from: https://github.com/aws/aws-sdk-go-v2/issues/1816#issuecomment-1927281540
+func fixupRequest(o *s3.Options, opt *Options) {
	type ignoredHeadersKey struct{}
	headers := []string{"Accept-Encoding"}

	fixup := middleware.FinalizeMiddlewareFunc(
-		"FixupGCS",
+		"FixupRequest",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
-				return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
+				return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
			}

-			// Delete headers from being signed - will restore later
-			ignored := make(map[string]string, len(headers))
-			for _, h := range headers {
-				ignored[h] = req.Header.Get(h)
-				req.Header.Del(h)
+			if !opt.SignAcceptEncoding.Value {
+				// Delete headers from being signed - will restore later
+				ignored := make(map[string]string, len(headers))
+				for _, h := range headers {
+					ignored[h] = req.Header.Get(h)
+					req.Header.Del(h)
+				}
+
+				// Store ignored on context
+				ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
			}

-			// Remove x-id because Google doesn't like them
-			if query := req.URL.Query(); query.Has("x-id") {
-				query.Del("x-id")
-				req.URL.RawQuery = query.Encode()
+			if !opt.UseXID.Value {
+				// Remove x-id
+				if query := req.URL.Query(); query.Has("x-id") {
+					query.Del("x-id")
+					req.URL.RawQuery = query.Encode()
+				}
			}

-			// Store ignored on context
-			ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
-
			return next.HandleFinalize(ctx, in)
		},
	)

	// Restore headers if necessary
	restore := middleware.FinalizeMiddlewareFunc(
-		"FixupGCSRestoreHeaders",
+		"FixupRequestRestoreHeaders",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
-				return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
+				return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
			}

-			// Restore ignored from ctx
-			ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
-			for k, v := range ignored {
-				req.Header.Set(k, v)
+			if !opt.SignAcceptEncoding.Value {
+				// Restore ignored from ctx
+				ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
+				for k, v := range ignored {
+					req.Header.Set(k, v)
+				}
			}

			return next.HandleFinalize(ctx, in)
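For context, middlewares like these are attached to a client via the SDK's `APIOptions`. A hedged sketch of the registration pattern (the exact insertion points used by rclone are elided in this diff; the step names and positions below are illustrative):

```
// Sketch: attaching the finalize middlewares built above to every
// request made by an aws-sdk-go-v2 client.
o.APIOptions = append(o.APIOptions,
	func(stack *middleware.Stack) error {
		// Run the fixup before the signer so the signature is
		// computed over the modified request.
		return stack.Finalize.Insert(fixup, "Signing", middleware.Before)
	},
	func(stack *middleware.Stack) error {
		// Put the headers back once signing has happened.
		return stack.Finalize.Insert(restore, "Signing", middleware.After)
	},
)
```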
@@ -3171,6 +3254,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli

// Try to fill in the config from the environment if env_auth=true
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {

	configOpts := []func(*awsconfig.LoadOptions) error{}
	// Set the name of the profile if supplied
	if opt.Profile != "" {
@@ -3184,8 +3268,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
	if err != nil {
		return nil, fmt.Errorf("couldn't load configuration with env_auth=true: %w", err)
	}
} else {
	switch {
+	case opt.Provider == "IBMCOS" && opt.V2Auth:
+		awsConfig.Credentials = &NoOpCredentialsProvider{}
+		fs.Debugf(nil, "Using IBM IAM")
	case opt.AccessKeyID == "" && opt.SecretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
		awsConfig.Credentials = aws.AnonymousCredentials{}
@@ -3239,14 +3327,21 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli

if opt.V2Auth || opt.Region == "other-v2-signature" {
	fs.Debugf(nil, "Using v2 auth")
-	options = append(options, func(s3Opt *s3.Options) {
-		s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
-	})
+	if opt.Provider == "IBMCOS" && opt.IBMAPIKey != "" && opt.IBMInstanceID != "" {
+		options = append(options, func(s3Opt *s3.Options) {
+			s3Opt.HTTPSignerV4 = &IbmIamSigner{APIKey: opt.IBMAPIKey, InstanceID: opt.IBMInstanceID}
+		})
+	} else {
+		options = append(options, func(s3Opt *s3.Options) {
+			s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
+		})
+	}
}

-if opt.Provider == "GCS" {
+// Fixup the request if needed
+if !opt.UseXID.Value || !opt.SignAcceptEncoding.Value {
	options = append(options, func(o *s3.Options) {
-		fixupGCS(o)
+		fixupRequest(o, opt)
	})
}

@@ -3361,6 +3456,8 @@ func setQuirks(opt *Options) {
	useAlreadyExists    = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
	useMultipartUploads = true // Set if provider supports multipart uploads
	useUnsignedPayload  = true // Do we need to use unsigned payloads to avoid seeking in PutObject
+	useXID              = true // Add x-id URL parameter into requests
+	signAcceptEncoding  = true // If we should include Accept-Encoding in the signature
)
switch opt.Provider {
case "AWS":
@@ -3505,11 +3602,14 @@ func setQuirks(opt *Options) {
	// Google breaks the request signature by mutating the accept-encoding HTTP header
	// https://github.com/rclone/rclone/issues/6670
	useAcceptEncodingGzip = false
+	signAcceptEncoding = false
	useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
	// GCS S3 doesn't support multi-part server side copy:
	// See: https://issuetracker.google.com/issues/323465186
	// So make cutoff very large which it does seem to support
	opt.CopyCutoff = math.MaxInt64
+	// GCS doesn't like the x-id URL parameter the SDKv2 inserts
+	useXID = false
default: //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid defaultCaseOrder: consider to make `default` case as first or as last case
	fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
	fallthrough
@@ -3579,6 +3679,18 @@ func setQuirks(opt *Options) {
	opt.UseUnsignedPayload.Valid = true
	opt.UseUnsignedPayload.Value = useUnsignedPayload
}

+// Set the correct UseXID if not manually set
+if !opt.UseXID.Valid {
+	opt.UseXID.Valid = true
+	opt.UseXID.Value = useXID
+}
+
+// Set the correct SignAcceptEncoding if not manually set
+if !opt.SignAcceptEncoding.Valid {
+	opt.SignAcceptEncoding.Valid = true
+	opt.SignAcceptEncoding.Value = signAcceptEncoding
+}
}

// setRoot changes the root of the Fs

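The quirk defaults above rely on `fs.Tristate` to distinguish "unset" from "explicitly false". A standalone sketch of that pattern (simplified type, not rclone's actual definition):

```
package main

import "fmt"

// tristate mimics fs.Tristate: Valid reports whether the option was
// set at all; Value only matters when Valid is true.
type tristate struct {
	Value bool
	Valid bool
}

// applyDefault fills in a provider-specific default only when the
// user has not set the option explicitly.
func applyDefault(opt *tristate, def bool) {
	if !opt.Valid {
		opt.Valid = true
		opt.Value = def
	}
}

func main() {
	var useXID tristate          // unset by the user
	applyDefault(&useXID, false) // GCS-style quirk: disable x-id
	fmt.Println(useXID.Value)    // false

	userSet := tristate{Value: true, Valid: true}
	applyDefault(&userSet, false) // the user's explicit choice wins
	fmt.Println(userSet.Value)    // true
}
```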
@@ -2,8 +2,10 @@ package smb

import (
	"context"
+	"errors"
	"fmt"
	"net"
+	"os"
	"time"

	smb2 "github.com/cloudsoda/go-smb2"
@@ -11,14 +13,17 @@ import (
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fshttp"
+	"golang.org/x/sync/errgroup"
)

// dial starts a client connection to the given SMB server. It is a
// convenience function that connects to the given network address,
// initiates the SMB handshake, and then sets up a Client.
+//
+// The context is only used for establishing the connection, not after.
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
	dialer := fshttp.NewDialer(ctx)
-	tconn, err := dialer.Dial(network, addr)
+	tconn, err := dialer.DialContext(ctx, network, addr)
	if err != nil {
		return nil, err
	}
@@ -89,15 +94,7 @@ func (c *conn) close() (err error) {

// True if it's closed
func (c *conn) closed() bool {
-	var nopErr error
-	if c.smbShare != nil {
-		// stat the current directory
-		_, nopErr = c.smbShare.Stat(".")
-	} else {
-		// list the shares
-		_, nopErr = c.smbSession.ListSharenames()
-	}
-	return nopErr != nil
+	return c.smbSession.Echo() != nil
}

// Show that we are using a SMB session
@@ -118,23 +115,20 @@ func (f *Fs) getSessions() int32 {
}

// Open a new connection to the SMB server.
+//
+// The context is only used for establishing the connection, not after.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
-	// As we are pooling these connections we need to decouple
-	// them from the current context
-	bgCtx := context.Background()
-
-	c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
+	c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
	if err != nil {
		return nil, fmt.Errorf("couldn't connect SMB: %w", err)
	}
	if share != "" {
		// mount the specified share as well if user requested
-		c.smbShare, err = c.smbSession.Mount(share)
+		err = c.mountShare(share)
		if err != nil {
			_ = c.smbSession.Logoff()
			return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
		}
-		c.smbShare = c.smbShare.WithContext(bgCtx)
	}
	return c, nil
}
@@ -192,23 +186,30 @@ func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err erro
// Return a SMB connection to the pool
//
// It nils the pointed to connection out so it can't be reused
-func (f *Fs) putConnection(pc **conn) {
-	c := *pc
-	*pc = nil
-
-	var nopErr error
-	if c.smbShare != nil {
-		// stat the current directory
-		_, nopErr = c.smbShare.Stat(".")
-	} else {
-		// list the shares
-		_, nopErr = c.smbSession.ListSharenames()
-	}
-	if nopErr != nil {
-		fs.Debugf(f, "Connection failed, closing: %v", nopErr)
-		_ = c.close()
+//
+// if err is not nil then it checks the connection is alive using an
+// ECHO request
+func (f *Fs) putConnection(pc **conn, err error) {
+	if pc == nil {
+		return
+	}
+	c := *pc
+	if c == nil {
+		return
+	}
+	*pc = nil
+	if err != nil {
+		// If not a regular SMB error then check the connection
+		if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrExist) || errors.Is(err, os.ErrPermission)) {
+			echoErr := c.smbSession.Echo()
+			if echoErr != nil {
+				fs.Debugf(f, "Connection failed, closing: %v", echoErr)
+				_ = c.close()
+				return
+			}
+			fs.Debugf(f, "Connection OK after error: %v", err)
+		}
	}

	f.poolMu.Lock()
	f.pool = append(f.pool, c)
@@ -235,15 +236,18 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
	if len(f.pool) != 0 {
		fs.Debugf(f, "Closing %d unused connections", len(f.pool))
	}
+	g, _ := errgroup.WithContext(ctx)
	for i, c := range f.pool {
-		if !c.closed() {
-			cErr := c.close()
-			if cErr != nil {
-				err = cErr
-			}
-		}
-		f.pool[i] = nil
+		g.Go(func() (err error) {
+			if !c.closed() {
+				err = c.close()
+			}
+			f.pool[i] = nil
+			return err
+		})
	}
+	err = g.Wait()
	f.pool = nil
	return err
}

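The reworked `drainPool` now closes pooled connections concurrently through `errgroup`. A standalone sketch of that pattern (stub connection type; Go 1.22+ per-iteration loop-variable semantics assumed):

```
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

type conn struct{ id int }

func (c *conn) close() error {
	fmt.Println("closing connection", c.id)
	return nil
}

func main() {
	pool := []*conn{{1}, {2}, {3}}
	var g errgroup.Group
	for i, c := range pool {
		g.Go(func() error {
			err := c.close()
			pool[i] = nil // drop the reference even if close failed
			return err
		})
	}
	// Wait returns the first non-nil error from the close calls.
	if err := g.Wait(); err != nil {
		fmt.Println("error draining pool:", err)
	}
}
```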
@@ -25,7 +25,7 @@ import (
)

const (
-	minSleep      = 100 * time.Millisecond
+	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)
@@ -207,7 +207,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	return nil, err
}
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
-f.putConnection(&cn)
+f.putConnection(&cn, err)
if err != nil {
	// ignore stat error here
	return f, nil
@@ -268,7 +268,7 @@ func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Obj
	return nil, err
}
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
-f.putConnection(&cn)
+f.putConnection(&cn, err)
if err != nil {
	return nil, translateError(err, false)
}
@@ -290,7 +290,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
-f.putConnection(&cn)
+f.putConnection(&cn, err)
return err
}

@@ -305,7 +305,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return err
}
err = cn.smbShare.Remove(f.toSambaPath(path))
-f.putConnection(&cn)
+f.putConnection(&cn, err)
return err
}

@@ -375,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
	return nil, err
}
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
-f.putConnection(&cn)
+f.putConnection(&cn, err)
if err != nil {
	return nil, translateError(err, false)
}
@@ -412,7 +412,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
	return err
}
-defer f.putConnection(&cn)
+defer f.putConnection(&cn, err)

_, err = cn.smbShare.Stat(dstPath)
if os.IsNotExist(err) {
@@ -430,7 +430,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
	return nil, err
}
-defer f.putConnection(&cn)
+defer f.putConnection(&cn, err)

if share == "" {
	shares, err := cn.smbSession.ListSharenames()
@@ -474,7 +474,7 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
	return nil, err
}
stat, err := cn.smbShare.Statfs(dir)
-f.putConnection(&cn)
+f.putConnection(&cn, err)
if err != nil {
	return nil, err
}
@@ -556,7 +556,7 @@ func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
	return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
-f.putConnection(&cn)
+f.putConnection(&cn, err)
return err
}

@@ -604,7 +604,7 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
if err != nil {
	return err
}
-defer o.fs.putConnection(&cn)
+defer o.fs.putConnection(&cn, err)

err = cn.smbShare.Chtimes(reqDir, t, t)
if err != nil {
@@ -650,24 +650,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
if err != nil {
-	o.fs.putConnection(&cn)
+	o.fs.putConnection(&cn, err)
	return nil, fmt.Errorf("failed to open: %w", err)
}
pos, err := fl.Seek(offset, io.SeekStart)
if err != nil {
-	o.fs.putConnection(&cn)
+	o.fs.putConnection(&cn, err)
	return nil, fmt.Errorf("failed to seek: %w", err)
}
if pos != offset {
-	o.fs.putConnection(&cn)
-	return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
+	err = fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
+	o.fs.putConnection(&cn, err)
+	return nil, err
}

in = readers.NewLimitedReadCloser(fl, limit)
in = &boundReadCloser{
	rc: in,
	close: func() error {
-		o.fs.putConnection(&cn)
+		o.fs.putConnection(&cn, nil)
		return nil
	},
}
@@ -697,7 +698,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return err
}
defer func() {
-	o.fs.putConnection(&cn)
+	o.fs.putConnection(&cn, err)
}()

fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
@@ -757,7 +758,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}

err = cn.smbShare.Remove(filename)
-o.fs.putConnection(&cn)
+o.fs.putConnection(&cn, err)

return err
}

@@ -120,7 +120,7 @@ func init() {
	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

	// FIXME
-	//err = f.pacer.Call(func() (bool, error) {
+	// err = f.pacer.Call(func() (bool, error) {
	resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
	// return shouldRetry(ctx, resp, err)
	//})
@@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
func (f *Fs) getAuthToken(ctx context.Context) error {
	fs.Debugf(f, "Renewing token")

-	var authRequest = api.TokenAuthRequest{
+	authRequest := api.TokenAuthRequest{
		AccessKeyID:      withDefault(f.opt.AccessKeyID, accessKeyID),
		PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
		RefreshToken:     f.opt.RefreshToken,
@@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
		return fmt.Errorf("error reading error out of body: %w", err)
	}
	match := findError.FindSubmatch(body)
-	if match == nil || len(match) < 2 || len(match[1]) == 0 {
+	if len(match) < 2 || len(match[1]) == 0 {
		return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
	}
	return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
@@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
-	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
+	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
	// Find the leaf in pathID
	found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
		if strings.EqualFold(item.Name, leaf) {

@@ -11,4 +11,5 @@
<services+github@simjo.st>
<seb•ɑƬ•chezwam•ɖɵʈ•org>
<allllaboutyou@gmail.com>
<psycho@feltzv.fr>
+<afw5059@gmail.com>
@@ -108,6 +108,8 @@ var logReplacements = []string{
	`^.*?Can't compare hashes, so using check --download.*?$`, dropMe,
	// ignore timestamps in directory time updates
	`^(INFO : .*?: (Made directory with|Set directory) (metadata|modification time)).*$`, dropMe,
+	// ignore equivalent log for backends lacking dir modtime support
+	`^(INFO : .*?: Making directory).*$`, dropMe,
	// ignore sizes in directory time updates
	`^(NOTICE: .*?: Skipped set directory modification time as --dry-run is set).*$`, dropMe,
	// ignore sizes in directory metadata updates
@@ -746,6 +748,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
case "test-func":
	b.TestFn = testFunc
	return
+case "concurrent-func":
+	b.TestFn = func() {
+		src := filepath.Join(b.dataDir, "file7.txt")
+		dst := "file1.txt"
+		err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
+		if err != nil {
+			fs.Errorf(src, "error copying file: %v", err)
+		}
+	}
+	return
case "fix-names":
	// in case the local os converted any filenames
	ci.NoUnicodeNormalization = true
@@ -871,10 +883,9 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
if !ok || err != nil {
	fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
	return
-} else {
-	// include hash of filename to make unicode form differences easier to see in logs
-	fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
}
+// include hash of filename to make unicode form differences easier to see in logs
+fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
return
default:
	return fmt.Errorf("unknown command: %q", args[0])

@@ -161,9 +161,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
	return
}

-if err == nil {
-	err = b.checkListing(now, newListing, "current "+msg)
-}
+err = b.checkListing(now, newListing, "current "+msg)
if err != nil {
	return
}
@@ -286,7 +284,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}

// applyDeltas
-func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
+func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
	path1 := bilib.FsPath(b.fs1)
	path2 := bilib.FsPath(b.fs2)

@@ -367,7 +365,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
	}
}

-//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
+// if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

for _, file := range ds1.sort() {
@@ -392,7 +390,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
} else if d2.is(deltaOther) {
	b.indent("!WARNING", file, "New or changed in both paths")

-	//if files are identical, leave them alone instead of renaming
+	// if files are identical, leave them alone instead of renaming
	if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
		fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
		ls1.getPut(file, skippedDirs1)
@@ -486,7 +484,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change

// Do the batch operation
if copy2to1.NotEmpty() && !b.InGracefulShutdown {
-	changes1 = true
	b.indent("Path2", "Path1", "Do queued copies to")
	ctx = b.setBackupDir(ctx, 1)
	results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
@@ -498,12 +495,11 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
		return
	}

-	//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
+	// copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
	b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
}

if copy1to2.NotEmpty() && !b.InGracefulShutdown {
-	changes2 = true
	b.indent("Path1", "Path2", "Do queued copies to")
	ctx = b.setBackupDir(ctx, 2)
	results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
@@ -515,7 +511,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
		return
	}

-	//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
+	// copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
	b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
}

@@ -523,7 +519,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err = b.saveQueue(delete1, "delete1"); err != nil {
	return
}
-//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
+// propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
}

@@ -531,7 +527,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err = b.saveQueue(delete2, "delete2"); err != nil {
	return
}
-//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
+// propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
}

@@ -359,8 +359,6 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

// Determine and apply changes to Path1 and Path2
noChanges := ds1.empty() && ds2.empty()
-changes1 := false // 2to1
-changes2 := false // 1to2
results2to1 := []Results{}
results1to2 := []Results{}

@@ -370,7 +368,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
	fs.Infof(nil, "No changes found")
} else {
	fs.Infof(nil, "Applying changes")
-	changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
+	results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
	if err != nil {
		if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
			fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
@@ -395,21 +393,11 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
}
b.saveOldListings()
// save new listings
-// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
-// i.e. whether we can use the March lst-new as this side's lst without modifying it.
if noChanges {
	b.replaceCurrentListings()
} else {
-	if changes1 || b.InGracefulShutdown { // 2to1
-		err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
-	} else {
-		err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
-	}
-	if changes2 || b.InGracefulShutdown { // 1to2
-		err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
-	} else {
-		err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
-	}
+	err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
+	err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)  // 1to2
}
if b.DebugName != "" {
	l1, _ := b.loadListing(b.listing1)

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.copy2to1.que (vendored, new file, 1 line)
@@ -0,0 +1 @@
"file1.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-new (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path1.lst-old (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-new (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/_testdir_path1.._testdir_path2.path2.lst-old (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"

cmd/bisync/testdata/test_concurrent/golden/test.log (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
(01) : test concurrent

(02) : test initial bisync
(03) : bisync resync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2    Resync is copying files to    - Path1
INFO : - Path1    Resync is copying files to    - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(04) : test changed on one path - file1
(05) : touch-glob 2001-01-02 {datadir/} file5R.txt
(06) : touch-glob 2023-08-26 {datadir/} file7.txt
(07) : copy-as {datadir/}file5R.txt {path2/} file1.txt

(08) : test bisync with file changed during
(09) : concurrent-func
(10) : bisync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : - Path2    File changed: size (larger), time (newer)    - file1.txt
INFO : Path2: 1 changes: 0 new, 1 modified, 0 deleted
INFO : (Modified: 1 newer, 0 older, 1 larger, 0 smaller)
INFO : Applying changes
INFO : - Path2    Queue copy to Path1    - {path1/}file1.txt
INFO : - Path2    Do queued copies to    - Path1
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(11) : bisync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
	"Modtime": true,
	"Size": true,
	"Checksum": false,
	"NoSlowHash": false,
	"SlowHashSyncOnly": false,
	"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
cmd/bisync/testdata/test_concurrent/initial/RCLONE_TEST (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.

cmd/bisync/testdata/test_concurrent/initial/file1.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file2.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file3.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file4.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file5.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file6.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file7.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/initial/file8.txt (vendored, new empty file)
cmd/bisync/testdata/test_concurrent/modfiles/dummy.txt (vendored, new empty file)

cmd/bisync/testdata/test_concurrent/modfiles/file1.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file10.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file11.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file2.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
Newer version

cmd/bisync/testdata/test_concurrent/modfiles/file5L.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer and not equal to 5R

cmd/bisync/testdata/test_concurrent/modfiles/file5R.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer and not equal to 5L

cmd/bisync/testdata/test_concurrent/modfiles/file6.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/modfiles/file7.txt (vendored, new file, 1 line)
@@ -0,0 +1 @@
This file is newer

cmd/bisync/testdata/test_concurrent/scenario.txt (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
test concurrent

test initial bisync
bisync resync

test changed on one path - file1
touch-glob 2001-01-02 {datadir/} file5R.txt
touch-glob 2023-08-26 {datadir/} file7.txt
copy-as {datadir/}file5R.txt {path2/} file1.txt

test bisync with file changed during
concurrent-func
bisync

bisync
@@ -430,7 +430,7 @@ func initConfig() {
}

// Start the metrics server if configured and not running the "rc" command
-if os.Args[1] != "rc" {
+if len(os.Args) >= 2 && os.Args[1] != "rc" {
	_, err = rcserver.MetricsStart(ctx, &rc.Opt)
	if err != nil {
		fs.Fatalf(nil, "Failed to start metrics server: %v", err)

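The added length check matters because `os.Args` holds only the binary name when rclone is run with no subcommand, so indexing `os.Args[1]` unconditionally would panic. A minimal reproduction of the guard:

```
package main

import (
	"fmt"
	"os"
)

func main() {
	// Without the length check, os.Args[1] panics with
	// "index out of range" when no subcommand is given.
	if len(os.Args) >= 2 && os.Args[1] != "rc" {
		fmt.Println("would start metrics server; subcommand:", os.Args[1])
	} else {
		fmt.Println("skipping metrics server")
	}
}
```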
@@ -22,6 +22,9 @@ include/exclude filters - everything will be removed. Use the
delete files. To delete empty directories only, use command
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).

+The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
+implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.
+
**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
`,

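As an illustration of the paragraph added above (remote and path are placeholders):

```
rclone delete --checkers 16 --dry-run remote:path
```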
@@ -809,7 +809,6 @@ put them back in again.` >}}
* ben-ba <benjamin.brauner@gmx.de>
* Eli Orzitzer <e_orz@yahoo.com>
* Anthony Metzidis <anthony.metzidis@gmail.com>
-* emyarod <afw5059@gmail.com>
* keongalvin <keongalvin@gmail.com>
* rarspace01 <rarspace01@users.noreply.github.com>
* Paul Stern <paulstern45@gmail.com>
@@ -938,3 +937,11 @@ put them back in again.` >}}
* hiddenmarten <hiddenmarten@gmail.com>
* Trevor Starick <trevor.starick@gmail.com>
* b-wimmer <132347192+b-wimmer@users.noreply.github.com>
+* Jess <jess@jessie.cafe>
+* Zachary Vorhies <zachvorhies@protonmail.com>
+* Alexander Minbaev <minbaev@gmail.com>
+* Joel K Biju <joelkbiju18@gmail.com>
+* ll3006 <doublel3006@gmail.com>
+* jbagwell-akamai <113531113+jbagwell-akamai@users.noreply.github.com>
+* Michael Kebe <michael.kebe@gmail.com>
+* Lorenz Brun <lorenz@brun.one>

@@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general.

## Changelog

+### `v1.69.1`
+* Fixed an issue causing listings to not capture concurrent modifications under certain conditions
+
### `v1.68`
* Fixed an issue affecting backends that round modtimes to a lower precision.

@@ -5,6 +5,32 @@ description: "Rclone Changelog"

# Changelog

+## v1.69.1 - 2025-02-14
+
+[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
+
+* Bug Fixes
+    * lib/oauthutil: Fix redirect URL mismatch errors (Nick Craig-Wood)
+    * bisync: Fix listings missing concurrent modifications (nielash)
+    * serve s3: Fix list objects encoding-type (Nick Craig-Wood)
+    * fs: Fix confusing "didn't find section in config file" error (Nick Craig-Wood)
+    * doc fixes (Christoph Berger, Dimitri Papadopoulos, Matt Ickstadt, Nick Craig-Wood, Tim White, Zachary Vorhies)
+    * build: Added parallel docker builds and caching for go build in the container (Anagh Kumar Baranwal)
+* VFS
+    * Fix the cache failing to upload symlinks when `--links` was specified (Nick Craig-Wood)
+    * Fix race detected by race detector (Nick Craig-Wood)
+    * Close the change notify channel on Shutdown (izouxv)
+* B2
+    * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
+* Iclouddrive
+    * Add notes on ADP and Missing PCS cookies (Nick Craig-Wood)
+* Onedrive
+    * Mark German (de) region as deprecated (Nick Craig-Wood)
+* S3
+    * Added new storage class to magalu provider (Bruno Fernandes)
+    * Add DigitalOcean regions SFO2, LON1, TOR1, BLR1 (jkpe)
+    * Add latest Linode Object Storage endpoints (jbagwell-akamai)
+
## v1.69.0 - 2025-01-12

[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.69.0)

@@ -7,6 +7,8 @@ versionIntroduced: v1.65
---
# rclone serve nfs

+*Not available in Windows.*
+
Serve the remote as an NFS mount

## Synopsis

@@ -190,6 +190,42 @@ with `--dropbox-batch-mode async` then do a final transfer with
Note that there may be a pause when quitting rclone while rclone
finishes up the last batch using this mode.

### Exporting files

Certain files in Dropbox are "exportable", such as Dropbox Paper
documents. These files need to be converted to another format in
order to be downloaded. Often multiple formats are available for
conversion.

When rclone downloads an exportable file, it chooses the format to
download based on the `--dropbox-export-formats` setting. By
default, the export formats are `html,md`, which are sensible
defaults for Dropbox Paper.

Rclone chooses the first format ID in the export formats list that
Dropbox supports for a given file. If no format in the list is
usable, rclone will choose the default format that Dropbox suggests.

Rclone will change the extension to correspond to the export format.
Here are some examples of how extensions are mapped:

| File type      | Filename in Dropbox | Filename in rclone |
|----------------|---------------------|--------------------|
| Paper          | mydoc.paper         | mydoc.html         |
| Paper template | mydoc.papert        | mydoc.papert.html  |
| other          | mydoc               | mydoc.html         |

_Importing_ exportable files is not yet supported by rclone.

Here are the supported export extensions known by rclone. Note that
rclone does not currently support other formats not on this list,
even if Dropbox supports them. Also, Dropbox could change the list
of supported formats at any time.

| Format ID | Name     | Description          |
|-----------|----------|----------------------|
| html      | HTML     | HTML document        |
| md        | Markdown | Markdown text format |

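For example, to prefer Markdown when downloading Paper documents (paths here are illustrative):

```
rclone copy --dropbox-export-formats md dropbox:work/mydoc.paper /tmp/export
```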
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/dropbox/dropbox.go then run make backenddocs" >}}
### Standard options
@@ -522,6 +558,11 @@ non-personal account otherwise the visibility may not be correct.
[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211) and the
[dropbox SDK issue](https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75).

+Modification times for Dropbox Paper documents are not exact, and
+may not change for some period after the document is edited.
+To make sure you get recent changes in a sync, either wait an hour
+or so, or use `--ignore-times` to force a full sync.
+
## Get your own Dropbox App ID

When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.

@ -2958,7 +2958,7 @@ Choose a number from below, or type in your own value
|
||||
location_constraint>1
|
||||
```
|
||||
|
||||
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
|
||||
8. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
|
||||
```
|
||||
Canned ACL used when creating buckets and/or storing objects in S3.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
@ -2974,8 +2974,7 @@ Choose a number from below, or type in your own value
|
||||
acl> 1
|
||||
```
|
||||
|
||||
|
||||
12. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
|
||||
9. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
|
||||
```
|
||||
[xxx]
|
||||
type = s3
|
||||
@ -2987,7 +2986,7 @@ acl> 1
|
||||
acl = private
|
||||
```
|
||||
|
||||
13. Execute rclone commands
|
||||
10. Execute rclone commands
|
||||
```
|
||||
1) Create a bucket.
|
||||
rclone mkdir IBM-COS-XREGION:newbucket
|
||||
@ -3006,6 +3005,35 @@ acl> 1
|
||||
rclone delete IBM-COS-XREGION:newbucket/file.txt
|
||||
```
|
||||
|
||||
#### IBM IAM authentication
|
||||
If using IBM IAM authentication with IBM API KEY you need to fill in these additional parameters
|
||||
1. Select false for env_auth
|
||||
2. Leave `access_key_id` and `secret_access_key` blank
|
||||
3. Paste your `ibm_api_key`
|
||||
```
|
||||
Option ibm_api_key.
|
||||
IBM API Key to be used to obtain IAM token
|
||||
Enter a value of type string. Press Enter for the default (1).
|
||||
ibm_api_key>
|
||||
```
|
||||
4. Paste your `ibm_resource_instance_id`
|
||||
```
|
||||
Option ibm_resource_instance_id.
|
||||
IBM service instance id
|
||||
Enter a value of type string. Press Enter for the default (2).
|
||||
ibm_resource_instance_id>
|
||||
```
|
||||
5. In advanced settings type true for `v2_auth`
|
||||
```
|
||||
Option v2_auth.
|
||||
If true use v2 authentication.
|
||||
If this is false (the default) then rclone will use v4 authentication.
|
||||
If it is set then rclone will use v2 authentication.
|
||||
Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.
|
||||
Enter a boolean value (true or false). Press Enter for the default (true).
|
||||
v2_auth>
|
||||
```
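
Putting these steps together, the relevant section of the resulting config file might look like this (a sketch only; the remote name and bracketed values are placeholders, not rclone output):

```
[ibm-cos-iam]
type = s3
provider = IBMCOS
env_auth = false
ibm_api_key = <your API key>
ibm_resource_instance_id = <your resource instance ID>
v2_auth = true
```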

### IDrive e2 {#idrive-e2}

Here is an example of making an [IDrive e2](https://www.idrive.com/e2/)
@ -4845,27 +4873,49 @@ Option endpoint.
Endpoint for Linode Object Storage API.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Atlanta, GA (USA), us-southeast-1
1 / Amsterdam (Netherlands), nl-ams-1
\ (nl-ams-1.linodeobjects.com)
2 / Atlanta, GA (USA), us-southeast-1
\ (us-southeast-1.linodeobjects.com)
2 / Chicago, IL (USA), us-ord-1
3 / Chennai (India), in-maa-1
\ (in-maa-1.linodeobjects.com)
4 / Chicago, IL (USA), us-ord-1
\ (us-ord-1.linodeobjects.com)
3 / Frankfurt (Germany), eu-central-1
5 / Frankfurt (Germany), eu-central-1
\ (eu-central-1.linodeobjects.com)
4 / Milan (Italy), it-mil-1
6 / Jakarta (Indonesia), id-cgk-1
\ (id-cgk-1.linodeobjects.com)
7 / London 2 (Great Britain), gb-lon-1
\ (gb-lon-1.linodeobjects.com)
8 / Los Angeles, CA (USA), us-lax-1
\ (us-lax-1.linodeobjects.com)
9 / Madrid (Spain), es-mad-1
\ (es-mad-1.linodeobjects.com)
10 / Melbourne (Australia), au-mel-1
\ (au-mel-1.linodeobjects.com)
11 / Miami, FL (USA), us-mia-1
\ (us-mia-1.linodeobjects.com)
12 / Milan (Italy), it-mil-1
\ (it-mil-1.linodeobjects.com)
5 / Newark, NJ (USA), us-east-1
13 / Newark, NJ (USA), us-east-1
\ (us-east-1.linodeobjects.com)
6 / Paris (France), fr-par-1
14 / Osaka (Japan), jp-osa-1
\ (jp-osa-1.linodeobjects.com)
15 / Paris (France), fr-par-1
\ (fr-par-1.linodeobjects.com)
7 / Seattle, WA (USA), us-sea-1
16 / São Paulo (Brazil), br-gru-1
\ (br-gru-1.linodeobjects.com)
17 / Seattle, WA (USA), us-sea-1
\ (us-sea-1.linodeobjects.com)
8 / Singapore ap-south-1
18 / Singapore, ap-south-1
\ (ap-south-1.linodeobjects.com)
9 / Stockholm (Sweden), se-sto-1
19 / Singapore 2, sg-sin-1
\ (sg-sin-1.linodeobjects.com)
20 / Stockholm (Sweden), se-sto-1
\ (se-sto-1.linodeobjects.com)
10 / Washington, DC, (USA), us-iad-1
21 / Washington, DC, (USA), us-iad-1
\ (us-iad-1.linodeobjects.com)
endpoint> 3
endpoint> 5

Option acl.
Canned ACL used when creating buckets and storing or copying objects.
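
For reference, accepting the Frankfurt endpoint (option 5 in the renumbered list above) would leave a config section along these lines (a sketch; the remote name `linode` and the `acl` value are illustrative):

```
[linode]
type = s3
provider = Linode
endpoint = eu-central-1.linodeobjects.com
acl = private
```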

@ -1108,7 +1108,8 @@ Properties:

On some SFTP servers (e.g. Synology) the paths are different
for SSH and SFTP so the hashes can't be calculated properly.
For them using `disable_hashcheck` is a good idea.
You can either use [`--sftp-path-override`](#--sftp-path-override)
or [`disable_hashcheck`](#--sftp-disable-hashcheck).
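
For example, on a Synology NAS the SSH path typically needs the volume prefix added (the `/volume2/directory` value below is illustrative):

```
rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory
```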

The only ssh agent supported under Windows is Putty's pageant.

@ -57,7 +57,8 @@ off donation.
Thank you very much to our sponsors:

{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/warp.svg" width="300" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/warp.svg" width="285" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}

@ -1,3 +1,3 @@
<a href="{{ .Get "link" }}" target="_blank" >
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" style="{{ .Get "style" | safeCSS }}">
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" styleX="{{ .Get "style" | safeCSS }}" style="margin: 2px; padding: 1px; border: 1px solid #ddd; border-radius: 4px;">
</a>

@ -65,28 +65,29 @@ type StatsInfo struct {
}

type averageValues struct {
mu sync.Mutex
lpBytes int64
lpTime time.Time
speed float64
stop chan bool
stopped sync.WaitGroup
startOnce sync.Once
stopOnce sync.Once
mu sync.Mutex
lpBytes int64
lpTime time.Time
speed float64
stop chan bool
stopped sync.WaitGroup
started bool
}

// NewStats creates an initialised StatsInfo
func NewStats(ctx context.Context) *StatsInfo {
ci := fs.GetConfig(ctx)
return &StatsInfo{
s := &StatsInfo{
ctx: ctx,
ci: ci,
checking: newTransferMap(ci.Checkers, "checking"),
transferring: newTransferMap(ci.Transfers, "transferring"),
inProgress: newInProgress(ctx),
startTime: time.Now(),
average: averageValues{stop: make(chan bool)},
average: averageValues{},
}
s.startAverageLoop()
return s
}

// RemoteStats returns stats for rc
@ -328,61 +329,96 @@ func (s *StatsInfo) averageLoop() {
ticker := time.NewTicker(averagePeriodLength)
defer ticker.Stop()

startTime := time.Now()
a := &s.average
defer a.stopped.Done()

shouldRun := false

for {
select {
case now := <-ticker.C:
a.mu.Lock()
var elapsed float64
if a.lpTime.IsZero() {
elapsed = now.Sub(startTime).Seconds()
} else {
elapsed = now.Sub(a.lpTime).Seconds()

if !shouldRun {
a.mu.Unlock()
continue
}

avg := 0.0
elapsed := now.Sub(a.lpTime).Seconds()
if elapsed > 0 {
avg = float64(a.lpBytes) / elapsed
}

if period < averagePeriod {
period++
}

a.speed = (avg + a.speed*(period-1)) / period
a.lpBytes = 0
a.lpTime = now

a.mu.Unlock()

case stop, ok := <-a.stop:
if !ok {
return // Channel closed, exit the loop
}

a.mu.Lock()

// If we are resuming, store the current time
if !shouldRun && !stop {
a.lpTime = time.Now()
}
shouldRun = !stop

a.mu.Unlock()
case <-a.stop:
return
}
}
}

// Resume the average loop
func (s *StatsInfo) resumeAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s.average.stop <- false
}

// Pause the average loop
func (s *StatsInfo) pauseAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s.average.stop <- true
}

// Start the average loop
//
// Call with the mutex held
func (s *StatsInfo) _startAverageLoop() {
if !s.average.started {
s.average.stop = make(chan bool)
s.average.started = true
s.average.stopped.Add(1)
go s.averageLoop()
}
}

// Start the average loop
func (s *StatsInfo) startAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s.average.startOnce.Do(func() {
s.average.stopped.Add(1)
go s.averageLoop()
})
s._startAverageLoop()
}

// Stop the average loop
//
// Call with the mutex held
func (s *StatsInfo) _stopAverageLoop() {
s.average.stopOnce.Do(func() {
if s.average.started {
close(s.average.stop)
s.average.stopped.Wait()
})
}

// Stop the average loop
func (s *StatsInfo) stopAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s._stopAverageLoop()
}
}
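
Taken together, the new averageLoop, pauseAverageLoop and resumeAverageLoop amount to a ticker loop gated by a bool sent over a channel, with close used for permanent shutdown. A minimal, self-contained sketch of that pattern (illustrative names, not rclone's actual API):

```go
package main

import (
	"fmt"
	"time"
)

type loop struct {
	stop chan bool // send false to resume, true to pause; close to shut down
}

func (l *loop) run() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	running := false
	for {
		select {
		case <-ticker.C:
			if !running {
				continue // paused: skip the periodic work
			}
			fmt.Println("tick") // the periodic work goes here
		case pause, ok := <-l.stop:
			if !ok {
				return // channel closed: exit permanently
			}
			running = !pause
		}
	}
}

func main() {
	l := &loop{stop: make(chan bool)}
	go l.run()
	l.stop <- false // resume
	time.Sleep(350 * time.Millisecond)
	l.stop <- true // pause
	close(l.stop)   // stop for good
}
```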

// String convert the StatsInfo to a string for printing
@ -564,9 +600,9 @@ func (s *StatsInfo) GetBytesWithPending() int64 {
pending := int64(0)
for _, tr := range s.startedTransfers {
if tr.acc != nil {
bytes, size := tr.acc.progress()
if bytes < size {
pending += size - bytes
bytesRead, size := tr.acc.progress()
if bytesRead < size {
pending += size - bytesRead
}
}
}
@ -699,7 +735,8 @@ func (s *StatsInfo) ResetCounters() {
s.oldDuration = 0

s._stopAverageLoop()
s.average = averageValues{stop: make(chan bool)}
s.average = averageValues{}
s._startAverageLoop()
}

// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
@ -788,7 +825,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
}
tr := newTransfer(s, obj, srcFs, dstFs)
s.transferring.add(tr)
s.startAverageLoop()
s.resumeAverageLoop()
return tr
}

@ -796,7 +833,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer {
tr := newTransferRemoteSize(s, remote, size, false, "", srcFs, dstFs)
s.transferring.add(tr)
s.startAverageLoop()
s.resumeAverageLoop()
return tr
}

@ -811,7 +848,7 @@ func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
s.mu.Unlock()
}
if s.transferring.empty() && s.checking.empty() {
time.AfterFunc(averageStopAfter, s.stopAverageLoop)
s.pauseAverageLoop()
}
}

@ -237,5 +237,14 @@ func TestCountError(t *testing.T) {
}

func percentDiff(start, end uint64) uint64 {
return (start - end) * 100 / start
if start == 0 {
return 0 // Handle zero start value to avoid division by zero
}
var diff uint64
if end > start {
diff = end - start // Handle case where end is larger than start
} else {
diff = start - end
}
return (diff * 100) / start
}
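
A quick sanity check of the fixed behaviour (hypothetical assertions, not part of the committed test file):

```go
// The old one-liner underflowed the unsigned subtraction whenever end > start.
require.Equal(t, uint64(10), percentDiff(100, 90)) // (100-90)*100/100
require.Equal(t, uint64(11), percentDiff(90, 100)) // diff of 10 against start 90, no uint64 underflow
require.Equal(t, uint64(0), percentDiff(0, 50))    // guarded division by zero
```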

@ -1109,23 +1109,17 @@ func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Dire
if !s.setDirModTimeAfter && equal {
return nil
}
if s.setDirModTimeAfter && equal {
newDst = dst
} else if s.copyEmptySrcDirs {
if s.setDirMetadata {
newDst = dst
if !equal {
if s.setDirMetadata && s.copyEmptySrcDirs {
newDst, err = operations.CopyDirMetadata(ctx, f, dst, dir, src)
} else if s.setDirModTime {
if dst == nil {
newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
} else {
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
}
} else if dst == nil {
// Create the directory if it doesn't exist
} else if dst == nil && s.setDirModTime && s.copyEmptySrcDirs {
newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
} else if dst == nil && s.copyEmptySrcDirs {
err = operations.Mkdir(ctx, f, dir)
} else if dst != nil && s.setDirModTime {
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
}
} else {
newDst = dst
}
// If we need to set modtime after and we created a dir, then save it for later
if s.setDirModTime && s.setDirModTimeAfter && err == nil {

@ -2762,6 +2762,81 @@ func TestSyncConcurrentTruncate(t *testing.T) {
testSyncConcurrent(t, "truncate")
}

// Test that sync replaces dir modtimes in dst if they've changed
func testSyncReplaceDirModTime(t *testing.T, copyEmptySrcDirs bool) {
accounting.GlobalStats().ResetCounters()
ctx, _ := fs.AddConfig(context.Background())
r := fstest.NewRun(t)

file1 := r.WriteFile("file1", "file1", t2)
file2 := r.WriteFile("test_dir1/file2", "file2", t2)
file3 := r.WriteFile("test_dir2/sub_dir/file3", "file3", t2)
r.CheckLocalItems(t, file1, file2, file3)

_, err := operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t2)
require.NoError(t, err)

// A directory that's empty on both src and dst
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_on_remote", t2)
require.NoError(t, err)
_, err = operations.MkdirModTime(ctx, r.Fremote, "empty_on_remote", t2)
require.NoError(t, err)

// set logging
// (this checks log output as DirModtime operations do not yet have stats, and r.CheckDirectoryModTimes also does not tell us what actions were taken)
oldLogLevel := fs.GetConfig(context.Background()).LogLevel
defer func() { fs.GetConfig(context.Background()).LogLevel = oldLogLevel }() // reset to old val after test
// need to do this as fs.Infof only respects the globalConfig
fs.GetConfig(context.Background()).LogLevel = fs.LogLevelInfo

// First run
accounting.GlobalStats().ResetCounters()
ctx = predictDstFromLogger(ctx)
output := bilib.CaptureOutput(func() {
err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
require.NoError(t, err)
})
require.NotNil(t, output)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)

// Save all dirs
dirs := []string{"test_dir1", "test_dir2", "test_dir2/sub_dir", "empty_on_remote"}
if copyEmptySrcDirs {
dirs = append(dirs, "empty_dir")
}

// Change dir modtimes
for _, dir := range dirs {
_, err := operations.SetDirModTime(ctx, r.Flocal, nil, dir, t1)
if err != nil && !errors.Is(err, fs.ErrorNotImplemented) {
require.NoError(t, err)
}
}

// Run again
accounting.GlobalStats().ResetCounters()
ctx = predictDstFromLogger(ctx)
output = bilib.CaptureOutput(func() {
err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
require.NoError(t, err)
})
require.NotNil(t, output)
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
r.CheckLocalItems(t, file1, file2, file3)
r.CheckRemoteItems(t, file1, file2, file3)

// Check that the modtimes of the directories are as expected
r.CheckDirectoryModTimes(t, dirs...)
}

func TestSyncReplaceDirModTime(t *testing.T) {
testSyncReplaceDirModTime(t, false)
}

func TestSyncReplaceDirModTimeWithEmptyDirs(t *testing.T) {
testSyncReplaceDirModTime(t, true)
}

// Tests that nothing is transferred when src and dst already match
// Run the same sync twice, ensure no action is taken the second time
func testNothingToTransfer(t *testing.T, copyEmptySrcDirs bool) {

@ -144,10 +144,6 @@ backends:
- backend: "dropbox"
remote: "TestDropbox:"
fastlist: false
ignore:
# This test doesn't work on a standard dropbox account because it
# tries to set the expiry of the link
- TestIntegration/FsMkdir/FsPutFiles/PublicLink
# - backend: "filefabric"
# remote: "TestFileFabric:"
# fastlist: false

15
go.mod
@ -25,7 +25,7 @@ require (
github.com/aws/smithy-go v1.22.1
github.com/buengese/sgzip v0.1.1
github.com/cloudinary/cloudinary-go/v2 v2.9.0
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed
github.com/colinmarc/hdfs/v2 v2.4.0
github.com/coreos/go-semver v0.3.1
github.com/coreos/go-systemd/v22 v22.5.0
@ -91,6 +91,7 @@ require (
gopkg.in/validator.v2 v2.0.1
gopkg.in/yaml.v3 v3.0.1
storj.io/uplink v1.13.1

)

require (
@ -109,6 +110,7 @@ require (
github.com/anacrolix/generics v0.0.1 // indirect
github.com/andybalholm/cascadia v1.3.2 // indirect
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
@ -146,6 +148,11 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/errors v0.21.0 // indirect
github.com/go-openapi/strfmt v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/go-resty/resty/v2 v2.11.0 // indirect
github.com/goccy/go-json v0.10.4 // indirect
github.com/gofrs/flock v0.8.1 // indirect
@ -172,14 +179,16 @@ require (
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lpar/date v1.0.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/xxml v0.0.3 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/panjf2000/ants/v2 v2.9.1 // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@ -203,6 +212,7 @@ require (
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zeebo/errs v1.3.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
@ -223,6 +233,7 @@ require (
)

require (
github.com/IBM/go-sdk-core/v5 v5.17.5
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v1.1.4
github.com/golang-jwt/jwt/v4 v4.5.1

41
go.sum
@ -63,6 +63,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Files-com/files-sdk-go/v3 v3.2.107 h1:TRDGQGYANuuMxX8JU++oQKEGitJpKWC1EB0SbNXRVqI=
github.com/Files-com/files-sdk-go/v3 v3.2.107/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
github.com/IBM/go-sdk-core/v5 v5.17.5/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
@ -106,6 +108,8 @@ github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsVi
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU=
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo=
@ -177,8 +181,8 @@ github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vc
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cloudinary/cloudinary-go/v2 v2.9.0 h1:8C76QklmuV4qmKAC7cUnu9D68X9kCkFMuLspPikECCo=
github.com/cloudinary/cloudinary-go/v2 v2.9.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo=
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6 h1:mLY/79N73URZ2J/oRKTxmfhCgxThzBmjQ6XOjX5tYjI=
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6/go.mod h1:0aLYPsmguHbok591y6hI5yAqU0drbUzrPEO10ZpgTTw=
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed h1:KrdJUJWhJ1UWhvaP6SBsvG356KjqfdDjcS/4xTswAU4=
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed/go.mod h1:0aLYPsmguHbok591y6hI5yAqU0drbUzrPEO10ZpgTTw=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
@ -233,8 +237,6 @@ github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y=
github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
@ -268,6 +270,12 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
github.com/go-openapi/strfmt v0.22.1 h1:5Ky8cybT4576C6Ffc+8gYji/wRXCo6Ozm8RaWjPI6jc=
github.com/go-openapi/strfmt v0.22.1/go.mod h1:OfVoytIXJasDkkGvkb1Cceb3BPyMOwk1FgmyyEw7NYg=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
@ -276,7 +284,6 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@ -383,7 +390,6 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@ -464,6 +470,8 @@ github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU=
github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@ -474,17 +482,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/oracle/oci-go-sdk/v65 v65.81.1 h1:JYc47bk8n/MUchA2KHu1ggsCQzlJZQLJ+tTKfOho00E=
@ -590,7 +595,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@ -644,6 +648,8 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@ -734,7 +740,6 @@ golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -756,7 +761,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@ -803,7 +807,6 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -813,12 +816,9 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -837,7 +837,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -944,7 +943,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
@ -1045,15 +1043,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY=
gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@ -108,7 +108,7 @@ func (conf *Config) MakeOauth2Config() *oauth2.Config {
return &oauth2.Config{
ClientID: conf.ClientID,
ClientSecret: conf.ClientSecret,
RedirectURL: RedirectLocalhostURL,
RedirectURL: conf.RedirectURL,
Scopes: conf.Scopes,
Endpoint: oauth2.Endpoint{
AuthURL: conf.AuthURL,

@ -760,6 +760,7 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
dir := node.(*Dir)
dir.mu.Lock()
dir.modTime = item.ModTime(context.TODO())
dir.entry = item
if dirTree != nil {
err = dir._readDirFromDirTree(dirTree, when)
if err != nil {

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
"runtime"
"sort"
"testing"
"time"
@ -655,3 +656,34 @@ func TestDirFileOpen(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, int64(12), fi.Size())
}

func TestDirEntryModTimeInvalidation(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("dirent modtime is unreliable on Windows filesystems")
}
r, vfs := newTestVFS(t)

// Needs to be less than 2x the wait time below, otherwise the entry
// gets cleared out before it had a chance to be updated.
vfs.Opt.DirCacheTime = fs.Duration(50 * time.Millisecond)

r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)

node, err := vfs.Stat("dir")
require.NoError(t, err)
modTime1 := node.(*Dir).DirEntry().ModTime(context.Background())

// Wait some time, then write another file which must update ModTime of
// the directory.
time.Sleep(75 * time.Millisecond)
r.WriteObject(context.Background(), "dir/file2", "file2 contents", t2)

node2, err := vfs.Stat("dir")
require.NoError(t, err)
modTime2 := node2.(*Dir).DirEntry().ModTime(context.Background())

// ModTime of directory must be different after second file was written.
if modTime1.Equal(modTime2) {
t.Error("ModTime not invalidated")
}
}

@ -227,7 +227,10 @@ func (c *Cache) createItemDir(name string) (string, error) {

// getBackend gets a backend for a cache root dir
func getBackend(ctx context.Context, parentPath string, name string, relativeDirPath string) (fs.Fs, error) {
path := fmt.Sprintf(":local,encoding='%v':%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
// Make sure we turn off the global links flag as it overrides the backend specific one
ctx, ci := fs.AddConfig(ctx)
ci.Links = false
path := fmt.Sprintf(":local,encoding='%v',links=false:%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
return fscache.Get(ctx, path)
}