diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 123014908bebb..5ace4600a1f26 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,4 +3,4 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - interval: "daily" + interval: "weekly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 15b84cc28513f..5f212e9ab5816 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,93 +1,48 @@ ---- -# Github Actions build for rclone -# -*- compile-command: "yamllint -f parsable build.yml" -*- +name: Rclone Build -name: build - -# Trigger the workflow on push or pull request on: push: - branches: - - '**' tags: - - '**' - pull_request: - workflow_dispatch: - inputs: - manual: - description: Manual run (bypass default conditions) - type: boolean - default: true + - "**" + +env: + IMAGE: tgdrive/rclone jobs: build: - if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 60 - defaults: - run: - shell: bash strategy: fail-fast: false matrix: - job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24'] + job_name: ["linux", "windows"] include: - job_name: linux os: ubuntu-latest - go: '>=1.25.0-rc.1' + go: ">=1.24.0-rc.1" gotags: cmount - build_flags: '-include "^linux/"' - check: true - quicktest: true - racequicktest: true - librclonetest: true - deploy: true - - - job_name: linux_386 - os: ubuntu-latest - go: '>=1.25.0-rc.1' - goarch: 386 + cgo: "0" + build_flags: '-include "^(linux/amd64|linux/arm64|linux/arm-v7)"' + + - job_name: windows + os: windows-latest + go: ">=1.24.0-rc.1" gotags: cmount - quicktest: true + cgo: "0" + build_flags: '-include "^(windows/amd64|windows/arm64)"' + build_args: "-buildmode exe" - job_name: mac_amd64 os: macos-latest - go: '>=1.25.0-rc.1' - gotags: 'cmount' + go: ">=1.23.0-rc.1" + gotags: "cmount" build_flags: '-include "^darwin/amd64" -cgo' - quicktest: true - racequicktest: true - deploy: true - job_name: mac_arm64 os: macos-latest - go: '>=1.25.0-rc.1' - gotags: 'cmount' + go: ">=1.23.0-rc.1" + gotags: "cmount" build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib' - deploy: true - - - job_name: windows - os: windows-latest - go: '>=1.25.0-rc.1' - gotags: cmount - cgo: '0' - build_flags: '-include "^windows/"' - build_args: '-buildmode exe' - quicktest: true - deploy: true - - - job_name: other_os - os: ubuntu-latest - go: '>=1.25.0-rc.1' - build_flags: '-exclude "^(windows/|darwin/|linux/)"' - compile_all: true - deploy: true - - - job_name: go1.24 - os: ubuntu-latest - go: '1.24' - quicktest: true - racequicktest: true name: ${{ matrix.job_name }} @@ -100,20 +55,21 @@ jobs: fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v6 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} check-latest: true - name: Set environment variables + shell: bash run: | echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV - if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi - name: Install Libraries on Linux + shell: bash run: | sudo modprobe fuse 
sudo chmod 666 /dev/fuse @@ -123,6 +79,7 @@ jobs: if: matrix.os == 'ubuntu-latest' - name: Install Libraries on macOS + shell: bash run: | # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788 # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008 @@ -138,7 +95,8 @@ jobs: shell: powershell run: | $ProgressPreference = 'SilentlyContinue' - choco install -y winfsp zip + choco install -y winfsp + choco install zip --source="bin" echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append if ($env:GOARCH -eq "386") { choco install -y mingw --forcex86 --force @@ -151,6 +109,7 @@ jobs: if: matrix.os == 'windows-latest' - name: Print Go version and environment + shell: bash run: | printf "Using go at: $(which go)\n" printf "Go version: $(go version)\n" @@ -162,195 +121,44 @@ jobs: env - name: Build rclone - run: | - make - - - name: Rclone version - run: | - rclone version - - - name: Run tests - run: | - make quicktest - if: matrix.quicktest - - - name: Race test - run: | - make racequicktest - if: matrix.racequicktest - - - name: Run librclone tests - run: | - make -C librclone/ctest test - make -C librclone/ctest clean - librclone/python/test_rclone.py - if: matrix.librclonetest - - - name: Compile all architectures test - run: | - make - make compile_all - if: matrix.compile_all - - - name: Deploy built binaries + shell: bash run: | if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi - make ci_beta - env: - RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} - # working-directory: '$(modulePath)' - # Deploy binaries if enabled in config && not a PR && not a fork - if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone' - - lint: - if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) - timeout-minutes: 30 - name: "lint" - runs-on: ubuntu-latest - - steps: - - name: Get runner parameters - id: get-runner-parameters - run: | - echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT - echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT + make cross - - name: Checkout - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - - name: Install Go - id: setup-go - uses: actions/setup-go@v6 - with: - go-version: '>=1.24.0-rc.1' - check-latest: true - cache: false - - - name: Cache - uses: actions/cache@v5 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/.cache/golangci-lint - key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }} - restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}- - - - name: Code quality test (Linux) - uses: golangci/golangci-lint-action@v9 - with: - version: latest - skip-cache: true - - - name: Code quality test (Windows) - uses: golangci/golangci-lint-action@v9 - env: - GOOS: "windows" - with: - version: latest - skip-cache: true - - - name: Code quality test (macOS) - uses: golangci/golangci-lint-action@v9 - env: - GOOS: "darwin" + - name: Upload Binary + uses: actions/upload-artifact@v4 with: - version: latest - skip-cache: true - - - 
name: Code quality test (FreeBSD) - uses: golangci/golangci-lint-action@v9 - env: - GOOS: "freebsd" - with: - version: latest - skip-cache: true - - - name: Code quality test (OpenBSD) - uses: golangci/golangci-lint-action@v9 - env: - GOOS: "openbsd" - with: - version: latest - skip-cache: true - - - name: Install govulncheck - run: go install golang.org/x/vuln/cmd/govulncheck@latest - - - name: Scan for vulnerabilities - run: govulncheck ./... - - - name: Check Markdown format - uses: DavidAnson/markdownlint-cli2-action@v20 - with: - globs: | - CONTRIBUTING.md - MAINTAINERS.md - README.md - RELEASE.md - CODE_OF_CONDUCT.md - librclone\README.md - backend\s3\README.md - docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md - - - name: Scan edits of autogenerated files - run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}' - if: github.event_name == 'pull_request' + name: rclone-${{ matrix.job_name }} + path: ${{ github.workspace }}/build/**/* + retention-days: 1 android: - if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 30 name: "android-all" runs-on: ubuntu-latest - steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - - # Upgrade together with NDK version - name: Set up Go - uses: actions/setup-go@v6 + uses: actions/setup-go@v5 with: - go-version: '>=1.25.0-rc.1' - - - name: Set global environment variables - run: | - echo "VERSION=$(make version)" >> $GITHUB_ENV + go-version: ">=1.24.0-rc.1" - - name: build native rclone - run: | - make - - - name: install gomobile - run: | - go install golang.org/x/mobile/cmd/gobind@latest - go install golang.org/x/mobile/cmd/gomobile@latest - env PATH=$PATH:~/go/bin gomobile init - echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV - - - name: arm-v7a gomobile build - run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile - - - name: arm-v7a Set environment variables - run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV - echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV - echo 'GOOS=android' >> $GITHUB_ENV - echo 'GOARCH=arm' >> $GITHUB_ENV - echo 'GOARM=7' >> $GITHUB_ENV - echo 'CGO_ENABLED=1' >> $GITHUB_ENV - echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV - - - name: arm-v7a build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a . 
+ - name: Go module cache + uses: actions/cache@v5 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- - name: arm64-v8a Set environment variables + shell: bash run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV + echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV echo 'GOOS=android' >> $GITHUB_ENV echo 'GOARCH=arm64' >> $GITHUB_ENV @@ -358,36 +166,93 @@ jobs: echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV - name: arm64-v8a build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a . + run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${{ github.ref_name }} -o build/rclone-armv8a . - - name: x86 Set environment variables + - name: Package into Zip run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV - echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV - echo 'GOOS=android' >> $GITHUB_ENV - echo 'GOARCH=386' >> $GITHUB_ENV - echo 'CGO_ENABLED=1' >> $GITHUB_ENV - echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV + cd build + mv rclone-armv8a rclone + zip -r rclone-${{ github.ref_name }}-android-arm64.zip rclone - - name: x86 build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 . + - name: Upload Artifacts + uses: actions/upload-artifact@v4 + with: + name: rclone-android + path: ${{ github.workspace }}/build/rclone-*.zip + retention-days: 1 - - name: x64 Set environment variables - run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV - echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV - echo 'GOOS=android' >> $GITHUB_ENV - echo 'GOARCH=amd64' >> $GITHUB_ENV - echo 'CGO_ENABLED=1' >> $GITHUB_ENV - echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV + release: + name: Create Release + runs-on: ubuntu-latest + needs: [build, android] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 - - name: x64 build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 . 
+ - name: Download All Artifacts + uses: actions/download-artifact@v4 + with: + path: /tmp/build + pattern: rclone-* + merge-multiple: true - - name: Upload artifacts + - name: Copy Artifacts run: | - make ci_upload + mkdir build + cp -r /tmp/build/* build/ + + - name: Upload Assets + shell: bash env: - RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} - # Upload artifacts if not a PR && not a fork - if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone' + GH_TOKEN: ${{ github.token }} + run: | + make upload_github + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set Docker metadata + id: docker_meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE }} + labels: | + org.opencontainers.image.version=${{ github.ref_name}} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.title=${{ env.IMAGE }} + org.opencontainers.image.description=Rclone Docker Image + + - name: Publish to Docker + uses: docker/build-push-action@v6 + with: + context: . + push: true + platforms: linux/amd64,linux/arm64,linux/arm/v7 + build-args: | + VERSION=${{ github.ref_name}} + labels: ${{ steps.docker_meta.outputs.labels }} + tags: | + ghcr.io/${{ env.IMAGE }}:${{ github.ref_name}} + ghcr.io/${{ env.IMAGE }}:latest + + - name: Publish Docker Plugin + run: | + VER=${GITHUB_REF_NAME} + PLUGIN_USER=ghcr.io/tgdrive + for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do + export PLUGIN_ARCH + export PLUGIN_USER + make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-} + make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v} + done + make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest + make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v} diff --git a/.github/workflows/build_publish_docker_image.yml b/.github/workflows/build_publish_docker_image.yml deleted file mode 100644 index 261e9de488bb0..0000000000000 --- a/.github/workflows/build_publish_docker_image.yml +++ /dev/null @@ -1,294 +0,0 @@ ---- -# Github Actions release for rclone -# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*- - -name: Build & Push Docker Images - -# Trigger the workflow on push or pull request -on: - push: - branches: - - '**' - tags: - - '**' - workflow_dispatch: - inputs: - manual: - description: Manual run (bypass default conditions) - type: boolean - default: true - -jobs: - build-image: - if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request') - timeout-minutes: 60 - strategy: - fail-fast: false - matrix: - include: - - platform: linux/amd64 - runs-on: ubuntu-24.04 - - platform: linux/386 - runs-on: ubuntu-24.04 - - platform: linux/arm64 - runs-on: ubuntu-24.04-arm - - platform: linux/arm/v7 - runs-on: ubuntu-24.04-arm - - platform: linux/arm/v6 - runs-on: ubuntu-24.04-arm - - name: Build Docker Image for ${{ matrix.platform }} - runs-on: ${{ matrix.runs-on }} - - steps: - - name: Free Space - shell: bash - run: | - df -h . - # Remove android SDK - sudo rm -rf /usr/local/lib/android || true - # Remove .net runtime - sudo rm -rf /usr/share/dotnet || true - df -h . 
- - - name: Checkout Repository - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - - name: Set REPO_NAME Variable - run: | - echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV} - - - name: Set PLATFORM Variable - run: | - platform=${{ matrix.platform }} - echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV - - - name: Set CACHE_NAME Variable - shell: python - run: | - import os, re - - def slugify(input_string, max_length=63): - slug = input_string.lower() - slug = re.sub(r'[^a-z0-9 -]', ' ', slug) - slug = slug.strip() - slug = re.sub(r'\s+', '-', slug) - slug = re.sub(r'-+', '-', slug) - slug = slug[:max_length] - slug = re.sub(r'[-]+$', '', slug) - return slug - - ref_name_slug = "cache" - - if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request": - ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME']) - - with open(os.environ['GITHUB_ENV'], 'a') as env: - env.write(f"CACHE_NAME={ref_name_slug}\n") - - - name: Get ImageOS - # There's no way around this, because "ImageOS" is only available to - # processes, but the setup-go action uses it in its key. - id: imageos - uses: actions/github-script@v8 - with: - result-encoding: string - script: | - return process.env.ImageOS - - - name: Extract Metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v5 - env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages) - with: - images: | - ghcr.io/${{ env.REPO_NAME }} - labels: | - org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone - org.opencontainers.image.vendor=${{ github.repository_owner }} - org.opencontainers.image.authors=rclone - org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} - tags: | - type=sha - type=ref,event=pr - type=ref,event=branch - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - type=raw,value=beta,enable={{is_default_branch}} - - - name: Setup QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Load Go Build Cache for Docker - id: go-cache - uses: actions/cache@v5 - with: - key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }} - # Cache only the go builds, the module download is cached via the docker layer caching - path: | - go-build-cache - - - name: Inject Go Build Cache into Docker - uses: reproducible-containers/buildkit-cache-dance@v3 - with: - cache-map: | - { - "go-build-cache": "/root/.cache/go-build" - } - skip-extraction: ${{ steps.go-cache.outputs.cache-hit }} - - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - # This is the user that triggered the Workflow. In this case, it will - # either be the user whom created the Release or manually triggered - # the workflow_dispatch. - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and Publish Image Digest - id: build - uses: docker/build-push-action@v6 - with: - file: Dockerfile - context: . 
- provenance: false - # don't specify 'tags' here (error "get can't push tagged ref by digest") - # tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - annotations: ${{ steps.meta.outputs.annotations }} - platforms: ${{ matrix.platform }} - outputs: | - type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true - cache-from: | - type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }} - cache-to: | - type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd - - - name: Export Image Digest - run: | - mkdir -p /tmp/digests - digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" - - - name: Upload Image Digest - uses: actions/upload-artifact@v6 - with: - name: digests-${{ env.PLATFORM }} - path: /tmp/digests/* - retention-days: 1 - if-no-files-found: error - - merge-image: - name: Merge & Push Final Docker Image - runs-on: ubuntu-24.04 - needs: - - build-image - - steps: - - name: Download Image Digests - uses: actions/download-artifact@v7 - with: - path: /tmp/digests - pattern: digests-* - merge-multiple: true - - - name: Set REPO_NAME Variable - run: | - echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV} - - - name: Extract Metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v5 - env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: index - with: - images: | - ${{ env.REPO_NAME }} - ghcr.io/${{ env.REPO_NAME }} - labels: | - org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone - org.opencontainers.image.vendor=${{ github.repository_owner }} - org.opencontainers.image.authors=rclone - org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} - tags: | - type=sha - type=ref,event=pr - type=ref,event=branch - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - type=raw,value=beta,enable={{is_default_branch}} - - - name: Extract Tags - shell: python - run: | - import json, os - - metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON'] - metadata = json.loads(metadata_json) - - tags = [f"--tag '{tag}'" for tag in metadata["tags"]] - tags_string = " ".join(tags) - - with open(os.environ['GITHUB_ENV'], 'a') as env: - env.write(f"TAGS={tags_string}\n") - - - name: Extract Annotations - shell: python - run: | - import json, os - - metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON'] - metadata = json.loads(metadata_json) - - annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]] - annotations_string = " ".join(annotations) - - with open(os.environ['GITHUB_ENV'], 'a') as env: - env.write(f"ANNOTATIONS={annotations_string}\n") - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - # This is the user that triggered the Workflow. In this case, it will - # either be the user whom created the Release or manually triggered - # the workflow_dispatch. 
- username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Create & Push Manifest List - working-directory: /tmp/digests - run: | - docker buildx imagetools create \ - ${{ env.TAGS }} \ - ${{ env.ANNOTATIONS }} \ - $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *) - - - name: Inspect and Run Multi-Platform Image - run: | - docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} - docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} - docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version diff --git a/.github/workflows/build_publish_docker_plugin.yml b/.github/workflows/build_publish_docker_plugin.yml deleted file mode 100644 index f88d2f0f18888..0000000000000 --- a/.github/workflows/build_publish_docker_plugin.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -# Github Actions release for rclone -# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*- - -name: Release Build for Docker Plugin - -on: - release: - types: [published] - workflow_dispatch: - inputs: - manual: - description: Manual run (bypass default conditions) - type: boolean - default: true - -jobs: - build_docker_volume_plugin: - if: inputs.manual || github.repository == 'rclone/rclone' - name: Build docker plugin job - runs-on: ubuntu-latest - steps: - - name: Free some space - shell: bash - run: | - df -h . - # Remove android SDK - sudo rm -rf /usr/local/lib/android || true - # Remove .net runtime - sudo rm -rf /usr/share/dotnet || true - df -h . - - name: Checkout master - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - name: Build and publish docker plugin - shell: bash - run: | - VER=${GITHUB_REF#refs/tags/} - PLUGIN_USER=rclone - docker login --username ${{ secrets.DOCKER_HUB_USER }} \ - --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}" - for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do - export PLUGIN_USER PLUGIN_ARCH - make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-} - make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v} - done - make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest - make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v} diff --git a/.github/workflows/notify.yml b/.github/workflows/notify.yml deleted file mode 100644 index 5c5bfb49d6cff..0000000000000 --- a/.github/workflows/notify.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: Notify users based on issue labels - -on: - issues: - types: [labeled] - -jobs: - notify: - runs-on: ubuntu-latest - steps: - - uses: jenschelkopf/issue-label-notification-action@1.3 - with: - token: ${{ secrets.NOTIFY_ACTION_TOKEN }} - recipients: | - Support Contract=@rclone/support diff --git a/.github/workflows/winget.yml b/.github/workflows/winget.yml deleted file mode 100644 index 378e45a455316..0000000000000 --- a/.github/workflows/winget.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Publish to Winget -on: - release: - types: [released] - -jobs: - publish: - runs-on: ubuntu-latest - steps: - - uses: vedantmgoyal2009/winget-releaser@v2 - with: - identifier: Rclone.Rclone - installers-regex: '-windows-\w+\.zip$' - token: ${{ secrets.WINGET_TOKEN }} diff --git a/.gitignore b/.gitignore index 70efa8580e990..e0e06090276dd 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,4 @@ __pycache__ .DS_Store resource_windows_*.syso .devcontainer +.aider* diff --git a/Dockerfile b/Dockerfile index bb086159492c3..15b5164454b6a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,51 +1,23 @@ -FROM golang:alpine 
AS builder - -ARG CGO_ENABLED=0 - -WORKDIR /go/src/github.com/rclone/rclone/ - -RUN echo "**** Set Go Environment Variables ****" && \ - go env -w GOCACHE=/root/.cache/go-build - -RUN echo "**** Install Dependencies ****" && \ - apk add --no-cache \ - make \ - bash \ - gawk \ - git - -COPY go.mod . -COPY go.sum . - -RUN echo "**** Download Go Dependencies ****" && \ - go mod download -x +FROM alpine:latest -RUN echo "**** Verify Go Dependencies ****" && \ - go mod verify +RUN apk --no-cache add ca-certificates fuse3 tzdata unzip coreutils && \ + echo "user_allow_other" >> /etc/fuse.conf -COPY . . +ARG TARGETARCH -RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \ - echo "**** Build Binary ****" && \ - make +ARG TARGETVARIANT -RUN echo "**** Print Version Binary ****" && \ - ./rclone version +ARG VERSION -# Begin final image -FROM alpine:latest - -RUN echo "**** Install Dependencies ****" && \ - apk add --no-cache \ - ca-certificates \ - fuse3 \ - tzdata && \ - echo "Enable user_allow_other in fuse" && \ - echo "user_allow_other" >> /etc/fuse.conf +COPY build/rclone-${VERSION}-linux-${TARGETARCH}${TARGETVARIANT:+-$TARGETVARIANT}.zip /tmp/rclone.zip -COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/ +RUN unzip /tmp/rclone.zip -d /tmp && \ + mv /tmp/rclone-*-linux-${TARGETARCH}${TARGETVARIANT:+-$TARGETVARIANT}/rclone /usr/local/bin/rclone && \ + chmod +x /usr/local/bin/rclone && \ + rm -rf /tmp/rclone* && \ + apk del unzip -RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone +RUN addgroup -g 1000 rclone && adduser -u 1000 -Ds /bin/sh -G rclone rclone ENTRYPOINT [ "rclone" ] diff --git a/Makefile b/Makefile index 5e75a449962f2..15408ad9111de 100644 --- a/Makefile +++ b/Makefile @@ -100,7 +100,6 @@ compiletest: check: rclone @echo "-- START CODE QUALITY REPORT -------------------------------" @golangci-lint run $(LINTTAGS) ./... - @bin/markdown-lint @echo "-- END CODE QUALITY REPORT ---------------------------------" # Get the build dependencies @@ -145,11 +144,9 @@ MANUAL.txt: MANUAL.md pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt commanddocs: rclone - go generate ./lib/transform -@rmdir -p '$$HOME/.config/rclone' XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/ @[ ! 
-e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1) - go run bin/make_bisync_docs.go ./docs/content/ backenddocs: rclone bin/make_backend_docs.py -@rmdir -p '$$HOME/.config/rclone' @@ -208,12 +205,12 @@ upload: upload_github: ./bin/upload-github $(TAG) -cross: doc - go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) +cross: + go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) beta: go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) - rclone -v copy build/ pub.rclone.org:/$(TAG) + rclone -v copy build/ memstore:pub-rclone-org/$(TAG) @echo Beta release ready at https://pub.rclone.org/$(TAG)/ log_since_last_release: @@ -226,18 +223,18 @@ ci_upload: sudo chown -R $$USER build find build -type l -delete gzip -r9v build - ./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds + ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),) - ./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest + ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest endif @echo Beta release ready at $(BETA_URL)/testbuilds ci_beta: git log $(LAST_TAG).. > /tmp/git-log.txt go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) - rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) + rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),) - rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR) + rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR) endif @echo Beta release ready at $(BETA_URL) @@ -246,7 +243,7 @@ fetch_binaries: rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/ serve: website - cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache + cd docs && hugo server -v -w --disableFastRender tag: retag doc bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new diff --git a/backend/all/all.go b/backend/all/all.go index aed11e189ae44..d28e5e99ff46b 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -62,6 +62,7 @@ import ( _ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/swift" + _ "github.com/rclone/rclone/backend/teldrive" _ "github.com/rclone/rclone/backend/ulozto" _ "github.com/rclone/rclone/backend/union" _ "github.com/rclone/rclone/backend/uptobox" diff --git a/backend/teldrive/api/types.go b/backend/teldrive/api/types.go new file mode 100644 index 0000000000000..cbe2f611a0f66 --- /dev/null +++ b/backend/teldrive/api/types.go @@ -0,0 +1,147 @@ +// Package api provides types used by the Teldrive API. 
+package api + +import "time" + +type Error struct { + Code bool `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e Error) Error() string { + out := "api error" + if e.Message != "" { + out += ": " + e.Message + } + return out +} + +type Part struct { + Id int64 + Size int64 + Name string + Start int64 + End int64 +} + +// FileInfo represents a file when listing folder contents +type FileInfo struct { + Id string `json:"id"` + Name string `json:"name"` + MimeType string `json:"mimeType"` + Size int64 `json:"size"` + ParentId string `json:"parentId"` + Type string `json:"type"` + ModTime time.Time `json:"updatedAt"` +} + +type Meta struct { + Count int `json:"count,omitempty"` + TotalPages int `json:"totalPages,omitempty"` + CurrentPage int `json:"currentPage,omitempty"` +} + +type ReadMetadataResponse struct { + Files []FileInfo `json:"items"` + Meta Meta `json:"meta"` +} + +// MetadataRequestOptions represents all the options when listing folder contents +type MetadataRequestOptions struct { + Page int64 + Limit int64 +} + +type CreateDirRequest struct { + Path string `json:"path"` +} + +type PartFile struct { + Name string `json:"name"` + PartId int `json:"partId"` + PartNo int `json:"partNo"` + TotalParts int `json:"totalParts"` + Size int64 `json:"size"` + ChannelID int64 `json:"channelId"` + Encrypted bool `json:"encrypted"` + Salt string `json:"salt"` +} + +type FilePart struct { + ID int `json:"id"` + Salt string `json:"salt,omitempty"` +} + +type CreateFileRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Path string `json:"path,omitempty"` + MimeType string `json:"mimeType,omitempty"` + Size int64 `json:"size,omitempty"` + ChannelID int64 `json:"channelId,omitempty"` + Encrypted bool `json:"encrypted,omitempty"` + Parts []FilePart `json:"parts,omitempty"` + ParentId string `json:"parentId,omitempty"` + ModTime time.Time `json:"updatedAt,omitempty"` +} + +type MoveFileRequest struct { + Destination string `json:"destinationParent,omitempty"` + DestinationLeaf string `json:"destinationName,omitempty"` + Files []string `json:"ids,omitempty"` +} +type DirMove struct { + Source string `json:"source"` + Destination string `json:"destination"` +} + +type UpdateFileInformation struct { + Name string `json:"name,omitempty"` + ModTime *time.Time `json:"updatedAt,omitempty"` + Parts []FilePart `json:"parts,omitempty"` + Size int64 `json:"size,omitempty"` + UploadId string `json:"uploadId,omitempty"` + ChannelID int64 `json:"channelId,omitempty"` + ParentID string `json:"parentId,omitempty"` + Encrypted bool `json:"encrypted,omitempty"` +} + +type RemoveFileRequest struct { + Source string `json:"source,omitempty"` + Files []string `json:"ids,omitempty"` +} +type CopyFile struct { + Newname string `json:"newName"` + Destination string `json:"destination"` + ModTime time.Time `json:"updatedAt,omitempty"` +} + +type Session struct { + UserName string `json:"userName"` + UserId int64 `json:"userId"` + Hash string `json:"hash"` +} + +type FileShare struct { + ID string `json:"id,omitempty"` + ExpiresAt *time.Time `json:"expiresAt,omitempty"` +} + +type CategorySize struct { + Size int64 `json:"totalSize"` +} + +type EventSource struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + ParentId string `json:"parentId"` + DestParentId string `json:"destParentId"` +} + +type Event struct { + ID string `json:"id"` + Type string `json:"type"` + CreatedAt time.Time `json:"createdAt"` + Source EventSource 
`json:"source"` +} diff --git a/backend/teldrive/teldrive.go b/backend/teldrive/teldrive.go new file mode 100644 index 0000000000000..d27f2101a0660 --- /dev/null +++ b/backend/teldrive/teldrive.go @@ -0,0 +1,1285 @@ +// Package teldrive provides an interface to the teldrive storage system. +package teldrive + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/rclone/rclone/backend/teldrive/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" + "golang.org/x/sync/errgroup" +) + +const ( + timeFormat = time.RFC3339 + maxChunkSize = 2000 * fs.Mebi + defaultChunkSize = 500 * fs.Mebi + minChunkSize = 100 * fs.Mebi + authCookieName = "access_token" +) + +var ( + errCanNotUploadFileWithUnknownSize = errors.New("teldrive can't upload files with unknown size") +) + +func init() { + fs.Register(&fs.RegInfo{ + Name: "teldrive", + Description: "Tel Drive", + NewFs: NewFs, + Options: []fs.Option{{ + Help: "Access Token Cookie", + Name: "access_token", + Sensitive: true, + }, { + Help: "Api Host", + Name: "api_host", + Sensitive: true, + }, { + Help: "Chunk Size", + Name: "chunk_size", + Default: defaultChunkSize, + }, { + Help: "Page Size for listing files", + Name: "page_size", + Default: 500, + }, { + Name: "random_chunk_name", + Default: true, + Help: "Random Names For Chunks for Security", + Advanced: true, + }, { + Name: "channel_id", + Help: "Channel ID", + Sensitive: true, + }, { + Name: "upload_concurrency", + Default: 4, + Help: "Upload Concurrency", + Advanced: true, + }, + { + Name: "threaded_streams", + Default: false, + Help: "Thread Streams", + Advanced: true, + }, + { + Help: "Upload Api Host", + Name: "upload_host", + Sensitive: true, + }, + { + Name: "encrypt_files", + Default: false, + Help: "Enable Native Teldrive Encryption", + }, { + + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: encoder.Standard | encoder.EncodeInvalidUtf8, + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + ApiHost string `config:"api_host"` + UploadHost string `config:"upload_host"` + AccessToken string `config:"access_token"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + RootFolderID string `config:"root_folder_id"` + RandomChunkName bool `config:"random_chunk_name"` + UploadConcurrency int `config:"upload_concurrency"` + ChannelID int64 `config:"channel_id"` + EncryptFiles bool `config:"encrypt_files"` + PageSize int64 `config:"page_size"` + ThreadedStreams bool `config:"threaded_streams"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs is the interface a cloud storage system must provide +type Fs struct { + root string + name string + opt Options + features *fs.Features + srv *rest.Client + pacer *fs.Pacer + userId int64 + dirCache *dircache.DirCache + rootFolderID string +} + +// Object represents an teldrive object +type Object struct { + fs *Fs + remote string + id string + size int64 + parentId string + name string + modTime time.Time + mimeType string +} + +// Name of the remote (as passed into NewFs) +func 
(f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String returns a description of the FS +func (f *Fs) String() string { + return fmt.Sprintf("teldrive root '%s'", f.root) +} + +// Precision of the ModTimes in this Fs +func (f *Fs) Precision() time.Duration { + return time.Second +} + +// Hashes returns the supported hash types of the filesystem +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +func checkUploadChunkSize(cs fs.SizeSuffix) error { + if cs < minChunkSize { + return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize) + } + if cs > maxChunkSize { + return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize) + } + return nil +} + +func Ptr[T any](t T) *T { + return &t +} + +// NewFs makes a new Fs object from the path +// +// The path is of the form remote:path +// +// Remotes are looked up in the config file. If the remote isn't +// found then NotFoundInConfigFile will be returned. +// +// On Windows avoid single character remote names as they can be mixed +// up with drive letters. 
+func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) { + opt := new(Options) + err := configstruct.Set(config, opt) + if err != nil { + return nil, err + } + + err = checkUploadChunkSize(opt.ChunkSize) + if err != nil { + return nil, err + } + + if opt.ChannelID < 0 { + channnelId := strconv.FormatInt(opt.ChannelID, 10) + opt.ChannelID, _ = strconv.ParseInt(strings.TrimPrefix(channnelId, "-100"), 10, 64) + } + + f := &Fs{ + name: name, + root: root, + opt: *opt, + pacer: fs.NewPacer(ctx, pacer.NewDefault()), + } + + f.root = strings.Trim(root, "/") + + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + ReadMimeType: true, + ChunkWriterDoesntSeek: true, + }).Fill(ctx, f) + + client := fshttp.NewClient(ctx) + authCookie := http.Cookie{Name: authCookieName, Value: opt.AccessToken} + f.srv = rest.NewClient(client).SetRoot(strings.Trim(opt.ApiHost, "/")).SetCookie(&authCookie) + + opts := rest.Opts{ + Method: "GET", + Path: "/api/auth/session", + } + + var ( + session api.Session + sessionResp *http.Response + ) + + err = f.pacer.Call(func() (bool, error) { + sessionResp, err = f.srv.CallJSON(ctx, &opts, nil, &session) + return shouldRetry(ctx, sessionResp, err) + }) + + if err != nil { + return nil, err + } + if session.UserId == 0 { + return nil, errors.New("invalid session") + } + + for _, cookie := range sessionResp.Cookies() { + if (cookie.Name == authCookieName) && (cookie.Value != "") { + config.Set(authCookieName, cookie.Value) + } + } + + f.userId = session.UserId + + if f.opt.RootFolderID != "" { + f.rootFolderID = f.opt.RootFolderID + } else { + f.rootFolderID, err = f.getRootID(ctx) + if err != nil { + return nil, err + } + config.Set("root_folder_id", f.rootFolderID) + } + f.dirCache = dircache.New(f.root, f.rootFolderID, f) + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF) + tempF.root = newRoot + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return f, nil + } + _, err := tempF.NewObject(ctx, remote) + if err != nil { + if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorIsDir) { + // File doesn't exist so return old f + return f, nil + } + return nil, err + } + f.features.Fill(ctx, &tempF) + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. 
+ // See https://github.com/rclone/rclone/issues/2182 + f.dirCache = tempF.dirCache + f.root = tempF.root + return f, fs.ErrorIsFile + + } + return f, nil +} + +func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.MetadataRequestOptions) (*api.ReadMetadataResponse, error) { + + directoryID, err := f.dirCache.FindDir(ctx, path, false) + + if err != nil { + return nil, err + } + opts := rest.Opts{ + Method: "GET", + Path: "/api/files", + Parameters: url.Values{ + "parentId": []string{directoryID}, + "limit": []string{strconv.FormatInt(options.Limit, 10)}, + "sort": []string{"id"}, + "operation": []string{"list"}, + "page": []string{strconv.FormatInt(options.Page, 10)}, + }, + } + var info api.ReadMetadataResponse + var resp *http.Response + + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return &info, nil +} + +func (f *Fs) getRootID(ctx context.Context) (string, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files", + Parameters: url.Values{ + "parentId": []string{"nil"}, + "operation": []string{"find"}, + "name": []string{"root"}, + "type": []string{"folder"}, + }, + } + var err error + var info api.ReadMetadataResponse + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return "", err + } + if len(info.Files) == 0 { + return "", fmt.Errorf("couldn't find root directory ID: %w", err) + } + return info.Files[0].Id, nil +} + +func (f *Fs) getFileShare(ctx context.Context, id string) (*api.FileShare, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files/" + id + "/share", + } + res := api.FileShare{} + var ( + resp *http.Response + err error + ) + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &res) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp.StatusCode == http.StatusNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + if res.ExpiresAt != nil && res.ExpiresAt.UTC().Before(time.Now().UTC()) { + return nil, fs.ErrorObjectNotFound + } + return &res, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + + opts := &api.MetadataRequestOptions{ + Limit: f.opt.PageSize, + Page: 1, + } + + files := []api.FileInfo{} + + info, err := f.readMetaDataForPath(ctx, dir, opts) + + if err != nil { + return nil, err + } + + files = append(files, info.Files...) + mu := sync.Mutex{} + if info.Meta.TotalPages > 1 { + g, _ := errgroup.WithContext(ctx) + + g.SetLimit(8) + + for i := 2; i <= info.Meta.TotalPages; i++ { + page := i + g.Go(func() error { + opts := &api.MetadataRequestOptions{ + Limit: f.opt.PageSize, + Page: int64(page), + } + info, err := f.readMetaDataForPath(ctx, dir, opts) + if err != nil { + return err + } + mu.Lock() + files = append(files, info.Files...) 
+ mu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + } + + for _, item := range files { + remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name)) + if item.Type == "folder" { + f.dirCache.Put(remote, item.Id) + d := fs.NewDir(remote, item.ModTime).SetID(item.Id).SetParentID(item.ParentId). + SetSize(item.Size) + entries = append(entries, d) + } + if item.Type == "file" { + o, err := f.newObjectWithInfo(ctx, remote, &item) + if err != nil { + continue + } + entries = append(entries, o) + } + + } + return entries, nil +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(_ context.Context, remote string, info *api.FileInfo) (fs.Object, error) { + if info == nil { + return nil, fs.ErrorObjectNotFound + } + o := &Object{ + fs: f, + remote: remote, + id: info.Id, + size: info.Size, + parentId: info.ParentId, + name: info.Name, + modTime: info.ModTime, + mimeType: info.MimeType, + } + if info.Type == "folder" { + return o, fs.ErrorIsDir + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found it +// returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + } + + res, err := f.findObject(ctx, directoryID, leaf) + if err != nil || len(res) == 0 { + return nil, fs.ErrorObjectNotFound + } + if res[0].Type == "folder" { + return nil, fs.ErrorIsDir + } + + return f.newObjectWithInfo(ctx, remote, &res[0]) +} + +func (f *Fs) findObject(ctx context.Context, pathID, leaf string) ([]api.FileInfo, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files", + Parameters: url.Values{ + "parentId": []string{pathID}, + "operation": []string{"find"}, + "name": []string{leaf}, + "sort": []string{"id"}, + "order": []string{"desc"}, + "limit": []string{"1"}, + }, + } + var info api.ReadMetadataResponse + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return info.Files, nil +} + +func (f *Fs) moveTo(ctx context.Context, id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID string) error { + + if srcDirectoryID != dstDirectoryID { + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/move", + NoResponse: true, + } + mv := api.MoveFileRequest{ + Destination: dstDirectoryID, + Files: []string{id}, + DestinationLeaf: dstLeaf, + } + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &mv, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return fmt.Errorf("couldn't move file: %w", err) + } + } else { + if srcLeaf != dstLeaf { + err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Name: dstLeaf}, id) + if err != nil { + return fmt.Errorf("move: failed rename: %w", err) + } + } + } + + return nil +} + +// updateFileInformation set's various file attributes most importantly it's name +func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileInformation, fileId string) (err error) { + opts := rest.Opts{ + Method: "PATCH", + Path: "/api/files/" + fileId, + NoResponse: true, + } + + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, update, nil) + return shouldRetry(ctx, resp, err) 
+ }) + if err != nil { + return fmt.Errorf("couldn't update file info: %w", err) + } + return err +} + +func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, _ ...fs.OpenOption) error { + + o := &Object{ + fs: f, + } + uploadInfo, err := o.uploadMultipart(ctx, bufio.NewReader(in), src) + + if err != nil { + return err + } + + return o.createFile(ctx, src, uploadInfo) +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + files, err := f.findObject(ctx, pathID, leaf) + if err != nil { + return "", false, err + } + if len(files) == 0 { + return "", false, nil + } + if files[0].Type == "file" { + return "", false, fs.ErrorIsFile + } + return files[0].Id, true, nil +} + +// Put in to the remote path with the modTime given of the given size +// +// When called from outside an Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Put should either +// return an error or upload it properly (rather than e.g. calling panic). +// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + if src.Size() < 0 { + return nil, errCanNotUploadFileWithUnknownSize + } + existingObj, err := f.NewObject(ctx, src.Remote()) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src, options...) + default: + return nil, err + } +} + +// PutUnchecked uploads the object +// +// This will create a duplicate if we upload a new file without +// checking to see if there is one already - use Put() for that. +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + err := f.putUnchecked(ctx, in, src, options...) + if err != nil { + return nil, err + } + return f.NewObject(ctx, src.Remote()) +} + +// Update the already existing object +// +// Copy the reader into the object updating modTime and size. 
+// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + + if src.Size() < 0 { + return errCanNotUploadFileWithUnknownSize + } + + remote := o.Remote() + + modTime := src.ModTime(ctx) + + leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) + if err != nil { + return err + } + + var uploadInfo *uploadInfo + + if src.Size() > 0 { + uploadInfo, err = o.uploadMultipart(ctx, bufio.NewReader(in), src) + if err != nil { + return err + } + } + + payload := &api.UpdateFileInformation{ + ModTime: Ptr(modTime.UTC()), + Size: src.Size(), + ParentID: directoryID, + Name: leaf, + } + + if uploadInfo != nil { + payload.Parts = uploadInfo.fileChunks + payload.UploadId = uploadInfo.uploadID + payload.ChannelID = o.fs.opt.ChannelID + payload.Encrypted = uploadInfo.encryptFile + } + + opts := rest.Opts{ + Method: "PUT", + Path: "/api/files/" + o.id + "/parts", + NoResponse: true, + } + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, payload, nil) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return fmt.Errorf("failed to update file information: %w", err) + } + + o.modTime = modTime + + o.size = src.Size() + + return nil +} + +// ChangeNotify calls the passed function with a path that has had changes. +// If the implementation uses polling, it should adhere to the given interval. +// +// Automatically restarts itself in case of unexpected behavior of the remote. +// +// Close the returned channel to stop being notified. +func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { + go func() { + processedEventIDs := make(map[string]time.Time) + var ticker *time.Ticker + var tickerC <-chan time.Time + for { + select { + case pollInterval, ok := <-pollIntervalChan: + if !ok { + if ticker != nil { + ticker.Stop() + } + return + } + if ticker != nil { + ticker.Stop() + ticker, tickerC = nil, nil + } + if pollInterval != 0 { + ticker = time.NewTicker(pollInterval) + tickerC = ticker.C + } + case <-tickerC: + fs.Debugf(f, "Checking for changes on remote") + for eventID, timestamp := range processedEventIDs { + if time.Since(timestamp) > 5*time.Minute { + delete(processedEventIDs, eventID) + } + } + err := f.changeNotifyRunner(ctx, notifyFunc, processedEventIDs) + if err != nil { + fs.Infof(f, "Change notify listener failure: %s", err) + } + } + } + }() +} + +func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), processedEventIDs map[string]time.Time) error { + + var changes []api.Event + + opts := rest.Opts{ + Method: "GET", + Path: "/api/events", + } + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &changes) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return err + } + + var pathsToClear []string + for _, change := range changes { + if _, ok := processedEventIDs[change.ID]; ok { + continue + } + addPathToClear := func(parentID string) { + if path, ok := f.dirCache.GetInv(parentID); ok { + pathsToClear = append(pathsToClear, path) + } + } + + // Check original parent location + addPathToClear(change.Source.ParentId) + + // Check destination parent location if file was moved + if change.Source.DestParentId != "" { + addPathToClear(change.Source.DestParentId) + } + processedEventIDs[change.ID] = time.Now() + } + notifiedPaths := make(map[string]bool) 
+ for _, path := range pathsToClear { + if _, ok := notifiedPaths[path]; ok { + continue + } + notifiedPaths[path] = true + notifyFunc(path, fs.EntryDirectory) + } + return nil +} + +// OpenChunkWriter returns the chunk size and a ChunkWriter +// +// Pass in the remote and the src object +// You can also use options to hint at the desired chunk size +func (f *Fs) OpenChunkWriter( + ctx context.Context, + remote string, + src fs.ObjectInfo, + options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { + + if src.Size() <= 0 { + return info, nil, errCanNotUploadFileWithUnknownSize + } + + o := &Object{ + fs: f, + remote: remote, + } + + uploadInfo, err := o.prepareUpload(ctx, src) + + if err != nil { + return info, nil, fmt.Errorf("failed to prepare upload: %w", err) + } + + chunkWriter := &objectChunkWriter{ + size: src.Size(), + f: f, + src: src, + o: o, + uploadInfo: uploadInfo, + } + info = fs.ChunkWriterInfo{ + ChunkSize: uploadInfo.chunkSize, + Concurrency: o.fs.opt.UploadConcurrency, + LeavePartsOnError: true, + } + fs.Debugf(o, "open chunk writer: started upload: %v", uploadInfo.uploadID) + return info, chunkWriter, err +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + opts := rest.Opts{ + Method: "POST", + Path: "/api/files", + } + mkdir := api.CreateFileRequest{ + Name: leaf, + Type: "folder", + ParentId: pathID, + } + info := api.FileInfo{} + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &mkdir, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return "", err + } + return info.Id, nil +} + +// Mkdir makes the directory (container, bucket) +// +// Shouldn't return an error if it already exists +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { + _, err = f.dirCache.FindDir(ctx, dir, true) + return err +} + +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { + root := path.Join(f.root, dir) + if root == "" { + return errors.New("can't purge root directory") + } + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return err + } + + if check { + info, err := f.readMetaDataForPath(ctx, dir, &api.MetadataRequestOptions{ + Limit: 1, + Page: 1, + }) + if err != nil { + return err + } + if len(info.Files) > 0 { + return fs.ErrorDirectoryNotEmpty + } + } + + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/delete", + NoResponse: true, + } + rm := api.RemoveFileRequest{ + Files: []string{directoryID}, + } + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &rm, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + f.dirCache.FlushDir(dir) + return nil +} + +// Rmdir removes the directory (container, bucket) if empty +// +// Return an error if it doesn't exist or isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { + return f.purgeCheck(ctx, dir, true) +} + +// Purge all files in the directory specified +// +// Implement this if you have a way of deleting all the files +// quicker than just running Remove() on the result of List() +// +// Return an error if it doesn't exist +func (f *Fs) Purge(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, false) +} + +// Move src to this remote using server-side move operations. +// +// This is stored with the remote path given. 
+//
+// It returns the destination Object and a possible error.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't move - not same remote type")
+		return nil, fs.ErrorCantMove
+	}
+
+	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
+	if err != nil {
+		return nil, err
+	}
+
+	dstLeaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+	if err != nil {
+		return nil, err
+	}
+
+	err = f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcParentID, directoryID)
+	if err != nil {
+		return nil, err
+	}
+	f.dirCache.FlushDir(src.Remote())
+	newObj := *srcObj
+	newObj.remote = remote
+	newObj.fs = f
+	return &newObj, nil
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+	srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	if err != nil {
+		return err
+	}
+	err = f.moveTo(ctx, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
+	if err != nil {
+		return fmt.Errorf("dirmove: failed to move: %w", err)
+	}
+	srcFs.dirCache.FlushDir(srcRemote)
+	return nil
+}
+
+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	opts := rest.Opts{
+		Method:     "POST",
+		Path:       "/api/files/delete",
+		NoResponse: true,
+	}
+	rm := api.RemoveFileRequest{
+		Files: []string{o.id},
+	}
+	err := o.fs.pacer.Call(func() (bool, error) {
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &rm, nil)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
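+//
+// If unlink is true any existing share for the file or folder is removed
+// instead and an empty link is returned.
+// A non-default expire sets an expiry time on the new share.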
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { + id, err := f.dirCache.FindDir(ctx, remote, false) + if err == nil { + fs.Debugf(f, "attempting to share directory '%s'", remote) + } else { + fs.Debugf(f, "attempting to share single file '%s'", remote) + o, err := f.NewObject(ctx, remote) + if err != nil { + return "", err + } + id = o.(fs.IDer).ID() + } + if unlink { + opts := rest.Opts{ + Method: "DELETE", + Path: "/api/files/" + id + "/share", + NoResponse: true, + } + f.pacer.Call(func() (bool, error) { + resp, err := f.srv.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + return "", nil + } + + share, err := f.getFileShare(ctx, id) + if err != nil { + if !errors.Is(err, fs.ErrorObjectNotFound) { + return "", err + } + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/" + id + "/share", + NoResponse: true, + } + payload := api.FileShare{} + if expire < fs.DurationOff { + dur := time.Now().Add(time.Duration(expire)).UTC() + payload.ExpiresAt = &dur + } + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &payload, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return "", err + } + share, err = f.getFileShare(ctx, id) + if err != nil { + return "", err + } + } + return fmt.Sprintf("%s/share/%s", f.opt.ApiHost, share.ID), nil +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + var resp *http.Response + + http := o.fs.srv + + fs.FixRangeOption(options, o.size) + + opts := rest.Opts{ + Method: "GET", + Path: fmt.Sprintf("/api/files/%s/%s", o.id, url.QueryEscape(o.name)), + Options: options, + } + if !o.fs.opt.ThreadedStreams { + opts.Parameters = url.Values{ + "download": []string{"1"}, + } + } + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = http.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return nil, err + } + return resp.Body, err +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given. +// +// It returns the destination Object and a possible error. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false) + if err != nil { + return nil, err + } + dstLeaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return nil, err + } + + if srcParentID == directoryID && dstLeaf == srcLeaf { + fs.Debugf(src, "Can't copy - change file name") + return nil, fs.ErrorCantCopy + } + + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/" + srcObj.id + "/copy", + } + copy := api.CopyFile{ + Newname: dstLeaf, + Destination: directoryID, + ModTime: srcObj.ModTime(ctx).UTC(), + } + var info api.FileInfo + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, ©, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return f.newObjectWithInfo(ctx, remote, &info) +} + +// About gets quota information +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files/categories", + } + var stats []api.CategorySize + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &stats) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to read user info: %w", err) + } + + total := int64(0) + for category := range stats { + total += stats[category].Size + } + return &fs.Usage{Used: fs.NewUsageValue(total)}, nil +} + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + return o.modTime +} + +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +// Hash returns the Md5sum of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.id +} + +// ParentID implements fs.ParentIDer. 
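+//
+// It returns the ID of the Object's parent directory if known, or "" if not.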
+func (o *Object) ParentID() string { + return o.parentId +} + +// Storable returns whether this object is storable +func (o *Object) Storable() bool { + return true +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + updateInfo := &api.UpdateFileInformation{ + ModTime: Ptr(modTime.UTC()), + } + err := o.fs.updateFileInformation(ctx, updateInfo, o.id) + if err != nil { + return fmt.Errorf("couldn't update mod time: %w", err) + } + o.modTime = modTime + return nil +} + +// DirCacheFlush an optional interface to flush internal directory cache +// DirCacheFlush resets the directory cache - used in testing +// as an optional interface +func (f *Fs) DirCacheFlush() { + f.dirCache.ResetRoot() +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.MimeTyper = &Object{} + _ fs.OpenChunkWriter = (*Fs)(nil) + _ fs.IDer = (*Object)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.PublicLinker = (*Fs)(nil) + _ fs.ParentIDer = (*Object)(nil) + _ fs.Abouter = (*Fs)(nil) +) diff --git a/backend/teldrive/teldrive_test.go b/backend/teldrive/teldrive_test.go new file mode 100644 index 0000000000000..98576b13d9799 --- /dev/null +++ b/backend/teldrive/teldrive_test.go @@ -0,0 +1,20 @@ +package teldrive + +import ( + "testing" + + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "teldrive:", + NilObject: (*Object)(nil), + ChunkedUpload: fstests.ChunkedUploadConfig{ + MinChunkSize: minChunkSize, + CeilChunkSize: fstests.NextPowerOfTwo, + }, + SkipInvalidUTF8: true, + }) +} diff --git a/backend/teldrive/upload.go b/backend/teldrive/upload.go new file mode 100644 index 0000000000000..c5ab79846cf17 --- /dev/null +++ b/backend/teldrive/upload.go @@ -0,0 +1,386 @@ +package teldrive + +import ( + "context" + "crypto/md5" + "encoding/hex" + "errors" + "fmt" + "io" + "net/url" + "sort" + "strconv" + "sync" + + "github.com/google/uuid" + "github.com/rclone/rclone/backend/teldrive/api" + "github.com/rclone/rclone/fs/operations" + "github.com/rclone/rclone/lib/pool" + "github.com/rclone/rclone/lib/rest" + + "github.com/rclone/rclone/fs" +) + +type uploadInfo struct { + existingChunks map[int]api.PartFile + uploadID string + channelID int64 + encryptFile bool + chunkSize int64 + totalChunks int64 + fileChunks []api.FilePart + fileName string + dir string +} + +type objectChunkWriter struct { + size int64 + f *Fs + src fs.ObjectInfo + partsToCommitMu sync.Mutex + partsToCommit []api.PartFile + o *Object + uploadInfo *uploadInfo +} + +func getMD5Hash(text string) string { + hash := md5.Sum([]byte(text)) + return hex.EncodeToString(hash[:]) +} + +// WriteChunk will write chunk number with reader bytes, where chunk number >= 0 +func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) { + if chunkNumber < 0 { + err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber) + return -1, err + } + + chunkNumber += 1 + + if existing, ok := w.uploadInfo.existingChunks[chunkNumber]; ok { + switch r := reader.(type) { + case *operations.ReOpen: + r.Account(int(existing.Size)) + case *pool.RW: + r.Account(int(existing.Size)) + default: + } + w.addCompletedPart(existing) + return existing.Size, 
nil
+	}
+
+	var (
+		response api.PartFile
+		partName string
+	)
+
+	err = w.f.pacer.Call(func() (bool, error) {
+		size, err = reader.Seek(0, io.SeekEnd)
+		if err != nil {
+			return false, err
+		}
+
+		_, err = reader.Seek(0, io.SeekStart)
+		if err != nil {
+			return false, err
+		}
+
+		fs.Debugf(w.o, "Sending chunk %d length %d", chunkNumber, size)
+		if w.f.opt.RandomChunkName {
+			partName = getMD5Hash(uuid.New().String())
+		} else {
+			partName = w.uploadInfo.fileName
+			if w.uploadInfo.totalChunks > 1 {
+				partName = fmt.Sprintf("%s.part.%03d", w.uploadInfo.fileName, chunkNumber)
+			}
+		}
+
+		opts := rest.Opts{
+			Method:        "POST",
+			Body:          reader,
+			ContentLength: &size,
+			ContentType:   "application/octet-stream",
+			Parameters: url.Values{
+				"partName":  []string{partName},
+				"fileName":  []string{w.uploadInfo.fileName},
+				"partNo":    []string{strconv.Itoa(chunkNumber)},
+				"channelId": []string{strconv.FormatInt(w.uploadInfo.channelID, 10)},
+				"encrypted": []string{strconv.FormatBool(w.uploadInfo.encryptFile)},
+			},
+		}
+
+		if w.f.opt.UploadHost != "" {
+			opts.RootURL = w.f.opt.UploadHost + "/api/uploads/" + w.uploadInfo.uploadID
+		} else {
+			opts.Path = "/api/uploads/" + w.uploadInfo.uploadID
+		}
+
+		resp, err := w.f.srv.CallJSON(ctx, &opts, nil, &response)
+		retry, err := shouldRetry(ctx, resp, err)
+		if err != nil {
+			fs.Debugf(w.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
+		}
+		if response.PartId == 0 {
+			return true, fmt.Errorf("error sending chunk %d", chunkNumber)
+		}
+		return retry, err
+	})
+	if err != nil {
+		return 0, fmt.Errorf("error sending chunk %d: %w", chunkNumber, err)
+	}
+
+	w.addCompletedPart(response)
+	fs.Debugf(w.o, "Done sending chunk %d", chunkNumber)
+	return size, err
+}
+
+// addCompletedPart adds an uploaded part to the list of parts to commit
+func (w *objectChunkWriter) addCompletedPart(part api.PartFile) {
+	w.partsToCommitMu.Lock()
+	defer w.partsToCommitMu.Unlock()
+	w.partsToCommit = append(w.partsToCommit, part)
+}
+
+// Close finishes the upload by creating the file from the uploaded parts
+func (w *objectChunkWriter) Close(ctx context.Context) error {
+	if w.uploadInfo.totalChunks != int64(len(w.partsToCommit)) {
+		return fmt.Errorf("upload failed: uploaded %d parts out of %d", len(w.partsToCommit), w.uploadInfo.totalChunks)
+	}
+
+	sort.Slice(w.partsToCommit, func(i, j int) bool {
+		return w.partsToCommit[i].PartNo < w.partsToCommit[j].PartNo
+	})
+
+	fileChunks := []api.FilePart{}
+	for _, part := range w.partsToCommit {
+		fileChunks = append(fileChunks, api.FilePart{ID: part.PartId, Salt: part.Salt})
+	}
+
+	w.uploadInfo.fileChunks = fileChunks
+	return w.o.createFile(ctx, w.src, w.uploadInfo)
+}
+
+// Abort is a no-op - parts already uploaded are kept so the upload can be resumed
+func (*objectChunkWriter) Abort(ctx context.Context) error {
+	return nil
+}
+
+// prepareUpload works out the chunking for the upload and reads back any
+// parts which have already been uploaded for this file
+func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo) (*uploadInfo, error) {
+	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, src.Remote(), true)
+	if err != nil {
+		return nil, err
+	}
+
+	uploadID := getMD5Hash(fmt.Sprintf("%s:%s:%d:%d", directoryID, leaf, src.Size(), o.fs.userId))
+
+	var (
+		uploadParts    []api.PartFile
+		existingChunks map[int]api.PartFile
+	)
+
+	opts := rest.Opts{
+		Method: "GET",
+		Path:   "/api/uploads/" + uploadID,
+	}
+
+	chunkSize := int64(o.fs.opt.ChunkSize)
+
+	if chunkSize < src.Size() {
+		err := o.fs.pacer.Call(func() (bool, error) {
+			resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploadParts)
+			return shouldRetry(ctx, resp, err)
+		})
+		if err != nil {
+			return nil, err
+		}
+		existingChunks = make(map[int]api.PartFile, len(uploadParts))
+		for _, part := range uploadParts {
+			existingChunks[part.PartNo] = part
+		}
+	}
+
+	totalChunks 
:= src.Size() / chunkSize + + if src.Size()%chunkSize != 0 { + totalChunks++ + } + + channelID := o.fs.opt.ChannelID + + encryptFile := o.fs.opt.EncryptFiles + + if len(uploadParts) > 0 { + channelID = uploadParts[0].ChannelID + encryptFile = uploadParts[0].Encrypted + } + + return &uploadInfo{ + existingChunks: existingChunks, + uploadID: uploadID, + channelID: channelID, + encryptFile: encryptFile, + chunkSize: chunkSize, + totalChunks: totalChunks, + fileName: leaf, + dir: directoryID, + }, nil +} + +func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo) (*uploadInfo, error) { + + size := src.Size() + + if size < 0 { + return nil, errors.New("unknown-sized upload not supported") + } + + uploadInfo, err := o.prepareUpload(ctx, src) + + if err != nil { + return nil, err + } + + if size > 0 { + + var ( + partsToCommit []api.PartFile + uploadedSize int64 + ) + + totalChunks := int(uploadInfo.totalChunks) + + for chunkNo := 1; chunkNo <= totalChunks; chunkNo++ { + if existing, ok := uploadInfo.existingChunks[chunkNo]; ok { + io.CopyN(io.Discard, in, existing.Size) + partsToCommit = append(partsToCommit, existing) + uploadedSize += existing.Size + continue + } + + n := uploadInfo.chunkSize + + if chunkNo == totalChunks { + n = src.Size() - uploadedSize + } + + chunkName := uploadInfo.fileName + + if o.fs.opt.RandomChunkName { + chunkName = getMD5Hash(uuid.New().String()) + } else if totalChunks > 1 { + chunkName = fmt.Sprintf("%s.part.%03d", chunkName, chunkNo) + } + + partReader := io.LimitReader(in, n) + + opts := rest.Opts{ + Method: "POST", + Body: partReader, + ContentLength: &n, + ContentType: "application/octet-stream", + Parameters: url.Values{ + "partName": []string{chunkName}, + "fileName": []string{uploadInfo.fileName}, + "partNo": []string{strconv.Itoa(chunkNo)}, + "channelId": []string{strconv.FormatInt(uploadInfo.channelID, 10)}, + "encrypted": []string{strconv.FormatBool(uploadInfo.encryptFile)}, + }, + } + + if o.fs.opt.UploadHost != "" { + opts.RootURL = o.fs.opt.UploadHost + "/api/uploads/" + uploadInfo.uploadID + + } else { + opts.Path = "/api/uploads/" + uploadInfo.uploadID + } + + var partInfo api.PartFile + + _, err := o.fs.srv.CallJSON(ctx, &opts, nil, &partInfo) + + if err != nil { + return nil, err + } + + uploadedSize += n + + partsToCommit = append(partsToCommit, partInfo) + } + + sort.Slice(partsToCommit, func(i, j int) bool { + return partsToCommit[i].PartNo < partsToCommit[j].PartNo + }) + + fileChunks := []api.FilePart{} + + for _, part := range partsToCommit { + fileChunks = append(fileChunks, api.FilePart{ID: part.PartId, Salt: part.Salt}) + } + + uploadInfo.fileChunks = fileChunks + } + + return uploadInfo, nil + +} + +func (o *Object) createFile(ctx context.Context, src fs.ObjectInfo, uploadInfo *uploadInfo) error { + + opts := rest.Opts{ + Method: "POST", + Path: "/api/files", + NoResponse: true, + } + + payload := api.CreateFileRequest{ + Name: uploadInfo.fileName, + Type: "file", + ParentId: uploadInfo.dir, + MimeType: fs.MimeType(ctx, src), + Size: src.Size(), + Parts: uploadInfo.fileChunks, + ChannelID: uploadInfo.channelID, + Encrypted: uploadInfo.encryptFile, + ModTime: src.ModTime(ctx).UTC(), + } + + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, &payload, nil) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return err + } + if src.Size() > 0 { + opts = rest.Opts{ + Method: "DELETE", + Path: "/api/uploads/" + uploadInfo.uploadID, + NoResponse: true, + } + + 
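+		// Remove the upload session now that the file has been created from its parts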
err = o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + } + return nil +} diff --git a/bin/upload-github b/bin/upload-github index 0ebdc708dab20..a1f01a7f3c3bd 100755 --- a/bin/upload-github +++ b/bin/upload-github @@ -6,7 +6,7 @@ set -e -REPO="rclone/rclone" +REPO="tgdrive/rclone" if [ "$1" == "" ]; then echo "Syntax: $0 Version" @@ -29,7 +29,7 @@ gh release create "${VERSION}" \ --notes-file "/tmp/${VERSION}-release-notes" \ --draft=true -for build in build/*; do +for build in /tmp/build/*; do case $build in *current*) continue ;; *testbuilds*) continue ;; diff --git a/bin/zip.3.0.nupkg b/bin/zip.3.0.nupkg new file mode 100644 index 0000000000000..dfa2739836fc4 Binary files /dev/null and b/bin/zip.3.0.nupkg differ diff --git a/fs/operations/reopen.go b/fs/operations/reopen.go index fcc2a6860ba3c..7ea8c18f3c2d3 100644 --- a/fs/operations/reopen.go +++ b/fs/operations/reopen.go @@ -332,6 +332,14 @@ func (h *ReOpen) SetAccounting(account AccountFn) *ReOpen { return h } +// Account sets the accounting function to account for n bytes being read +func (h *ReOpen) Account(n int) *ReOpen { + if h.account != nil { + h.account(n) + } + return h +} + // DelayAccounting makes sure the accounting function only gets called // on the i-th or later read of the data from this point (counting // from 1). diff --git a/fs/rc/rcserver/rcserver.go b/fs/rc/rcserver/rcserver.go index 7b6622c43aad7..f94724d42d31a 100644 --- a/fs/rc/rcserver/rcserver.go +++ b/fs/rc/rcserver/rcserver.go @@ -363,7 +363,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string } // Match URLS of the form [fs]/remote -var fsMatch = regexp.MustCompile(`^\[(.*?)\](.*)$`) +var fsMatch = regexp.MustCompile(`^\[([^}]*)\](.*)$`) func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string) { // Look to see if this has an fs in the path diff --git a/lib/pool/reader_writer.go b/lib/pool/reader_writer.go index 3a14af2c03185..8ea398ddb6b96 100644 --- a/lib/pool/reader_writer.go +++ b/lib/pool/reader_writer.go @@ -87,6 +87,14 @@ func (rw *RW) SetAccounting(account RWAccount) *RW { return rw } +// Account sets the accounting function to account for n bytes being read +func (rw *RW) Account(n int) *RW { + if rw.account != nil { + rw.account(n) + } + return rw +} + // DelayAccountinger enables an accounting delay type DelayAccountinger interface { // DelayAccounting makes sure the accounting function only