diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..4371c0a
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,59 @@
+version: 2
+updates:
+ # Enable version updates for Go modules
+ - package-ecosystem: "gomod"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ time: "09:00"
+ reviewers:
+ - "nullable-eth"
+ assignees:
+ - "nullable-eth"
+ commit-message:
+ prefix: "deps"
+ prefix-development: "deps(dev)"
+ include: "scope"
+ open-pull-requests-limit: 5
+ labels:
+ - "dependencies"
+ - "go"
+
+ # Enable version updates for GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ time: "09:00"
+ reviewers:
+ - "nullable-eth"
+ assignees:
+ - "nullable-eth"
+ commit-message:
+ prefix: "ci"
+ include: "scope"
+ open-pull-requests-limit: 5
+ labels:
+ - "dependencies"
+ - "github-actions"
+
+ # Enable version updates for Docker
+ - package-ecosystem: "docker"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ time: "09:00"
+ reviewers:
+ - "nullable-eth"
+ assignees:
+ - "nullable-eth"
+ commit-message:
+ prefix: "docker"
+ include: "scope"
+ open-pull-requests-limit: 3
+ labels:
+ - "dependencies"
+ - "docker"
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..2346d2a
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,335 @@
+name: Create Release
+
+on:
+ workflow_dispatch:
+ inputs:
+ release_type:
+ description: 'Release type'
+ required: true
+ default: 'patch'
+ type: choice
+ options:
+ - patch
+ - minor
+ - major
+ push:
+ branches:
+ - main
+ paths-ignore:
+ - 'README*.md'
+ - 'LICENSE'
+ - '.gitignore'
+ - 'IMPLEMENTATION_PLAN.md'
+
+jobs:
+ check-changes:
+ runs-on: ubuntu-latest
+ outputs:
+ should_release: ${{ steps.changes.outputs.should_release }}
+ version: ${{ steps.version.outputs.version }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Check for significant changes
+ id: changes
+ run: |
+ # Get the last release tag
+ LAST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
+ echo "Last tag: ${LAST_TAG:-'(none)'}"
+
+ # Check if there are changes in source code since last release
+ if [[ -z "$LAST_TAG" ]]; then
+ # No previous tags, this is the first release
+ echo "No previous tags found - first release"
+ echo "should_release=true" >> $GITHUB_OUTPUT
+ elif git diff --quiet $LAST_TAG HEAD -- '*.go' 'go.mod' 'go.sum' 'Dockerfile' '.github/workflows/' 'internal/' 'cmd/' 'pkg/'; then
+ echo "No significant changes detected"
+ echo "should_release=false" >> $GITHUB_OUTPUT
+ else
+ echo "Significant changes detected"
+ echo "should_release=true" >> $GITHUB_OUTPUT
+ fi
+
+ - name: Calculate next version
+ id: version
+ if: steps.changes.outputs.should_release == 'true' || github.event_name == 'workflow_dispatch'
+ run: |
+ # Get the last release tag
+ LAST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
+ echo "Last tag: ${LAST_TAG:-'(none)'}"
+
+ # Handle first release vs. subsequent releases
+ if [[ -z "$LAST_TAG" ]]; then
+ # First release - start from v0.0.0 so the release-type bump below produces the first version
+ MAJOR=0
+ MINOR=0
+ PATCH=0
+ echo "First release - starting from v0.0.0"
+ else
+ # Remove 'v' prefix and split version
+ VERSION_NUMBER=${LAST_TAG#v}
+ IFS='.' read -r -a VERSION_PARTS <<< "$VERSION_NUMBER"
+
+ MAJOR=${VERSION_PARTS[0]:-0}
+ MINOR=${VERSION_PARTS[1]:-0}
+ PATCH=${VERSION_PARTS[2]:-0}
+ fi
+
+ # Determine release type
+ if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
+ RELEASE_TYPE="${{ github.event.inputs.release_type }}"
+ else
+ # Auto-determine based on commit messages since last release
+ if [[ -z "$LAST_TAG" ]]; then
+ # First release - check all commits
+ COMMITS=$(git log --oneline)
+ else
+ # Subsequent release - check commits since last tag
+ COMMITS=$(git log $LAST_TAG..HEAD --oneline)
+ fi
+
+ if echo "$COMMITS" | grep -qE "(BREAKING CHANGE|!:)"; then
+ RELEASE_TYPE="major"
+ elif echo "$COMMITS" | grep -qE "(feat:|feature:)"; then
+ RELEASE_TYPE="minor"
+ else
+ RELEASE_TYPE="patch"
+ fi
+ fi
+
+ echo "Release type: $RELEASE_TYPE"
+
+ # Increment version based on release type
+ case $RELEASE_TYPE in
+ major)
+ MAJOR=$((MAJOR + 1))
+ MINOR=0
+ PATCH=0
+ ;;
+ minor)
+ MINOR=$((MINOR + 1))
+ PATCH=0
+ ;;
+ patch)
+ PATCH=$((PATCH + 1))
+ ;;
+ esac
+
+ NEW_VERSION="v${MAJOR}.${MINOR}.${PATCH}"
+ echo "New version: $NEW_VERSION"
+ echo "version=$NEW_VERSION" >> $GITHUB_OUTPUT
+
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.23'
+
+ - name: Cache Go modules
+ uses: actions/cache@v3
+ with:
+ path: ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+
+ - name: Download dependencies
+ run: go mod download
+
+ - name: Run tests
+ run: go test -v ./...
+
+ - name: Run go vet
+ run: go vet ./...
+
+ - name: Run go fmt check
+ run: |
+ if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then
+ echo "The following files need to be formatted:"
+ gofmt -s -l .
+ exit 1
+ fi
+
+ build:
+ needs: test
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.23'
+
+ - name: Build application
+ run: |
+ # Build for multiple architectures
+ GOOS=linux GOARCH=amd64 go build -o syncarr-linux-amd64 ./cmd/syncarr
+ GOOS=linux GOARCH=arm64 go build -o syncarr-linux-arm64 ./cmd/syncarr
+ GOOS=windows GOARCH=amd64 go build -o syncarr-windows-amd64.exe ./cmd/syncarr
+ GOOS=darwin GOARCH=amd64 go build -o syncarr-darwin-amd64 ./cmd/syncarr
+ GOOS=darwin GOARCH=arm64 go build -o syncarr-darwin-arm64 ./cmd/syncarr
+
+ - name: Upload build artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: syncarr-binaries
+ path: syncarr-*
+
+ create-release:
+ needs: [check-changes, test, build]
+ if: needs.check-changes.outputs.should_release == 'true' || github.event_name == 'workflow_dispatch'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Download build artifacts
+ uses: actions/download-artifact@v4
+ with:
+ name: syncarr-binaries
+
+ - name: Generate changelog
+ id: changelog
+ run: |
+ # Get the last release tag
+ LAST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
+
+ if [[ -n "$LAST_TAG" ]]; then
+ echo "## What's Changed" > changelog.md
+ echo "" >> changelog.md
+
+ # Get commits since last release
+ git log $LAST_TAG..HEAD --pretty=format:"- %s (%h)" --reverse >> changelog.md
+ else
+ echo "## π Initial Release of SyncArr" > changelog.md
+ echo "" >> changelog.md
+ echo "SyncArr synchronizes labeled movies and TV shows between source and destination Plex Media Servers." >> changelog.md
+ echo "" >> changelog.md
+ echo "### β¨ Features" >> changelog.md
+ echo "- π·οΈ Label-based synchronization between Plex servers" >> changelog.md
+ echo "- π Bidirectional watched state synchronization" >> changelog.md
+ echo "- π Secure SSH/SFTP file transfers with key-based authentication" >> changelog.md
+ echo "- π Incremental sync with change detection" >> changelog.md
+ echo "- π― Content filtering by patterns, size, and library rules" >> changelog.md
+ echo "- π³ Docker container with complete configuration via environment variables" >> changelog.md
+ echo "- π Comprehensive structured logging" >> changelog.md
+ echo "- β‘ Force full sync option for resolving inconsistencies" >> changelog.md
+ echo "- π§ Flexible deployment (one-shot or continuous modes)" >> changelog.md
+ echo "" >> changelog.md
+ echo "### π Getting Started" >> changelog.md
+ echo "1. Set up SSH key-based authentication between servers" >> changelog.md
+ echo "2. Configure environment variables for source/destination Plex servers" >> changelog.md
+ echo "3. Add labels to media items you want to sync" >> changelog.md
+ echo "4. Deploy using Docker Compose" >> changelog.md
+ fi
+
+ echo "" >> changelog.md
+ echo "### π³ Docker Images" >> changelog.md
+ echo "" >> changelog.md
+ echo "**Multi-architecture support:** \`linux/amd64\`, \`linux/arm64\`" >> changelog.md
+ echo "" >> changelog.md
+ echo '```bash' >> changelog.md
+ echo "# Latest release" >> changelog.md
+ echo "docker pull ghcr.io/${{ github.repository_owner }}/syncarr:${{ needs.check-changes.outputs.version }}" >> changelog.md
+ echo "" >> changelog.md
+ echo "# Always latest" >> changelog.md
+ echo "docker pull ghcr.io/${{ github.repository_owner }}/syncarr:latest" >> changelog.md
+ echo '```' >> changelog.md
+ echo "" >> changelog.md
+ echo "### π₯ Binary Downloads" >> changelog.md
+ echo "" >> changelog.md
+ echo "Pre-compiled binaries are available for multiple platforms in the release assets below." >> changelog.md
+
+ - name: Create Release
+ uses: softprops/action-gh-release@v1
+ with:
+ tag_name: ${{ needs.check-changes.outputs.version }}
+ name: Release ${{ needs.check-changes.outputs.version }}
+ body_path: changelog.md
+ files: |
+ syncarr-linux-amd64
+ syncarr-linux-arm64
+ syncarr-windows-amd64.exe
+ syncarr-darwin-amd64
+ syncarr-darwin-arm64
+ draft: false
+ prerelease: false
+ generate_release_notes: true
+
+ publish-docker:
+ needs: [check-changes, create-release]
+ if: needs.check-changes.outputs.should_release == 'true' || github.event_name == 'workflow_dispatch'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Log in to Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract metadata
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ghcr.io/${{ github.repository }}
+ tags: |
+ type=raw,value=${{ needs.check-changes.outputs.version }}
+ type=raw,value=latest
+
+ - name: Build and push Docker image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
+ - name: Generate Docker summary
+ run: |
+ echo "## π³ Docker Image Published" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Version:** ${{ needs.check-changes.outputs.version }}" >> $GITHUB_STEP_SUMMARY
+ echo "**Commit:** ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Registry:** ghcr.io" >> $GITHUB_STEP_SUMMARY
+ echo "**Platforms:** linux/amd64, linux/arm64" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Tags:**" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ echo "ghcr.io/${{ github.repository }}:${{ needs.check-changes.outputs.version }}" >> $GITHUB_STEP_SUMMARY
+ echo "ghcr.io/${{ github.repository }}:latest" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Pull command:**" >> $GITHUB_STEP_SUMMARY
+ echo '```bash' >> $GITHUB_STEP_SUMMARY
+ echo "docker pull ghcr.io/${{ github.repository }}:${{ needs.check-changes.outputs.version }}" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
\ No newline at end of file
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..8e72ad1
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,107 @@
+name: Test
+
+on:
+ pull_request:
+ branches: [ main ]
+ push:
+ branches: [ main ]
+ workflow_dispatch:
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.23'
+
+ - name: Cache Go modules
+ uses: actions/cache@v3
+ with:
+ path: ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+
+ - name: Download dependencies
+ run: go mod download
+
+ - name: Run tests
+ run: go test -v ./...
+
+ - name: Run go vet
+ run: go vet ./...
+
+ - name: Run go fmt check
+ run: |
+ if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then
+ echo "The following files need to be formatted:"
+ gofmt -s -l .
+ exit 1
+ fi
+
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.23'
+
+ - name: Run golangci-lint
+ uses: golangci/golangci-lint-action@v3
+ with:
+ version: latest
+ args: --timeout=5m
+
+ build:
+ needs: [test, lint]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.23'
+
+ - name: Build application
+ run: |
+ # Build for multiple architectures
+ CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o syncarr-linux-amd64 ./cmd/syncarr
+ CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o syncarr-linux-arm64 ./cmd/syncarr
+
+ - name: Upload build artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: syncarr-binaries
+ path: syncarr-*
+
+ docker-test:
+ needs: [test, lint]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Build Docker image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ platforms: linux/amd64,linux/arm64
+ push: false
+ tags: syncarr:test
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index aaadf73..917dbde 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,3 @@
-# If you prefer the allow list template instead of the deny list, see community template:
-# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
-#
# Binaries for programs and plugins
*.exe
*.exe~
@@ -11,22 +8,59 @@
# Test binary, built with `go test -c`
*.test
-# Code coverage profiles and other test artifacts
+# Output of the go coverage tool, specifically when used with LiteIDE
*.out
-coverage.*
-*.coverprofile
-profile.cov
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
-go.work.sum
-# env file
+# IDE files
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS generated files
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Docker
+.dockerignore
+
+# Logs
+*.log
+logs/
+
+# Local environment files
.env
+.env.local
+.env.*.local
+
+# Configuration files with secrets
+config.yaml
+config.yml
+config.json
+
+# SSH keys
+*.pem
+*.key
+id_rsa*
+*.pub
+
+# Temporary files
+tmp/
+temp/
-# Editor/IDE
-# .idea/
-# .vscode/
+# Build artifacts
+dist/
+build/
+/scripts
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..d48a97c
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,18 @@
+linters:
+ enable:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+ - gofmt
+ - goimports
+ - misspell
+
+run:
+ timeout: 5m
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..91a64ce
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,36 @@
+# Build stage
+FROM golang:1.23-alpine AS builder
+
+WORKDIR /app
+
+# Copy dependency manifests first so the module download layer is cached across source changes
+COPY go.mod go.sum ./
+RUN go mod download
+
+# Copy the rest of the source code
+COPY . .
+
+# Build the application
+RUN CGO_ENABLED=0 GOOS=linux go build -o syncarr ./cmd/syncarr
+
+# Runtime stage
+FROM alpine:latest
+
+# Install ca-certificates for HTTPS requests and debugging tools
+RUN apk update && apk upgrade && \
+ apk add --no-cache ca-certificates tzdata bash curl wget busybox-extras rsync sshpass openssh-client && \
+ ln -sf /bin/bash /bin/sh && \
+ echo "Bash installed successfully" && \
+ which bash && bash --version
+
+WORKDIR /app
+
+# Copy the binary from builder stage
+COPY --from=builder /app/syncarr .
+
+# Create a non-root user; the binary must live outside /root, which the
+# syncarr user cannot read or traverse
+RUN adduser -D -s /bin/bash syncarr && \
+ chown -R syncarr:syncarr /app
+
+USER syncarr
+
+# Run the application
+CMD ["./syncarr"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..efdd477
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,124 @@
+.PHONY: all build build-linux test test-coverage fmt vet lint clean docker-build docker-run docker-up docker-down docker-logs deps update-deps run-oneshot validate help
+
+# Build variables
+BINARY_NAME=syncarr
+VERSION ?= $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
+COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown")
+BUILD_DATE ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
+
+# Go build flags
+LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(BUILD_DATE)"
+
+# Default target
+all: fmt vet test build
+
+# Build the application
+build:
+ @echo "Building $(BINARY_NAME)..."
+ go build $(LDFLAGS) -o $(BINARY_NAME) ./cmd/syncarr
+
+# Build for Linux (useful for Docker)
+build-linux:
+ @echo "Building $(BINARY_NAME) for Linux..."
+ GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build $(LDFLAGS) -o $(BINARY_NAME)-linux ./cmd/syncarr
+
+# Run tests
+test:
+ @echo "Running tests..."
+ go test -v ./...
+
+# Run tests with coverage
+test-coverage:
+ @echo "Running tests with coverage..."
+ go test -v -coverprofile=coverage.out ./...
+ go tool cover -html=coverage.out -o coverage.html
+
+# Format code
+fmt:
+ @echo "Formatting code..."
+ go fmt ./...
+
+# Vet code
+vet:
+ @echo "Vetting code..."
+ go vet ./...
+
+# Lint code (requires golangci-lint)
+lint:
+ @echo "Linting code..."
+ golangci-lint run
+
+# Clean build artifacts
+clean:
+ @echo "Cleaning..."
+ rm -f $(BINARY_NAME) $(BINARY_NAME)-linux
+ rm -f coverage.out coverage.html
+ docker rmi syncarr:latest 2>/dev/null || true
+
+# Docker build
+docker-build:
+ @echo "Building Docker image..."
+ docker build -t syncarr:latest .
+
+# Docker run (requires environment variables to be set)
+docker-run: docker-build
+ @echo "Running Docker container..."
+ docker run --rm syncarr:latest --version
+
+# Docker compose up
+docker-up:
+ @echo "Starting with Docker Compose..."
+ docker-compose up -d
+
+# Docker compose down
+docker-down:
+ @echo "Stopping Docker Compose..."
+ docker-compose down
+
+# Docker compose logs
+docker-logs:
+ docker-compose logs -f syncarr
+
+# Install dependencies
+deps:
+ @echo "Installing dependencies..."
+ go mod download
+ go mod tidy
+
+# Update dependencies
+update-deps:
+ @echo "Updating dependencies..."
+ go get -u ./...
+ go mod tidy
+
+# Run the application (oneshot mode)
+run-oneshot:
+ @echo "Running $(BINARY_NAME) in oneshot mode..."
+ ./$(BINARY_NAME) --oneshot
+
+# Validate configuration
+validate:
+ @echo "Validating configuration..."
+ ./$(BINARY_NAME) --validate
+
+# Show help
+help:
+ @echo "Available targets:"
+ @echo " build - Build the application"
+ @echo " build-linux - Build for Linux (Docker)"
+ @echo " test - Run tests"
+ @echo " test-coverage - Run tests with coverage report"
+ @echo " fmt - Format code"
+ @echo " vet - Vet code"
+ @echo " lint - Lint code (requires golangci-lint)"
+ @echo " clean - Clean build artifacts"
+ @echo " docker-build - Build Docker image"
+ @echo " docker-run - Run Docker container"
+ @echo " docker-up - Start with Docker Compose"
+ @echo " docker-down - Stop Docker Compose"
+ @echo " docker-logs - Show Docker Compose logs"
+ @echo " deps - Install dependencies"
+ @echo " update-deps - Update dependencies"
+ @echo " run-oneshot - Run application in oneshot mode"
+ @echo " validate - Validate configuration"
+ @echo " help - Show this help"
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..eec8877
--- /dev/null
+++ b/README.md
@@ -0,0 +1,481 @@
+# SyncArr 🎬📺🔄
+
+**SyncArr** is a high-performance Go application that synchronizes labeled movies and TV shows between Plex Media Servers. It provides fast file transfers using rsync, comprehensive metadata synchronization, and intelligent content matching to keep your Plex libraries perfectly synchronized.
+
+## 🚀 Quick Start
+
+### Docker Compose Example
+
+```yaml
+version: '3.8'
+
+services:
+ syncarr:
+ image: syncarr:latest
+ container_name: syncarr
+ restart: unless-stopped
+
+ environment:
+ # Source Plex Server
+ SOURCE_PLEX_REQUIRES_HTTPS: "true"
+ SOURCE_PLEX_HOST: "192.168.1.10"
+ SOURCE_PLEX_PORT: "32400"
+ SOURCE_PLEX_TOKEN: "your-source-plex-token"
+
+ # Destination Plex Server
+ DEST_PLEX_REQUIRES_HTTPS: "true"
+ DEST_PLEX_HOST: "192.168.1.20"
+ DEST_PLEX_PORT: "32400"
+ DEST_PLEX_TOKEN: "your-destination-plex-token"
+
+ # SSH Configuration (choose password OR key-based auth)
+ OPT_SSH_USER: "your-ssh-user"
+ OPT_SSH_PASSWORD: "your-ssh-password" # For password auth
+ # OPT_SSH_KEY_PATH: "/keys/id_rsa" # For key-based auth
+ OPT_SSH_PORT: "22"
+
+ # Sync Configuration
+ SYNC_LABEL: "Sync2Secondary" # Label to identify content to sync
+ SYNC_INTERVAL: "60" # Minutes between sync cycles
+ LOG_LEVEL: "INFO" # DEBUG, INFO, WARN, ERROR
+ DRY_RUN: "false" # Set to "true" for testing
+
+ # Path Mapping
+ SOURCE_REPLACE_FROM: "/data/Media" # Source path prefix to replace
+ SOURCE_REPLACE_TO: "/media/source" # Local container path
+ DEST_ROOT_DIR: "/mnt/data" # Destination server root path
+
+ volumes:
+ # Mount your media directories (adjust paths as needed)
+ - "/path/to/your/media:/media/source:ro" # Read-only source media
+
+ # For SSH key authentication (uncomment if using keys)
+ # - "/path/to/ssh/keys:/keys:ro"
+
+ # Use host networking to access local Plex servers
+ network_mode: "host"
+
+ # Health check
+ healthcheck:
+ test: ["CMD", "./syncarr", "--validate"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+```
+
+> **💡 Pro Tip**: Start with `DRY_RUN: "true"` to test your configuration without making any changes!
+
+## ✨ Features
+
+<details>
+<summary>🎯 Core Synchronization Features</summary>
+
+- **🏷️ Label-based Sync**: Automatically sync only media items with specific Plex labels
+- **⚡ High-Performance Transfers**: Uses rsync for fast, resumable file transfers
+- **🔄 6-Phase Sync Process**: Content discovery → File transfer → Library refresh → Content matching → Metadata sync → Cleanup
+- **📝 Comprehensive Metadata Sync**: Titles, summaries, ratings, genres, labels, collections, artwork, and more
+- **👁️ Watched State Sync**: Keep viewing progress synchronized between servers
+- **🔁 Incremental Updates**: Only transfer changed or new content
+- **📁 Automatic Directory Creation**: Creates destination directories as needed
+
+</details>
+
+<details>
+<summary>🔐 Authentication & Security</summary>
+
+- **🔑 Dual SSH Authentication**: Support for both SSH keys and password authentication
+- **🔒 Secure Transfers**: All file transfers use encrypted SSH connections
+- **🛡️ Non-interactive Operation**: Uses sshpass for automated password authentication
+- **⚠️ Dry Run Mode**: Test configurations without making any changes
+
+</details>
+
+<details>
+<summary>🛠️ Advanced Features</summary>
+
+- **🐳 Docker Ready**: Containerized application with health checks
+- **📊 Structured Logging**: JSON logging with configurable levels (DEBUG, INFO, WARN, ERROR)
+- **🔁 Continuous & One-shot Modes**: Run continuously or execute single sync cycles
+- **🎛️ Force Full Sync**: Bypass incremental checks for complete re-synchronization
+- **📈 Performance Monitoring**: Detailed transfer statistics and timing information
+- **🔍 Content Matching**: Intelligent filename-based matching between source and destination
+
+</details>
+
+## 📋 Configuration
+
+<details>
+<summary>🌍 Environment Variables</summary>
+
+### Plex Server Configuration
+
+| Variable | Description | Example | Required |
+|----------|-------------|---------|----------|
+| `SOURCE_PLEX_HOST` | Source Plex server hostname/IP | `192.168.1.10` | ✅ |
+| `SOURCE_PLEX_PORT` | Source Plex server port | `32400` | ❌ |
+| `SOURCE_PLEX_TOKEN` | Source Plex server API token | `xxxxxxxxxxxx` | ✅ |
+| `SOURCE_PLEX_REQUIRES_HTTPS` | Use HTTPS for source server | `true`/`false` | ❌ |
+| `DEST_PLEX_HOST` | Destination Plex server hostname/IP | `192.168.1.20` | ✅ |
+| `DEST_PLEX_PORT` | Destination Plex server port | `32400` | ❌ |
+| `DEST_PLEX_TOKEN` | Destination Plex server API token | `xxxxxxxxxxxx` | ✅ |
+| `DEST_PLEX_REQUIRES_HTTPS` | Use HTTPS for destination server | `true`/`false` | ❌ |
+
+### SSH Configuration
+
+| Variable | Description | Example | Required |
+|----------|-------------|---------|----------|
+| `OPT_SSH_USER` | SSH username | `mediauser` | ✅ |
+| `OPT_SSH_PASSWORD` | SSH password (for password auth) | `secretpass` | ❌* |
+| `OPT_SSH_KEY_PATH` | SSH private key path (for key auth) | `/keys/id_rsa` | ❌* |
+| `OPT_SSH_PORT` | SSH port | `22` | ❌ |
+
+*Either password or key path is required
+
+### Sync Configuration
+
+| Variable | Description | Example | Required |
+|----------|-------------|---------|----------|
+| `SYNC_LABEL` | Plex label to identify content to sync | `Sync2Secondary` | ✅ |
+| `SYNC_INTERVAL` | Minutes between sync cycles | `60` | ❌ |
+| `LOG_LEVEL` | Logging level | `INFO` | ❌ |
+| `DRY_RUN` | Test mode without changes | `false` | ❌ |
+| `FORCE_FULL_SYNC` | Force complete sync | `false` | ❌ |
+
+### Path Mapping
+
+| Variable | Description | Example | Required |
+|----------|-------------|---------|----------|
+| `SOURCE_REPLACE_FROM` | Source path prefix to replace | `/data/Media` | ❌ |
+| `SOURCE_REPLACE_TO` | Container path for source media | `/media/source` | ❌ |
+| `DEST_ROOT_DIR` | Destination server root directory | `/mnt/data` | ✅ |
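+
+For clarity, here is a minimal Go sketch of how these three settings combine, assuming simple prefix
+replacement (`mapSourcePath` and `destPathFor` are hypothetical helpers for illustration, not
+SyncArr's actual implementation):
+
+```go
+package main
+
+import (
+	"fmt"
+	"path"
+	"strings"
+)
+
+// mapSourcePath rewrites the path Plex reports into the path visible inside the container.
+func mapSourcePath(plexPath, replaceFrom, replaceTo string) string {
+	if replaceFrom == "" { // replacement is optional
+		return plexPath
+	}
+	return strings.Replace(plexPath, replaceFrom, replaceTo, 1)
+}
+
+// destPathFor joins the destination root with the path relative to the source prefix.
+func destPathFor(plexPath, replaceFrom, destRoot string) string {
+	rel := strings.TrimPrefix(plexPath, replaceFrom)
+	return path.Join(destRoot, rel)
+}
+
+func main() {
+	p := "/data/Media/Movies/Example (2020)/Example.mkv"
+	fmt.Println(mapSourcePath(p, "/data/Media", "/media/source")) // /media/source/Movies/...
+	fmt.Println(destPathFor(p, "/data/Media", "/mnt/data"))       // /mnt/data/Movies/...
+}
+```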
+
+</details>
+
+<details>
+<summary>🎛️ Advanced Configuration</summary>
+
+### Performance Tuning
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `WORKER_POOL_SIZE` | Number of concurrent workers | `4` |
+| `PLEX_API_RATE_LIMIT` | Plex API requests per second | `10.0` |
+| `TRANSFER_BUFFER_SIZE` | Transfer buffer size (KB) | `64` |
+| `MAX_CONCURRENT_TRANSFERS` | Max simultaneous transfers | `3` |
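+
+As an illustration of how a limit like `MAX_CONCURRENT_TRANSFERS` is typically enforced, here is a
+small Go sketch that bounds concurrency with a buffered channel used as a semaphore (illustrative
+only, not SyncArr's internal code):
+
+```go
+package main
+
+import (
+	"fmt"
+	"sync"
+)
+
+func main() {
+	files := []string{"a.mkv", "b.mkv", "c.mkv", "d.mkv", "e.mkv"}
+	maxConcurrent := 3 // e.g. MAX_CONCURRENT_TRANSFERS=3
+
+	sem := make(chan struct{}, maxConcurrent) // at most 3 transfers hold a slot at once
+	var wg sync.WaitGroup
+	for _, f := range files {
+		wg.Add(1)
+		go func(name string) {
+			defer wg.Done()
+			sem <- struct{}{}        // acquire a slot
+			defer func() { <-sem }() // release it when the transfer ends
+			fmt.Println("transferring", name)
+		}(f)
+	}
+	wg.Wait()
+}
+```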
+
+### Transfer Options
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `ENABLE_COMPRESSION` | Enable transfer compression | `true` |
+| `RESUME_TRANSFERS` | Resume interrupted transfers | `true` |
+
+</details>
+
+## 📖 Usage
+
+<details>
+<summary>🔑 Getting Your Plex Token</summary>
+
+1. **Via Plex Web App:**
+ - Open Plex Web App
+ - Open browser developer tools (F12)
+ - Go to Network tab
+ - Refresh the page
+ - Look for requests to `/library/sections`
+ - Find the `X-Plex-Token` header value
+
+2. **Via Plex API:**
+
+ ```bash
+ curl -X POST 'https://plex.tv/api/v2/users/signin' \
+ -H 'Content-Type: application/x-www-form-urlencoded' \
+ -d 'user[login]=YOUR_EMAIL&user[password]=YOUR_PASSWORD'
+ ```
+
+</details>
+
+<details>
+<summary>🏷️ Adding Labels to Media</summary>
+
+1. **In Plex Web Interface:**
+ - Navigate to your movie or TV show
+ - Click "Edit" (pencil icon)
+ - Go to "Tags" tab
+ - Add your sync label (e.g., `Sync2Secondary`) to the "Labels" field
+ - Click "Save Changes"
+
+2. **Bulk Labeling with Labelarr:**
+ - Use [Labelarr](https://github.com/yourusername/labelarr) for bulk label management
+ - Set up rules to automatically apply labels based on criteria
+
+</details>
+
+<details>
+<summary>🖥️ Command Line Usage</summary>
+
+```bash
+# Run a single sync cycle
+docker run --rm -v $(pwd)/config:/config syncarr --oneshot
+
+# Validate configuration
+docker run --rm -v $(pwd)/config:/config syncarr --validate
+
+# Force full synchronization (bypasses incremental checks)
+docker run --rm -v $(pwd)/config:/config syncarr --force-full-sync --oneshot
+
+# Show version information
+docker run --rm syncarr --version
+
+# Run with debug logging
+docker run --rm -e LOG_LEVEL=DEBUG syncarr --oneshot
+```
+
+</details>
+
+<details>
+<summary>📊 Monitoring & Logs</summary>
+
+### View Logs
+
+```bash
+# Follow logs in real-time
+docker-compose logs -f syncarr
+
+# View last 100 lines
+docker-compose logs --tail=100 syncarr
+
+# Filter for errors only
+docker-compose logs syncarr | grep '"level":"error"'
+```
+
+### Health Check
+
+```bash
+# Check container health
+docker-compose ps
+
+# Manual health check
+docker-compose exec syncarr ./syncarr --validate
+```
+
+### Log Levels
+
+- **DEBUG**: Detailed operation logs, file-by-file progress
+- **INFO**: High-level status updates, sync summaries
+- **WARN**: Non-critical issues, skipped items
+- **ERROR**: Critical errors, failed operations
+
+</details>
+
+## 🏗️ Architecture
+
+<details>
+<summary>🔄 6-Phase Sync Process</summary>
+
+1. **🔍 Content Discovery**: Scan source Plex server for labeled media
+2. **📂 File Transfer**: Copy media files using high-performance rsync
+3. **🔄 Library Refresh**: Update destination Plex library
+4. **🎯 Content Matching**: Match source items to destination items by filename
+5. **📊 Metadata Sync**: Synchronize comprehensive metadata between matched items
+6. **🧹 Cleanup**: Remove orphaned files and update statistics
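+
+The sketch below shows how these phases fit together, as Go pseudocode with hypothetical method
+names (the real coordination lives in `internal/orchestrator`):
+
+```go
+// runPhases walks the six phases in order, stopping at the first hard failure.
+func (o *SyncOrchestrator) runPhases() error {
+	items, err := o.discoverLabeledItems() // 1. content discovery
+	if err != nil {
+		return fmt.Errorf("discovery failed: %w", err)
+	}
+	if err := o.transferFiles(items); err != nil { // 2. file transfer (rsync)
+		return fmt.Errorf("transfer failed: %w", err)
+	}
+	if err := o.refreshDestinationLibraries(); err != nil { // 3. library refresh
+		return fmt.Errorf("refresh failed: %w", err)
+	}
+	matches, err := o.matchByFilename(items) // 4. content matching
+	if err != nil {
+		return fmt.Errorf("matching failed: %w", err)
+	}
+	if err := o.syncMetadata(matches); err != nil { // 5. metadata sync
+		return fmt.Errorf("metadata sync failed: %w", err)
+	}
+	return o.cleanup() // 6. cleanup and statistics
+}
+```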
+
+</details>
+
+<details>
+<summary>🧩 Components Overview</summary>
+
+```
+┌─────────────────────┐      ┌─────────────────────┐
+│     Source Plex     │      │  Destination Plex   │
+│       Server        │      │       Server        │
+└──────────┬──────────┘      └──────────┬──────────┘
+           │                            │
+           │          SyncArr           │
+           │   ┌───────────────────┐    │
+           └───┤ Sync Orchestrator ├────┘
+               └─────────┬─────────┘
+                         │
+       ┌─────────────────┼─────────────────┐
+       │                 │                 │
+  ┌────┴──────┐    ┌─────┴───────┐   ┌─────┴───────┐
+  │ Content   │    │File Transfer│   │  Metadata   │
+  │ Discovery │    │   (rsync)   │   │Synchronizer │
+  └───────────┘    └─────────────┘   └─────────────┘
+```
+
+**Key Components:**
+
+- **🎯 Sync Orchestrator**: Coordinates the entire synchronization process
+- **🔍 Content Discovery**: Finds labeled media using Plex API
+- **📂 File Transfer**: High-performance rsync with automatic directory creation
+- **📊 Metadata Synchronizer**: Comprehensive metadata and watched state sync
+- **🌐 Plex Client**: Direct Plex API interactions with custom implementation
+- **⚙️ Configuration Manager**: Environment-based configuration management
+
+</details>
+
+## 🔧 Development
+
+<details>
+<summary>🏗️ Building from Source</summary>
+
+```bash
+# Clone the repository
+git clone https://github.com/nullable-eth/syncarr.git
+cd syncarr
+
+# Build the application
+go build -o syncarr ./cmd/syncarr
+
+# Run tests
+go test ./...
+
+# Build Docker image
+docker build -t syncarr:latest .
+
+# Run with development settings
+LOG_LEVEL=DEBUG DRY_RUN=true ./syncarr --oneshot
+```
+
+</details>
+
+<details>
+<summary>📁 Project Structure</summary>
+
+```
+syncarr/
+├── cmd/syncarr/        # Main application entry point
+├── internal/
+│   ├── config/         # Configuration management
+│   ├── discovery/      # Content discovery and matching
+│   ├── logger/         # Structured logging
+│   ├── metadata/       # Metadata synchronization
+│   ├── orchestrator/   # Main sync coordination
+│   ├── plex/           # Plex API client wrapper
+│   └── transfer/       # File transfer (rsync/scp)
+├── pkg/types/          # Shared data types
+├── docker/             # Docker configurations
+├── scripts/            # Utility scripts
+├── Dockerfile          # Docker build configuration
+└── README.md           # This file
+```
+
+</details>
+
+<details>
+<summary>🤝 Contributing</summary>
+
+We welcome contributions! Here's how to get started:
+
+1. **Fork the repository**
+2. **Create a feature branch**: `git checkout -b feature/amazing-feature`
+3. **Make your changes** and add tests
+4. **Run tests**: `go test ./...`
+5. **Build and test**: `docker build -t syncarr:test .`
+6. **Commit changes**: `git commit -m 'Add amazing feature'`
+7. **Push to branch**: `git push origin feature/amazing-feature`
+8. **Open a Pull Request**
+
+**Development Guidelines:**
+
+- Follow Go best practices and `gofmt` formatting
+- Add tests for new functionality
+- Update documentation for user-facing changes
+- Use structured logging with appropriate levels
+
+</details>
+
+## 🐛 Troubleshooting
+
+<details>
+<summary>🔧 Common Issues</summary>
+
+### SSH Authentication Failed
+
+```json
+{"level":"error","msg":"Permission denied (publickey,password)"}
+```
+
+**Solutions:**
+
+- Verify SSH credentials are correct
+- Ensure SSH user has access to destination paths
+- Test SSH connection manually: `ssh user@destination-server`
+- For password auth: Ensure `OPT_SSH_PASSWORD` is set
+- For key auth: Ensure private key is mounted and `OPT_SSH_KEY_PATH` is correct
+
+### Rsync Not Found
+
+```json
+{"level":"error","msg":"rsync: command not found"}
+```
+
+**Solutions:**
+
+- The Docker image includes rsync by default
+- If building custom image, ensure rsync is installed
+- Check container logs for rsync availability
+
+### Directory Creation Failed
+
+```json
+{"level":"error","msg":"Failed to create destination directory"}
+```
+
+**Solutions:**
+
+- Verify SSH user has write permissions on destination server
+- Check `DEST_ROOT_DIR` path exists and is accessible
+- Ensure sufficient disk space on destination
+
+### Plex Token Invalid
+
+```json
+{"level":"error","msg":"Unauthorized: Invalid token"}
+```
+
+**Solutions:**
+
+- Regenerate Plex token following the guide above
+- Verify token has access to required libraries
+- Check Plex server is accessible from container
+
+</details>
+
+<details>
+<summary>🔍 Debug Mode</summary>
+
+Enable detailed logging for troubleshooting:
+
+```yaml
+environment:
+ LOG_LEVEL: "DEBUG"
+ DRY_RUN: "true" # Test without making changes
+```
+
+**Debug logs include:**
+
+- Individual file transfer progress
+- SSH command execution details
+- Plex API request/response details
+- Metadata comparison results
+- Directory creation attempts
+
+</details>
+
+## 📄 License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## 🙏 Acknowledgments
+
+- **Plex API**: Direct integration with Plex Media Server API
+- **[logrus](https://github.com/sirupsen/logrus)**: Structured logging framework
+- **Go SSH Libraries**: Secure file transfer capabilities
+- **rsync**: High-performance file synchronization
+- **Docker**: Containerization and deployment
diff --git a/cmd/syncarr/main.go b/cmd/syncarr/main.go
new file mode 100644
index 0000000..2cdf757
--- /dev/null
+++ b/cmd/syncarr/main.go
@@ -0,0 +1,113 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/nullable-eth/syncarr/internal/config"
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/internal/orchestrator"
+)
+
+var (
+ version = "dev"
+ commit = "unknown"
+ date = "unknown"
+)
+
+func main() {
+ // Command line flags
+ var (
+ showVersion = flag.Bool("version", false, "Show version information")
+ validateOnly = flag.Bool("validate", false, "Validate configuration and exit")
+ oneShot = flag.Bool("oneshot", false, "Run sync once and exit (don't run continuously)")
+ forceFullSync = flag.Bool("force-full-sync", false, "Force a complete synchronization, bypassing incremental checks")
+ )
+ flag.Parse()
+
+ if *showVersion {
+ fmt.Printf("SyncArr %s (commit: %s, built: %s)\n", version, commit, date)
+ os.Exit(0)
+ }
+
+ // Load configuration
+ cfg, err := config.LoadConfig()
+ if err != nil {
+ log.Fatalf("Failed to load configuration: %v", err)
+ }
+
+ // Override force full sync if specified via command line
+ if *forceFullSync {
+ cfg.ForceFullSync = true
+ }
+
+ // Validate configuration
+ if err := cfg.Validate(); err != nil {
+ log.Fatalf("Configuration validation failed: %v", err)
+ }
+
+ if *validateOnly {
+ fmt.Println("Configuration is valid")
+ os.Exit(0)
+ }
+
+ // Initialize logger
+ log := logger.New(cfg.LogLevel)
+
+ log.WithFields(map[string]interface{}{
+ "version": version,
+ "commit": commit,
+ "build_date": date,
+ "source_host": cfg.Source.Host,
+ "destination_host": cfg.Destination.Host,
+ "sync_label": cfg.SyncLabel,
+ "force_full_sync": cfg.ForceFullSync,
+ "dry_run": cfg.DryRun,
+ }).Info("SyncArr starting up")
+
+ // Create sync orchestrator
+ sync, err := orchestrator.NewSyncOrchestrator(cfg, log)
+ if err != nil {
+ log.WithError(err).Fatal("Failed to create sync orchestrator")
+ }
+ defer func() {
+ if err := sync.Close(); err != nil {
+ log.WithError(err).Error("Failed to close sync orchestrator")
+ }
+ }()
+
+ // Handle force full sync
+ if err := sync.HandleForceFullSync(); err != nil {
+ log.WithError(err).Fatal("Failed to handle force full sync")
+ }
+
+ // Set up signal handling for graceful shutdown
+ sigChan := make(chan os.Signal, 1)
+ signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+
+ // Run sync
+ if *oneShot {
+ log.Info("Running single synchronization cycle")
+ if err := sync.RunSyncCycle(); err != nil {
+ log.WithError(err).Fatal("Sync failed")
+ }
+ log.Info("Single sync completed successfully")
+ } else {
+ // Run continuous sync in a goroutine
+ go func() {
+ if err := sync.RunContinuous(); err != nil {
+ log.WithError(err).Error("Continuous sync failed")
+ }
+ }()
+
+ // Wait for shutdown signal
+ sig := <-sigChan
+ log.WithField("signal", sig.String()).Info("Received shutdown signal, stopping...")
+ }
+
+ log.Info("SyncArr shutdown complete")
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..c4b670b
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,16 @@
+module github.com/nullable-eth/syncarr
+
+go 1.22
+
+toolchain go1.23.3
+
+require (
+ github.com/pkg/sftp v1.13.6
+ github.com/sirupsen/logrus v1.9.3
+ golang.org/x/crypto v0.26.0
+)
+
+require (
+ github.com/kr/fs v0.1.0 // indirect
+ golang.org/x/sys v0.23.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..6a80e99
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,56 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/internal/config/config.go b/internal/config/config.go
new file mode 100644
index 0000000..f899659
--- /dev/null
+++ b/internal/config/config.go
@@ -0,0 +1,244 @@
+package config
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Config represents the main application configuration
+type Config struct {
+ Source PlexServerConfig `json:"source"`
+ Destination PlexServerConfig `json:"destination"`
+ SyncLabel string `json:"syncLabel"`
+ SourceReplaceFrom string `json:"sourceReplaceFrom"` // Optional: Source path pattern to replace (e.g., "/data/Movies")
+ SourceReplaceTo string `json:"sourceReplaceTo"` // Optional: Local path replacement (e.g., "M:\\Movies")
+ DestRootDir string `json:"destRootDir"` // Required: Destination root path (e.g., "/mnt/data/Movies")
+ Interval time.Duration `json:"interval"`
+ SSH SSHConfig `json:"ssh"`
+ Performance PerformanceConfig `json:"performance"`
+ Transfer TransferConfig `json:"transfer"`
+ ForceFullSync bool `json:"forceFullSync"`
+ DryRun bool `json:"dryRun"`
+ LogLevel string `json:"logLevel"`
+}
+
+// PlexServerConfig represents the connection settings for a single Plex server.
+// Protocol is derived from RequireHTTPS and is not set directly.
+type PlexServerConfig struct {
+ Host string `json:"host"`
+ Port string `json:"port"`
+ Token string `json:"token"`
+ Protocol string `json:"protocol"` // http/https
+ RequireHTTPS bool `json:"requireHttps"`
+}
+
+// SSHConfig represents SSH connection configuration
+type SSHConfig struct {
+ User string `json:"user"`
+ Password string `json:"password"`
+ Port string `json:"port"`
+ KeyPath string `json:"keyPath,omitempty"` // Optional, for future key-based auth
+ StrictHostKeyCheck bool `json:"strictHostKeyCheck"` // Whether to enforce host key verification
+ KnownHostsFile string `json:"knownHostsFile,omitempty"` // Path to known_hosts file
+}
+
+// PerformanceConfig represents performance-related configuration
+type PerformanceConfig struct {
+ WorkerPoolSize int `json:"workerPoolSize"`
+ PlexAPIRateLimit float64 `json:"plexApiRateLimit"`
+ TransferBufferSize int `json:"transferBufferSize"`
+ MaxConcurrentTransfers int `json:"maxConcurrentTransfers"`
+}
+
+// TransferConfig represents transfer-related configuration
+type TransferConfig struct {
+ EnableCompression bool `json:"enableCompression"`
+ ResumeTransfers bool `json:"resumeTransfers"`
+}
+
+// LoadConfig loads configuration from environment variables
+func LoadConfig() (*Config, error) {
+ config := &Config{
+ Source: PlexServerConfig{
+ Host: getEnvWithDefault("SOURCE_PLEX_HOST", ""),
+ Port: getEnvWithDefault("SOURCE_PLEX_PORT", "32400"),
+ Token: getEnvWithDefault("SOURCE_PLEX_TOKEN", ""),
+ RequireHTTPS: parseBoolEnv("SOURCE_PLEX_REQUIRES_HTTPS", true),
+ Protocol: "https",
+ },
+ Destination: PlexServerConfig{
+ Host: getEnvWithDefault("DEST_PLEX_HOST", ""),
+ Port: getEnvWithDefault("DEST_PLEX_PORT", "32400"),
+ Token: getEnvWithDefault("DEST_PLEX_TOKEN", ""),
+ RequireHTTPS: parseBoolEnv("DEST_PLEX_REQUIRES_HTTPS", true),
+ Protocol: "https",
+ },
+ SyncLabel: getEnvWithDefault("SYNC_LABEL", ""),
+ SourceReplaceFrom: getEnvWithDefault("SOURCE_REPLACE_FROM", ""),
+ SourceReplaceTo: getEnvWithDefault("SOURCE_REPLACE_TO", ""),
+ DestRootDir: getEnvWithDefault("DEST_ROOT_DIR", ""),
+ SSH: SSHConfig{
+ User: getEnvWithDefault("OPT_SSH_USER", ""),
+ Password: getEnvWithDefault("OPT_SSH_PASSWORD", ""),
+ Port: getEnvWithDefault("OPT_SSH_PORT", "22"),
+ KeyPath: getEnvWithDefault("OPT_SSH_KEY_PATH", ""), // Keep for future use
+ },
+ DryRun: parseBoolEnv("DRY_RUN", false),
+ LogLevel: getEnvWithDefault("LOG_LEVEL", "INFO"),
+ ForceFullSync: parseBoolEnv("FORCE_FULL_SYNC", false),
+ }
+
+ // Set protocol based on RequireHTTPS
+ if !config.Source.RequireHTTPS {
+ config.Source.Protocol = "http"
+ }
+ if !config.Destination.RequireHTTPS {
+ config.Destination.Protocol = "http"
+ }
+
+ // Parse interval
+ intervalStr := getEnvWithDefault("SYNC_INTERVAL", "60")
+ intervalMinutes, err := strconv.Atoi(intervalStr)
+ if err != nil {
+ return nil, fmt.Errorf("invalid SYNC_INTERVAL: %w", err)
+ }
+ config.Interval = time.Duration(intervalMinutes) * time.Minute
+
+ // Parse performance configuration
+ config.Performance = PerformanceConfig{
+ WorkerPoolSize: int(parseIntEnv("WORKER_POOL_SIZE", 4)),
+ PlexAPIRateLimit: parseFloatEnv("PLEX_API_RATE_LIMIT", 10.0),
+ TransferBufferSize: int(parseIntEnv("TRANSFER_BUFFER_SIZE", 64)) * 1024, // Convert KB to bytes
+ MaxConcurrentTransfers: int(parseIntEnv("MAX_CONCURRENT_TRANSFERS", 3)),
+ }
+
+ // Parse transfer configuration
+ config.Transfer = TransferConfig{
+ EnableCompression: parseBoolEnv("ENABLE_COMPRESSION", true),
+ ResumeTransfers: parseBoolEnv("RESUME_TRANSFERS", true),
+ }
+
+ // Validate required fields
+ if err := config.Validate(); err != nil {
+ return nil, fmt.Errorf("configuration validation failed: %w", err)
+ }
+
+ return config, nil
+}
+
+// Validate checks if the configuration is valid
+func (c *Config) Validate() error {
+ if c.Source.Host == "" {
+ return fmt.Errorf("SOURCE_PLEX_HOST is required")
+ }
+ if c.Source.Token == "" {
+ return fmt.Errorf("SOURCE_PLEX_TOKEN is required")
+ }
+ if c.Destination.Host == "" {
+ return fmt.Errorf("DEST_PLEX_HOST is required")
+ }
+ if c.Destination.Token == "" {
+ return fmt.Errorf("DEST_PLEX_TOKEN is required")
+ }
+ if c.SyncLabel == "" {
+ return fmt.Errorf("SYNC_LABEL is required")
+ }
+
+ // SSH is optional - if not provided, run in metadata-only mode
+ // No validation required for SSH fields
+
+ // Validate path mapping configuration
+ // Source replacement is optional, but if one is provided, both must be provided
+ sourceReplaceProvided := c.SourceReplaceFrom != "" || c.SourceReplaceTo != ""
+ sourceBothProvided := c.SourceReplaceFrom != "" && c.SourceReplaceTo != ""
+
+ if sourceReplaceProvided && !sourceBothProvided {
+ return fmt.Errorf("if source path replacement is desired, both SOURCE_REPLACE_FROM and SOURCE_REPLACE_TO must be provided")
+ }
+
+ // DEST_ROOT_DIR is required if SSH is configured (file transfer mode)
+ sshConfigured := c.SSH.User != "" && (c.SSH.Password != "" || c.SSH.KeyPath != "")
+ if sshConfigured && c.DestRootDir == "" {
+ return fmt.Errorf("DEST_ROOT_DIR is required when SSH is configured for file transfer")
+ }
+
+ // Validate log level
+ validLogLevels := []string{"DEBUG", "INFO", "WARN", "ERROR"}
+ isValidLogLevel := false
+ for _, level := range validLogLevels {
+ if c.LogLevel == level {
+ isValidLogLevel = true
+ break
+ }
+ }
+ if !isValidLogLevel {
+ return fmt.Errorf("invalid LOG_LEVEL: %s (must be one of: %s)", c.LogLevel, strings.Join(validLogLevels, ", "))
+ }
+
+ // Validate performance settings
+ if c.Performance.WorkerPoolSize < 1 {
+ return fmt.Errorf("WORKER_POOL_SIZE must be at least 1")
+ }
+ if c.Performance.PlexAPIRateLimit <= 0 {
+ return fmt.Errorf("PLEX_API_RATE_LIMIT must be greater than 0")
+ }
+ if c.Performance.TransferBufferSize < 1024 {
+ return fmt.Errorf("TRANSFER_BUFFER_SIZE must be at least 1KB")
+ }
+ if c.Performance.MaxConcurrentTransfers < 1 {
+ return fmt.Errorf("MAX_CONCURRENT_TRANSFERS must be at least 1")
+ }
+
+ return nil
+}
+
+// GetSourceURL returns the full URL for the source Plex server
+func (c *Config) GetSourceURL() string {
+ return fmt.Sprintf("%s://%s:%s", c.Source.Protocol, c.Source.Host, c.Source.Port)
+}
+
+// GetDestinationURL returns the full URL for the destination Plex server
+func (c *Config) GetDestinationURL() string {
+ return fmt.Sprintf("%s://%s:%s", c.Destination.Protocol, c.Destination.Host, c.Destination.Port)
+}
+
+// Helper functions for parsing environment variables
+
+func getEnvWithDefault(key, defaultValue string) string {
+ if value := os.Getenv(key); value != "" {
+ return value
+ }
+ return defaultValue
+}
+
+func parseBoolEnv(key string, defaultValue bool) bool {
+ if value := os.Getenv(key); value != "" {
+ if parsed, err := strconv.ParseBool(value); err == nil {
+ return parsed
+ }
+ }
+ return defaultValue
+}
+
+func parseIntEnv(key string, defaultValue int64) int64 {
+ if value := os.Getenv(key); value != "" {
+ if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
+ return parsed
+ }
+ }
+ return defaultValue
+}
+
+func parseFloatEnv(key string, defaultValue float64) float64 {
+ if value := os.Getenv(key); value != "" {
+ if parsed, err := strconv.ParseFloat(value, 64); err == nil {
+ return parsed
+ }
+ }
+ return defaultValue
+}
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
new file mode 100644
index 0000000..cbdd9b5
--- /dev/null
+++ b/internal/config/config_test.go
@@ -0,0 +1,173 @@
+package config
+
+import (
+ "os"
+ "testing"
+ "time"
+)
+
+func TestLoadConfig(t *testing.T) {
+ // Set up test environment variables
+ testEnvVars := map[string]string{
+ "SOURCE_PLEX_HOST": "test-source.local",
+ "SOURCE_PLEX_PORT": "32400",
+ "SOURCE_PLEX_TOKEN": "test-source-token",
+ "SOURCE_PLEX_PROTOCOL": "http",
+ "DEST_PLEX_HOST": "test-dest.local",
+ "DEST_PLEX_PORT": "32400",
+ "DEST_PLEX_TOKEN": "test-dest-token",
+ "DEST_PLEX_PROTOCOL": "http",
+ "SYNC_LABEL": "test-sync",
+ "SYNC_INTERVAL": "30",
+ "OPT_SSH_USER": "testuser",
+ "OPT_SSH_KEY_PATH": "/test/keys/id_rsa",
+ "DEST_ROOT_DIR": "/test/dest",
+ "LOG_LEVEL": "DEBUG",
+ "DRY_RUN": "true",
+ "FORCE_FULL_SYNC": "false",
+ }
+
+ // Set environment variables
+ for key, value := range testEnvVars {
+ os.Setenv(key, value)
+ defer os.Unsetenv(key)
+ }
+
+ // Load configuration
+ cfg, err := LoadConfig()
+ if err != nil {
+ t.Fatalf("LoadConfig() failed: %v", err)
+ }
+
+ // Test source configuration
+ if cfg.Source.Host != "test-source.local" {
+ t.Errorf("Expected source host 'test-source.local', got '%s'", cfg.Source.Host)
+ }
+
+ if cfg.Source.Token != "test-source-token" {
+ t.Errorf("Expected source token 'test-source-token', got '%s'", cfg.Source.Token)
+ }
+
+ // Test destination configuration
+ if cfg.Destination.Host != "test-dest.local" {
+ t.Errorf("Expected destination host 'test-dest.local', got '%s'", cfg.Destination.Host)
+ }
+
+ // Test sync configuration
+ if cfg.SyncLabel != "test-sync" {
+ t.Errorf("Expected sync label 'test-sync', got '%s'", cfg.SyncLabel)
+ }
+
+ expectedInterval := 30 * time.Minute
+ if cfg.Interval != expectedInterval {
+ t.Errorf("Expected interval %v, got %v", expectedInterval, cfg.Interval)
+ }
+
+ // Test SSH configuration
+ if cfg.SSH.User != "testuser" {
+ t.Errorf("Expected SSH user 'testuser', got '%s'", cfg.SSH.User)
+ }
+
+ if cfg.SSH.KeyPath != "/test/keys/id_rsa" {
+ t.Errorf("Expected SSH key path '/test/keys/id_rsa', got '%s'", cfg.SSH.KeyPath)
+ }
+
+ // Test boolean flags
+ if !cfg.DryRun {
+ t.Error("Expected DryRun to be true")
+ }
+
+ if cfg.ForceFullSync {
+ t.Error("Expected ForceFullSync to be false")
+ }
+
+ // Test log level
+ if cfg.LogLevel != "DEBUG" {
+ t.Errorf("Expected log level 'DEBUG', got '%s'", cfg.LogLevel)
+ }
+}
+
+func TestConfigValidation(t *testing.T) {
+ tests := []struct {
+ name string
+ config Config
+ wantError bool
+ }{
+ {
+ name: "valid config",
+ config: Config{
+ Source: PlexServerConfig{
+ Host: "source.local",
+ Port: "32400",
+ Token: "source-token",
+ Protocol: "http",
+ },
+ Destination: PlexServerConfig{
+ Host: "dest.local",
+ Port: "32400",
+ Token: "dest-token",
+ Protocol: "http",
+ },
+ SyncLabel: "sync",
+ Interval: time.Hour,
+ SSH: SSHConfig{
+ User: "user",
+ KeyPath: "/keys/id_rsa",
+ },
+ DestRootDir: "/mnt/data",
+ LogLevel: "INFO",
+ Performance: PerformanceConfig{
+ WorkerPoolSize: 4,
+ PlexAPIRateLimit: 10.0,
+ TransferBufferSize: 65536,
+ MaxConcurrentTransfers: 3,
+ },
+ },
+ wantError: false,
+ },
+ {
+ name: "missing source host",
+ config: Config{
+ Source: PlexServerConfig{
+ Host: "", // Missing
+ Port: "32400",
+ Token: "source-token",
+ Protocol: "http",
+ },
+ Destination: PlexServerConfig{
+ Host: "dest.local",
+ Port: "32400",
+ Token: "dest-token",
+ Protocol: "http",
+ },
+ },
+ wantError: true,
+ },
+ {
+ name: "missing destination token",
+ config: Config{
+ Source: PlexServerConfig{
+ Host: "source.local",
+ Port: "32400",
+ Token: "source-token",
+ Protocol: "http",
+ },
+ Destination: PlexServerConfig{
+ Host: "dest.local",
+ Port: "32400",
+ Token: "", // Missing
+ Protocol: "http",
+ },
+ },
+ wantError: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.config.Validate()
+ if (err != nil) != tt.wantError {
+ t.Errorf("Config.Validate() error = %v, wantError %v", err, tt.wantError)
+ }
+ })
+ }
+}
diff --git a/internal/discovery/content_matcher.go b/internal/discovery/content_matcher.go
new file mode 100644
index 0000000..8bb45ab
--- /dev/null
+++ b/internal/discovery/content_matcher.go
@@ -0,0 +1,250 @@
+package discovery
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/internal/plex"
+)
+
+// ContentMatcher handles Phase 5: Content Matching
+type ContentMatcher struct {
+ sourceClient *plex.Client
+ destClient *plex.Client
+ logger *logger.Logger
+}
+
+// ItemMatch represents a matched item between source and destination with full metadata
+type ItemMatch struct {
+ SourceItem *EnhancedMediaItem
+ DestItem *EnhancedMediaItem
+ Filename string
+}
+
+// NewContentMatcher creates a new content matcher
+func NewContentMatcher(sourceClient, destClient *plex.Client, log *logger.Logger) *ContentMatcher {
+ return &ContentMatcher{
+ sourceClient: sourceClient,
+ destClient: destClient,
+ logger: log,
+ }
+}
+
+// MatchItemsByFilename implements Phase 5: Content Matching by filename with full metadata
+func (cm *ContentMatcher) MatchItemsByFilename(sourceItems []*EnhancedMediaItem) ([]ItemMatch, error) {
+ cm.logger.Info("Phase 5: Starting enhanced content matching by filename with full metadata loading")
+
+ // Get all items from destination server and load their full metadata
+ destLibraries, err := cm.destClient.GetLibraries()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get destination libraries: %w", err)
+ }
+
+ var allDestItems []*EnhancedMediaItem
+ for _, library := range destLibraries {
+ cm.logger.WithFields(map[string]interface{}{
+ "library_id": library.Key,
+ "library_title": library.Title,
+ }).Debug("Retrieving items from destination library with full metadata")
+
+ // Get library content using Key
+ items, err := cm.destClient.GetLibraryContent(library.Key)
+ if err != nil {
+ cm.logger.WithError(err).WithField("library_id", library.Key).Warn("Failed to get content from destination library")
+ continue
+ }
+
+ // Load full metadata for each destination item
+ for i, item := range items {
+ cm.logger.WithFields(map[string]interface{}{
+ "progress": fmt.Sprintf("%d/%d", i+1, len(items)),
+ "library": library.Title,
+ }).Debug("Loading full metadata for destination item")
+
+ enhancedItem, err := cm.loadDestinationFullMetadata(item, library.Key, library.Type)
+ if err != nil {
+ cm.logger.WithError(err).WithField("item", fmt.Sprintf("%T", item)).Debug("Failed to load full metadata for destination item")
+ continue
+ }
+
+ if enhancedItem != nil {
+ allDestItems = append(allDestItems, enhancedItem)
+ }
+ }
+ }
+
+ // Build filename index for destination items with enhanced metadata
+ destFileIndex := make(map[string]*EnhancedMediaItem)
+ for _, enhancedItem := range allDestItems {
+ // Extract file paths from the enhanced item
+ filePaths := cm.extractEnhancedFilePaths(enhancedItem)
+ for _, filePath := range filePaths {
+ filename := filepath.Base(filePath)
+ if filename != "" {
+ destFileIndex[filename] = enhancedItem
+ }
+ }
+ }
+
+ cm.logger.WithFields(map[string]interface{}{
+ "dest_items": len(allDestItems),
+ "indexed_files": len(destFileIndex),
+ }).Info("Built enhanced destination file index with full metadata")
+
+ // Match source items to destination items
+ var matches []ItemMatch
+ for _, sourceEnhanced := range sourceItems {
+ // Extract file paths from source enhanced item
+ sourceFilePaths := cm.extractEnhancedFilePaths(sourceEnhanced)
+
+ for _, sourceFilePath := range sourceFilePaths {
+ sourceFilename := filepath.Base(sourceFilePath)
+ if sourceFilename == "" {
+ continue
+ }
+
+ // Look for exact filename match
+ if destEnhanced, exists := destFileIndex[sourceFilename]; exists {
+ match := ItemMatch{
+ SourceItem: sourceEnhanced,
+ DestItem: destEnhanced,
+ Filename: sourceFilename,
+ }
+ matches = append(matches, match)
+
+ cm.logger.WithFields(map[string]interface{}{
+ "filename": sourceFilename,
+ "source_item": cm.getEnhancedItemTitle(sourceEnhanced),
+ "dest_item": cm.getEnhancedItemTitle(destEnhanced),
+ }).Debug("Found enhanced filename match with full metadata")
+
+ break // Only match once per source item
+ }
+ }
+ }
+
+ cm.logger.WithFields(map[string]interface{}{
+ "source_items": len(sourceItems),
+ "matches": len(matches),
+ }).Info("Enhanced content matching with full metadata complete")
+
+ return matches, nil
+}
+
+// extractFilePaths extracts file paths from metadata
+func (cm *ContentMatcher) extractFilePaths(item interface{}) []string {
+ var paths []string
+
+ switch v := item.(type) {
+ case plex.Movie:
+ for _, media := range v.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ paths = append(paths, part.File)
+ }
+ }
+ }
+ case plex.TVShow:
+ for _, media := range v.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ paths = append(paths, part.File)
+ }
+ }
+ }
+ case plex.Episode:
+ for _, media := range v.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ paths = append(paths, part.File)
+ }
+ }
+ }
+ }
+
+ return paths
+}
+
+// getItemTitle safely extracts title from an item
+func (cm *ContentMatcher) getItemTitle(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.Title
+ case plex.TVShow:
+ return v.Title
+ case plex.Episode:
+ return v.Title
+ default:
+ return "unknown"
+ }
+}
+
+// loadDestinationFullMetadata loads complete metadata for a destination item
+func (cm *ContentMatcher) loadDestinationFullMetadata(item interface{}, libraryID, libraryType string) (*EnhancedMediaItem, error) {
+ // Get the rating key from the basic item
+ ratingKey := cm.getRatingKey(item)
+ if ratingKey == "" {
+ return nil, fmt.Errorf("item has no rating key")
+ }
+
+ // Load full metadata based on item type
+ switch item.(type) {
+ case plex.Movie:
+ fullMovie, err := cm.destClient.GetMovieDetails(ratingKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load full destination movie metadata: %w", err)
+ }
+ return &EnhancedMediaItem{
+ Item: *fullMovie,
+ LibraryID: libraryID,
+ ItemType: "movie",
+ }, nil
+
+ case plex.TVShow:
+ fullTVShow, err := cm.destClient.GetTVShowDetails(ratingKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load full destination TV show metadata: %w", err)
+ }
+ return &EnhancedMediaItem{
+ Item: *fullTVShow,
+ LibraryID: libraryID,
+ ItemType: "show",
+ }, nil
+
+ case plex.Episode:
+ // For episodes, use the basic item for now
+ return &EnhancedMediaItem{
+ Item: item,
+ LibraryID: libraryID,
+ ItemType: "episode",
+ }, nil
+
+ default:
+ return nil, fmt.Errorf("unsupported destination item type: %T", item)
+ }
+}
+
+// getRatingKey safely extracts rating key from any item type
+func (cm *ContentMatcher) getRatingKey(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.RatingKey.String()
+ case plex.TVShow:
+ return v.RatingKey.String()
+ case plex.Episode:
+ return v.RatingKey.String()
+ default:
+ return ""
+ }
+}
+
+// extractEnhancedFilePaths extracts file paths from an enhanced media item
+func (cm *ContentMatcher) extractEnhancedFilePaths(enhancedItem *EnhancedMediaItem) []string {
+ return cm.extractFilePaths(enhancedItem.Item)
+}
+
+// getEnhancedItemTitle safely extracts title from an enhanced media item
+func (cm *ContentMatcher) getEnhancedItemTitle(enhancedItem *EnhancedMediaItem) string {
+ return cm.getItemTitle(enhancedItem.Item)
+}
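MatchItemsByFilename trades memory for lookups: it indexes every destination file once by basename, so each source file is matched in O(1) instead of rescanning all destination items per source item. One caveat visible in the code: identical basenames in different directories overwrite each other in the index, so the last indexed item wins. A standalone toy version of the strategy, with invented paths:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Toy standalone version of the basename-index matching used by
// MatchItemsByFilename. All paths are invented examples.
func main() {
	destPaths := []string{
		"/mnt/dest/movies/Movie (2020)/Movie (2020).mkv",
		"/mnt/dest/tv/Show/Show - S01E01.mkv",
	}
	sourcePaths := []string{
		"/data/movies/Movie (2020)/Movie (2020).mkv", // matches despite different mount point
		"/data/movies/Other (2019)/Other (2019).mkv", // no counterpart on destination
	}

	// Build the basename index once; each lookup afterwards is O(1).
	index := make(map[string]string, len(destPaths))
	for _, p := range destPaths {
		index[filepath.Base(p)] = p
	}

	for _, src := range sourcePaths {
		if dest, ok := index[filepath.Base(src)]; ok {
			fmt.Printf("matched %q -> %q\n", src, dest)
		} else {
			fmt.Printf("no match for %q\n", src)
		}
	}
}
```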
diff --git a/internal/discovery/discovery.go b/internal/discovery/discovery.go
new file mode 100644
index 0000000..ed4bb54
--- /dev/null
+++ b/internal/discovery/discovery.go
@@ -0,0 +1,203 @@
+package discovery
+
+import (
+ "fmt"
+
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/internal/plex"
+)
+
+// EnhancedMediaItem wraps Plex media items with library context and full metadata
+type EnhancedMediaItem struct {
+ Item interface{} // plex.Movie, plex.TVShow, or plex.Episode with FULL metadata
+ LibraryID string // Library ID for API operations
+ ItemType string // "movie", "show", "episode"
+}
+
+// ContentDiscovery implements Phase 1: Complete Library Scanning
+type ContentDiscovery struct {
+ sourceClient *plex.Client
+ syncLabel string
+ logger *logger.Logger
+}
+
+// NewContentDiscovery creates a new content discovery instance
+func NewContentDiscovery(sourceClient *plex.Client, syncLabel string, logger *logger.Logger) *ContentDiscovery {
+ return &ContentDiscovery{
+ sourceClient: sourceClient,
+ syncLabel: syncLabel,
+ logger: logger,
+ }
+}
+
+// DiscoverSyncableContent implements Phase 1 and 2 from the implementation plan:
+// 1. List all items from all libraries on the source server with FULL metadata
+// 2. If any movie carries the sync label, add it to the processing list with complete metadata;
+// if any TV show carries the sync label, list all episodes of all seasons and add them with complete metadata
+func (cd *ContentDiscovery) DiscoverSyncableContent() ([]*EnhancedMediaItem, error) {
+ cd.logger.Info("Phase 1: Starting enhanced content discovery with full metadata loading")
+
+ var itemsToSync []*EnhancedMediaItem
+
+ // Get all libraries from source server
+ libraries, err := cd.sourceClient.GetLibraries()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get libraries: %w", err)
+ }
+
+ cd.logger.WithField("library_count", len(libraries)).Info("Retrieved libraries from source server")
+
+ for _, library := range libraries {
+ cd.logger.WithFields(map[string]interface{}{
+ "library_id": library.Key,
+ "library_title": library.Title,
+ }).Debug("Scanning library for content with full metadata")
+
+ // Get all items from this library with basic info first
+ labeledItems, err := cd.sourceClient.GetItemsWithLabel(library.Key, cd.syncLabel)
+ if err != nil {
+ cd.logger.WithError(err).WithFields(map[string]interface{}{
+ "library_id": library.Key,
+ "sync_label": cd.syncLabel,
+ }).Warn("Failed to get items with label")
+ continue
+ }
+
+ cd.logger.WithFields(map[string]interface{}{
+ "library_id": library.Key,
+ "sync_label": cd.syncLabel,
+ "labeled_items": len(labeledItems),
+ }).Info("Retrieved items with sync label, now loading full metadata")
+
+ for i, item := range labeledItems {
+ cd.logger.WithFields(map[string]interface{}{
+ "progress": fmt.Sprintf("%d/%d", i+1, len(labeledItems)),
+ "library": library.Title,
+ }).Debug("Loading full metadata for item")
+
+ enhancedItem, err := cd.loadFullMetadata(item, library.Key, library.Type)
+ if err != nil {
+ cd.logger.WithError(err).WithField("item", fmt.Sprintf("%T", item)).Warn("Failed to load full metadata for item")
+ continue
+ }
+
+ if enhancedItem != nil {
+ itemsToSync = append(itemsToSync, enhancedItem)
+ cd.logger.WithFields(map[string]interface{}{
+ "title": cd.getItemTitle(enhancedItem.Item),
+ "item_type": enhancedItem.ItemType,
+ "library_id": enhancedItem.LibraryID,
+ }).Debug("Added item with full metadata to sync list")
+ }
+ }
+ }
+
+ cd.logger.WithField("total_items_to_sync", len(itemsToSync)).Info("Phase 1 & 2: Enhanced content discovery with full metadata complete")
+
+ return itemsToSync, nil
+}
+
+// GetItemFilePaths extracts file paths from a media item
+func (cd *ContentDiscovery) GetItemFilePaths(item interface{}) ([]string, error) {
+ var filePaths []string
+
+ switch v := item.(type) {
+ case plex.Movie:
+ for _, media := range v.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ filePaths = append(filePaths, part.File)
+ }
+ }
+ }
+ case plex.TVShow:
+ for _, media := range v.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ filePaths = append(filePaths, part.File)
+ }
+ }
+ }
+ }
+
+ return filePaths, nil
+}
+
+// loadFullMetadata loads complete metadata for an item including all labels, genres, etc.
+func (cd *ContentDiscovery) loadFullMetadata(item interface{}, libraryID, libraryType string) (*EnhancedMediaItem, error) {
+ // Get the rating key from the basic item
+ ratingKey := cd.getRatingKey(item)
+ if ratingKey == "" {
+ return nil, fmt.Errorf("item has no rating key")
+ }
+
+ // Load full metadata based on item type
+ switch item.(type) {
+ case plex.Movie:
+ fullMovie, err := cd.sourceClient.GetMovieDetails(ratingKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load full movie metadata: %w", err)
+ }
+ return &EnhancedMediaItem{
+ Item: *fullMovie,
+ LibraryID: libraryID,
+ ItemType: "movie",
+ }, nil
+
+ case plex.TVShow:
+ fullTVShow, err := cd.sourceClient.GetTVShowDetails(ratingKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load full TV show metadata: %w", err)
+ }
+ return &EnhancedMediaItem{
+ Item: *fullTVShow,
+ LibraryID: libraryID,
+ ItemType: "show",
+ }, nil
+
+ case plex.Episode:
+ // For episodes, we could add a GetEpisodeDetails method if needed
+ // For now, episodes from GetItemsWithLabel should have sufficient metadata
+ return &EnhancedMediaItem{
+ Item: item,
+ LibraryID: libraryID,
+ ItemType: "episode",
+ }, nil
+
+ default:
+ return nil, fmt.Errorf("unsupported item type: %T", item)
+ }
+}
+
+// getRatingKey safely extracts rating key from any item type
+func (cd *ContentDiscovery) getRatingKey(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.RatingKey.String()
+ case plex.TVShow:
+ return v.RatingKey.String()
+ case plex.Episode:
+ return v.RatingKey.String()
+ default:
+ return ""
+ }
+}
+
+// getItemTitle safely extracts title from any item type
+func (cd *ContentDiscovery) getItemTitle(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.Title
+ case plex.TVShow:
+ return v.Title
+ case plex.Episode:
+ return v.Title
+ default:
+ return "unknown"
+ }
+}
+
+// GetEnhancedItemFilePaths extracts file paths from an enhanced media item
+func (cd *ContentDiscovery) GetEnhancedItemFilePaths(enhancedItem *EnhancedMediaItem) ([]string, error) {
+ return cd.GetItemFilePaths(enhancedItem.Item)
+}
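getRatingKey and getItemTitle reappear with identical bodies in ContentMatcher, ContentDiscovery, and the metadata Synchronizer because Item is an interface{} over three concrete plex types. A possible follow-up refactor (sketch only; it assumes adding two accessor methods to the plex types, which this change does not do) would collapse the duplicated switches:

```go
package discovery

// Hypothetical refactor sketch: if plex.Movie, plex.TVShow, and plex.Episode
// each gained GetRatingKey/GetTitle methods, one type assertion would replace
// the three duplicated type switches. These methods do not exist yet.
type identifiable interface {
	GetRatingKey() string
	GetTitle() string
}

func ratingKeyOf(item interface{}) string {
	if v, ok := item.(identifiable); ok {
		return v.GetRatingKey()
	}
	return "" // unknown type: same fallback as the existing switches
}

func titleOf(item interface{}) string {
	if v, ok := item.(identifiable); ok {
		return v.GetTitle()
	}
	return "unknown"
}
```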
diff --git a/internal/discovery/library_manager.go b/internal/discovery/library_manager.go
new file mode 100644
index 0000000..7449388
--- /dev/null
+++ b/internal/discovery/library_manager.go
@@ -0,0 +1,213 @@
+package discovery
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/internal/plex"
+)
+
+// LibraryManager handles Phase 4: Library refresh and monitoring
+type LibraryManager struct {
+ destClient *plex.Client
+ logger *logger.Logger
+}
+
+// NewLibraryManager creates a new library manager
+func NewLibraryManager(destClient *plex.Client, log *logger.Logger) *LibraryManager {
+ return &LibraryManager{
+ destClient: destClient,
+ logger: log,
+ }
+}
+
+// TriggerRefreshAndWait triggers library scans and waits for completion
+func (lm *LibraryManager) TriggerRefreshAndWait() error {
+ lm.logger.Info("Phase 4: Triggering library refresh on destination server")
+
+ // First, wait for any existing scans to complete before starting new ones
+ lm.logger.Debug("Checking for existing library scans before starting new ones")
+ if err := lm.waitForExistingScansComplete(); err != nil {
+ lm.logger.WithError(err).Warn("Failed to wait for existing scans, proceeding anyway")
+ }
+
+ // Get all destination libraries
+ libraries, err := lm.destClient.GetLibraries()
+ if err != nil {
+ return fmt.Errorf("failed to get destination libraries: %w", err)
+ }
+
+ lm.logger.WithField("library_count", len(libraries)).Info("Triggering scans for all libraries")
+
+ // Track which libraries we successfully triggered scans for
+ var successfulScans []plex.Library
+ var failedScans []string
+
+ // Trigger scan for each library
+ for _, library := range libraries {
+ lm.logger.WithFields(map[string]interface{}{
+ "library_id": library.Key,
+ "library_title": library.Title,
+ "library_type": library.Type,
+ }).Debug("Triggering library scan")
+
+ if err := lm.destClient.TriggerLibraryScan(library.Key); err != nil {
+ lm.logger.WithError(err).WithFields(map[string]interface{}{
+ "library_id": library.Key,
+ "library_title": library.Title,
+ }).Error("Failed to trigger library scan")
+ failedScans = append(failedScans, library.Title)
+ continue
+ }
+
+ successfulScans = append(successfulScans, library)
+ }
+
+ // Log summary of scan triggers
+ lm.logger.WithFields(map[string]interface{}{
+ "successful_scans": len(successfulScans),
+ "failed_scans": len(failedScans),
+ "total_libraries": len(libraries),
+ }).Info("Library scan trigger summary")
+
+ if len(failedScans) > 0 {
+ lm.logger.WithField("failed_libraries", failedScans).Warn("Some library scans failed to trigger")
+ }
+
+ // If no scans were successfully triggered, don't wait
+ if len(successfulScans) == 0 {
+ return fmt.Errorf("failed to trigger any library scans")
+ }
+
+ // Monitor scan completion for successfully triggered scans
+ return lm.waitForAllScansComplete(successfulScans)
+}
+
+// waitForExistingScansComplete waits for any existing library scans to complete
+func (lm *LibraryManager) waitForExistingScansComplete() error {
+ lm.logger.Debug("Checking for existing library scan activities")
+
+ scanInProgress, activities, err := lm.destClient.IsLibraryScanInProgress()
+ if err != nil {
+ return fmt.Errorf("failed to check existing scan status: %w", err)
+ }
+
+ if !scanInProgress {
+ lm.logger.Debug("No existing library scans in progress")
+ return nil
+ }
+
+ lm.logger.WithField("active_scans", len(activities)).Info("Waiting for existing library scans to complete before starting new ones")
+
+ const maxExistingWaitTime = 5 * time.Minute
+ startTime := time.Now()
+
+ for {
+ if time.Since(startTime) > maxExistingWaitTime {
+ lm.logger.Warn("Timed out waiting for existing scans to complete, proceeding anyway")
+ return nil
+ }
+
+ scanInProgress, activities, err := lm.destClient.IsLibraryScanInProgress()
+ if err != nil {
+ lm.logger.WithError(err).Warn("Error checking existing scan status")
+ return nil
+ }
+
+ if !scanInProgress {
+ lm.logger.Info("Existing library scans completed")
+ return nil
+ }
+
+ lm.logger.WithField("remaining_scans", len(activities)).Debug("Still waiting for existing scans to complete")
+ time.Sleep(10 * time.Second)
+ }
+}
+
+// waitForAllScansComplete monitors all library scans until completion
+func (lm *LibraryManager) waitForAllScansComplete(libraries []plex.Library) error {
+ lm.logger.Info("Monitoring library scan completion using Plex activities API")
+
+ const (
+ pollInterval = 5 * time.Second // Check every 5 seconds
+ maxWaitTime = 10 * time.Minute // Maximum wait time
+ progressLogTime = 30 * time.Second // Log progress every 30 seconds
+ )
+
+ startTime := time.Now()
+ lastProgressLog := time.Now()
+
+ for {
+ // Check if we've exceeded maximum wait time
+ if time.Since(startTime) > maxWaitTime {
+ lm.logger.WithField("max_wait_time", maxWaitTime).Warn("Library scan monitoring timed out")
+ return fmt.Errorf("library scan monitoring timed out after %v", maxWaitTime)
+ }
+
+ // Check if any library scans are still in progress
+ scanInProgress, activities, err := lm.destClient.IsLibraryScanInProgress()
+ if err != nil {
+ lm.logger.WithError(err).Warn("Failed to check library scan status, continuing to wait")
+ time.Sleep(pollInterval)
+ continue
+ }
+
+ // If no scans in progress, we're done
+ if !scanInProgress {
+ duration := time.Since(startTime)
+ lm.logger.WithFields(map[string]interface{}{
+ "total_duration": duration,
+ "library_count": len(libraries),
+ }).Info("All library scans completed successfully")
+ return nil
+ }
+
+ // Log progress periodically at INFO level, but log individual checks at DEBUG
+ if time.Since(lastProgressLog) >= progressLogTime {
+ lm.logger.WithFields(map[string]interface{}{
+ "active_scans": len(activities),
+ "elapsed": time.Since(startTime).Round(time.Second),
+ }).Info("Library scans still in progress")
+ lm.logScanProgress(activities)
+ lastProgressLog = time.Now()
+ } else {
+ // Log individual checks at DEBUG level
+ lm.logger.WithFields(map[string]interface{}{
+ "active_scans": len(activities),
+ "elapsed": time.Since(startTime).Round(time.Second),
+ }).Debug("Checking library scan status")
+ }
+
+ // Wait before next check
+ time.Sleep(pollInterval)
+ }
+}
+
+// logScanProgress logs the current progress of library scans
+func (lm *LibraryManager) logScanProgress(activities []plex.Activity) {
+ if len(activities) == 0 {
+ return
+ }
+
+ lm.logger.WithField("active_scans", len(activities)).Debug("Library scans in progress...")
+
+ for _, activity := range activities {
+ fields := map[string]interface{}{
+ "activity_uuid": activity.UUID,
+ "title": activity.Title,
+ "progress": fmt.Sprintf("%d%%", activity.Progress),
+ }
+
+ // Add library ID if context is available
+ if activity.Context != nil && activity.Context.LibrarySectionID != "" {
+ fields["library_id"] = activity.Context.LibrarySectionID
+ }
+
+ if activity.Subtitle != "" {
+ fields["subtitle"] = activity.Subtitle
+ }
+
+ lm.logger.WithFields(fields).Debug("Scan progress")
+ }
+}
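Both wait loops above share a shape: poll a condition at a fixed interval, surface progress periodically so long waits stay visible, and bail out after a maximum wait. A generic, runnable restatement of that pattern (the condition below is a stand-in; the real probe is IsLibraryScanInProgress):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil mirrors the loop in waitForAllScansComplete: errors from the
// probe count as "not done yet", matching the log-and-keep-waiting behavior
// above rather than aborting the wait.
func pollUntil(done func() (bool, error), poll, maxWait, progressEvery time.Duration) error {
	start := time.Now()
	lastLog := start
	for {
		if time.Since(start) > maxWait {
			return errors.New("timed out waiting for condition")
		}
		if ok, err := done(); err == nil && ok {
			return nil
		}
		// Periodic progress output keeps long waits visible without spamming.
		if time.Since(lastLog) >= progressEvery {
			fmt.Printf("still waiting (%s elapsed)\n", time.Since(start).Round(time.Second))
			lastLog = time.Now()
		}
		time.Sleep(poll)
	}
}

func main() {
	deadline := time.Now().Add(300 * time.Millisecond)
	err := pollUntil(func() (bool, error) {
		return time.Now().After(deadline), nil // toy condition
	}, 50*time.Millisecond, 2*time.Second, 100*time.Millisecond)
	fmt.Println("result:", err)
}
```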
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
new file mode 100644
index 0000000..71a6762
--- /dev/null
+++ b/internal/logger/logger.go
@@ -0,0 +1,253 @@
+package logger
+
+import (
+ "os"
+ "time"
+
+ "github.com/nullable-eth/syncarr/pkg/types"
+ "github.com/sirupsen/logrus"
+)
+
+// Logger wraps logrus with our custom functionality
+type Logger struct {
+ *logrus.Logger
+}
+
+// New creates a new logger with the specified log level
+func New(level string) *Logger {
+ logger := logrus.New()
+
+ // Set log level
+ logLevel, err := logrus.ParseLevel(level)
+ if err != nil {
+ logLevel = logrus.InfoLevel
+ }
+ logger.SetLevel(logLevel)
+
+ // Set formatter
+ logger.SetFormatter(&logrus.JSONFormatter{
+ TimestampFormat: time.RFC3339,
+ })
+
+ // Set output
+ logger.SetOutput(os.Stdout)
+
+ return &Logger{Logger: logger}
+}
+
+// LogSyncStart logs the beginning of a sync cycle
+func (l *Logger) LogSyncStart(itemCount int) {
+ l.WithFields(logrus.Fields{
+ "event": "sync_start",
+ "item_count": itemCount,
+ }).Info("Starting sync cycle")
+}
+
+// LogItemProcessed logs successful processing of a media item
+func (l *Logger) LogItemProcessed(item types.SyncableItem, duration time.Duration) {
+ l.WithFields(logrus.Fields{
+ "event": "item_processed",
+ "rating_key": item.RatingKey,
+ "title": item.Title,
+ "duration_ms": duration.Milliseconds(),
+ }).Debug("Media item processed successfully")
+}
+
+// LogItemSkipped logs when a media item was skipped (unchanged)
+func (l *Logger) LogItemSkipped(item types.SyncableItem, reason string) {
+ l.WithFields(logrus.Fields{
+ "event": "item_skipped",
+ "rating_key": item.RatingKey,
+ "title": item.Title,
+ "reason": reason,
+ }).Debug("Media item skipped")
+}
+
+// LogTransferStarted logs when a file transfer begins
+func (l *Logger) LogTransferStarted(sourcePath, destPath string, sizeBytes int64) {
+ l.WithFields(logrus.Fields{
+ "event": "transfer_started",
+ "source_path": sourcePath,
+ "dest_path": destPath,
+ "size_bytes": sizeBytes,
+ }).Info("File transfer started")
+}
+
+// LogTransferCompleted logs when a file transfer completes
+func (l *Logger) LogTransferCompleted(sourcePath, destPath string, sizeBytes int64, duration time.Duration) {
+ l.WithFields(logrus.Fields{
+ "event": "transfer_completed",
+ "source_path": sourcePath,
+ "dest_path": destPath,
+ "size_bytes": sizeBytes,
+ "duration_ms": duration.Milliseconds(),
+ "rate_mbps": float64(sizeBytes*8) / float64(duration.Seconds()) / 1000000, // Mbps
+ }).Info("File transfer completed")
+}
+
+// LogError logs an error with context
+func (l *Logger) LogError(err error, context map[string]interface{}) {
+ fields := logrus.Fields{
+ "event": "error",
+ "error": err.Error(),
+ }
+
+ // Add context fields
+ for k, v := range context {
+ fields[k] = v
+ }
+
+ l.WithFields(fields).Error("An error occurred")
+}
+
+// LogSyncError logs a sync-specific error
+func (l *Logger) LogSyncError(syncErr types.SyncError) {
+ l.WithFields(logrus.Fields{
+ "event": "sync_error",
+ "error_type": syncErr.Type,
+ "message": syncErr.Message,
+ "item": syncErr.Item,
+ "library_id": syncErr.LibraryID,
+ "recoverable": syncErr.Recoverable,
+ }).Error("Sync error occurred")
+}
+
+// LogSyncComplete logs the completion of a sync cycle
+func (l *Logger) LogSyncComplete(stats types.SyncStats) {
+ l.WithFields(logrus.Fields{
+ "event": "sync_complete",
+ "items_processed": stats.ItemsProcessed,
+ "items_failed": stats.ItemsFailures,
+ "items_skipped": stats.ItemsSkipped,
+ "files_transferred": stats.FilesTransferred,
+ "bytes_transferred": stats.BytesTransferred,
+ "watched_states_synced": stats.WatchedStatesSynced,
+ "duration_ms": stats.Duration.Milliseconds(),
+ }).Info("Sync cycle completed")
+}
+
+// LogForceFullSync logs when force full sync is enabled
+func (l *Logger) LogForceFullSync() {
+ l.WithFields(logrus.Fields{
+ "event": "force_full_sync",
+ }).Info("Force full sync enabled - clearing all sync state")
+}
+
+// LogStateCleared logs when sync state is cleared
+func (l *Logger) LogStateCleared() {
+ l.WithFields(logrus.Fields{
+ "event": "state_cleared",
+ }).Info("Sync state cleared successfully")
+}
+
+// LogLibraryScanTriggered logs when a library scan is triggered
+func (l *Logger) LogLibraryScanTriggered(libraryID, libraryName string) {
+ l.WithFields(logrus.Fields{
+ "event": "library_scan_triggered",
+ "library_id": libraryID,
+ "library_name": libraryName,
+ }).Info("Library scan triggered")
+}
+
+// LogLibraryScanCompleted logs when a library scan completes
+func (l *Logger) LogLibraryScanCompleted(libraryID, libraryName string, duration time.Duration) {
+ l.WithFields(logrus.Fields{
+ "event": "library_scan_completed",
+ "library_id": libraryID,
+ "library_name": libraryName,
+ "duration_ms": duration.Milliseconds(),
+ }).Info("Library scan completed")
+}
+
+// LogWatchedStateSync logs watched state synchronization
+func (l *Logger) LogWatchedStateSync(ratingKey, title string, sourceWatched, destWatched bool) {
+ l.WithFields(logrus.Fields{
+ "event": "watched_state_sync",
+ "rating_key": ratingKey,
+ "title": title,
+ "source_watched": sourceWatched,
+ "dest_watched": destWatched,
+ }).Debug("Watched state synchronized")
+}
+
+// LogRetryAttempt logs a retry attempt
+func (l *Logger) LogRetryAttempt(operation string, attempt int, maxAttempts int, err error) {
+ l.WithFields(logrus.Fields{
+ "event": "retry_attempt",
+ "operation": operation,
+ "attempt": attempt,
+ "max_attempts": maxAttempts,
+ "error": err.Error(),
+ }).Warn("Retrying operation after error")
+}
+
+// LogDeadLetterQueue logs when an item is added to the dead letter queue
+func (l *Logger) LogDeadLetterQueue(item types.FailedItem) {
+ l.WithFields(logrus.Fields{
+ "event": "dead_letter_queue",
+ "rating_key": item.Item.RatingKey,
+ "title": item.Item.Title,
+ "error": item.Error,
+ "retry_count": item.RetryCount,
+ "max_retries": item.MaxRetries,
+ "next_retry": item.NextRetryTime,
+ "permanent": item.Permanent,
+ }).Warn("Item added to dead letter queue")
+}
+
+// LogWorkerPoolStarted logs when the worker pool starts
+func (l *Logger) LogWorkerPoolStarted(workerCount int) {
+ l.WithFields(logrus.Fields{
+ "event": "worker_pool_started",
+ "worker_count": workerCount,
+ }).Info("Worker pool started")
+}
+
+// LogWorkerPoolStopped logs when the worker pool stops
+func (l *Logger) LogWorkerPoolStopped() {
+ l.WithFields(logrus.Fields{
+ "event": "worker_pool_stopped",
+ }).Info("Worker pool stopped")
+}
+
+// LogRateLimitHit logs when rate limiting is triggered
+func (l *Logger) LogRateLimitHit(service string, waitTime time.Duration) {
+ l.WithFields(logrus.Fields{
+ "event": "rate_limit_hit",
+ "service": service,
+ "wait_time": waitTime.Milliseconds(),
+ }).Debug("Rate limit hit, waiting")
+}
+
+// LogBandwidthThrottled logs when bandwidth throttling is applied
+func (l *Logger) LogBandwidthThrottled(currentRate, limitRate float64) {
+ l.WithFields(logrus.Fields{
+ "event": "bandwidth_throttled",
+ "current_mbps": currentRate,
+ "limit_mbps": limitRate,
+ }).Debug("Bandwidth throttling applied")
+}
+
+// LogCompressionUsed logs when compression is used for a transfer
+func (l *Logger) LogCompressionUsed(filePath string, originalSize, compressedSize int64, algorithm string) {
+ // Guard against a zero original size so the ratio cannot become NaN or Inf
+ compressionRatio := 0.0
+ if originalSize > 0 {
+ compressionRatio = float64(compressedSize) / float64(originalSize)
+ }
+ l.WithFields(logrus.Fields{
+ "event": "compression_used",
+ "file_path": filePath,
+ "original_size": originalSize,
+ "compressed_size": compressedSize,
+ "compression_ratio": compressionRatio,
+ "algorithm": algorithm,
+ }).Debug("Compression applied to transfer")
+}
+
+// LogTransferResumed logs when a transfer is resumed
+func (l *Logger) LogTransferResumed(filePath string, resumePosition int64, totalSize int64) {
+ l.WithFields(logrus.Fields{
+ "event": "transfer_resumed",
+ "file_path": filePath,
+ "resume_position": resumePosition,
+ "total_size": totalSize,
+ "progress_pct": float64(resumePosition) / float64(totalSize) * 100,
+ }).Info("File transfer resumed")
+}
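Typical call sites for the logger look like the following; every helper above emits one JSON object per line on stdout, so aggregators can filter on the `event` field. (Paths and sizes here are invented.)

```go
package main

import (
	"time"

	"github.com/nullable-eth/syncarr/internal/logger"
)

func main() {
	log := logger.New("DEBUG") // falls back to INFO on an unparseable level

	log.LogWorkerPoolStarted(4)
	log.LogTransferStarted("/data/movie.mkv", "/mnt/dest/movie.mkv", 1<<30)
	log.LogTransferCompleted("/data/movie.mkv", "/mnt/dest/movie.mkv", 1<<30, 42*time.Second)
	log.LogWorkerPoolStopped()
}
```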
diff --git a/internal/metadata/sync.go b/internal/metadata/sync.go
new file mode 100644
index 0000000..120d96e
--- /dev/null
+++ b/internal/metadata/sync.go
@@ -0,0 +1,539 @@
+package metadata
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/nullable-eth/syncarr/internal/discovery"
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/internal/plex"
+)
+
+// Synchronizer handles metadata synchronization between source and destination Plex servers
+type Synchronizer struct {
+ sourceClient *plex.Client
+ destClient *plex.Client
+ logger *logger.Logger
+}
+
+// NewSynchronizer creates a new metadata synchronizer
+func NewSynchronizer(sourceClient, destClient *plex.Client, logger *logger.Logger) *Synchronizer {
+ return &Synchronizer{
+ sourceClient: sourceClient,
+ destClient: destClient,
+ logger: logger,
+ }
+}
+
+// SyncMetadata synchronizes metadata for a single media item using concrete plex types
+func (s *Synchronizer) SyncMetadata(sourceItem interface{}, destRatingKey string) error {
+ // Extract rating key and title from concrete plex types
+ sourceRatingKey := s.getItemRatingKey(sourceItem)
+ itemTitle := s.getItemTitle(sourceItem)
+
+ if sourceRatingKey == "" {
+ return fmt.Errorf("source item has no rating key")
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": destRatingKey,
+ "title": itemTitle,
+ }).Debug("Starting comprehensive metadata synchronization")
+
+ var syncErrors []string
+
+ // Sync watched state
+ if err := s.syncWatchedState(sourceRatingKey, destRatingKey); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync watched state")
+ syncErrors = append(syncErrors, fmt.Sprintf("watched state: %v", err))
+ }
+
+ // Sync metadata based on item type
+ switch sourceItem := sourceItem.(type) {
+ case plex.Movie:
+ if err := s.syncMovieMetadata(sourceItem, destRatingKey); err != nil {
+ syncErrors = append(syncErrors, fmt.Sprintf("movie metadata: %v", err))
+ }
+ case plex.TVShow:
+ if err := s.syncTVShowMetadata(sourceItem, destRatingKey); err != nil {
+ syncErrors = append(syncErrors, fmt.Sprintf("TV show metadata: %v", err))
+ }
+ default:
+ s.logger.WithField("item_type", fmt.Sprintf("%T", sourceItem)).Debug("Unsupported item type for comprehensive sync")
+ syncErrors = append(syncErrors, "unsupported item type")
+ }
+
+ if len(syncErrors) > 0 {
+ s.logger.WithFields(map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": destRatingKey,
+ "errors": syncErrors,
+ }).Warn("Some metadata synchronization operations failed")
+ return fmt.Errorf("metadata sync partially failed: %v", syncErrors)
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": destRatingKey,
+ }).Debug("Comprehensive metadata synchronization completed")
+
+ return nil
+}
+
+// SyncEnhancedMetadata synchronizes comprehensive metadata using enhanced items with library context
+func (s *Synchronizer) SyncEnhancedMetadata(sourceEnhanced, destEnhanced *discovery.EnhancedMediaItem) error {
+ sourceRatingKey := s.getItemRatingKey(sourceEnhanced.Item)
+ destRatingKey := s.getItemRatingKey(destEnhanced.Item)
+ itemTitle := s.getItemTitle(sourceEnhanced.Item)
+
+ if sourceRatingKey == "" || destRatingKey == "" {
+ return fmt.Errorf("source or destination item has no rating key")
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": destRatingKey,
+ "source_library": sourceEnhanced.LibraryID,
+ "dest_library": destEnhanced.LibraryID,
+ "title": itemTitle,
+ }).Debug("Starting enhanced metadata synchronization with library context")
+
+ var syncErrors []string
+
+ // Sync watched state
+ if err := s.syncWatchedState(sourceRatingKey, destRatingKey); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync watched state")
+ syncErrors = append(syncErrors, fmt.Sprintf("watched state: %v", err))
+ }
+
+ // Sync metadata based on item type with library context
+ switch sourceItem := sourceEnhanced.Item.(type) {
+ case plex.Movie:
+ if err := s.syncEnhancedMovieMetadata(sourceItem, destRatingKey, destEnhanced.LibraryID); err != nil {
+ syncErrors = append(syncErrors, fmt.Sprintf("enhanced movie metadata: %v", err))
+ }
+ case plex.TVShow:
+ if err := s.syncEnhancedTVShowMetadata(sourceItem, destRatingKey, destEnhanced.LibraryID); err != nil {
+ syncErrors = append(syncErrors, fmt.Sprintf("enhanced TV show metadata: %v", err))
+ }
+ default:
+ s.logger.WithField("item_type", fmt.Sprintf("%T", sourceEnhanced.Item)).Debug("Unsupported item type for enhanced sync")
+ syncErrors = append(syncErrors, "unsupported item type")
+ }
+
+ if len(syncErrors) > 0 {
+ s.logger.WithFields(map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": destRatingKey,
+ "errors": syncErrors,
+ }).Warn("Some enhanced metadata synchronization operations failed")
+ return fmt.Errorf("enhanced metadata sync partially failed: %v", syncErrors)
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": destRatingKey,
+ }).Debug("Enhanced metadata synchronization completed")
+
+ return nil
+}
+
+// syncWatchedState synchronizes watched state between source and destination
+func (s *Synchronizer) syncWatchedState(sourceRatingKey, destRatingKey string) error {
+ // Get watched state from source
+ sourceWatchedState, err := s.sourceClient.GetWatchedState(sourceRatingKey)
+ if err != nil {
+ return fmt.Errorf("failed to get source watched state: %w", err)
+ }
+
+ // Get watched state from destination
+ destWatchedState, err := s.destClient.GetWatchedState(destRatingKey)
+ if err != nil {
+ return fmt.Errorf("failed to get destination watched state: %w", err)
+ }
+
+ // Determine which state is more recent and sync accordingly
+ syncToDest := false
+ syncToSource := false
+
+ // If source is watched but destination is not, sync to destination
+ if sourceWatchedState.Watched && !destWatchedState.Watched {
+ if destWatchedState.LastViewedAt == 0 ||
+ sourceWatchedState.LastViewedAt > destWatchedState.LastViewedAt {
+ syncToDest = true
+ }
+ }
+
+ // If destination is watched but source is not, sync to source
+ if !sourceWatchedState.Watched && destWatchedState.Watched {
+ if sourceWatchedState.LastViewedAt == 0 ||
+ destWatchedState.LastViewedAt > sourceWatchedState.LastViewedAt {
+ syncToSource = true
+ }
+ }
+
+ // If both are watched, sync the one with the higher view count or more recent date
+ if sourceWatchedState.Watched && destWatchedState.Watched {
+ if sourceWatchedState.ViewCount > destWatchedState.ViewCount {
+ syncToDest = true
+ } else if destWatchedState.ViewCount > sourceWatchedState.ViewCount {
+ syncToSource = true
+ } else if sourceWatchedState.LastViewedAt > destWatchedState.LastViewedAt {
+ syncToDest = true
+ } else if destWatchedState.LastViewedAt > sourceWatchedState.LastViewedAt {
+ syncToSource = true
+ }
+ }
+
+ // Perform synchronization
+ if syncToDest {
+ if err := s.destClient.SetWatchedState(destRatingKey, sourceWatchedState.Watched); err != nil {
+ return fmt.Errorf("failed to sync watched state to destination: %w", err)
+ }
+ s.logger.LogWatchedStateSync(destRatingKey, "", sourceWatchedState.Watched, destWatchedState.Watched)
+ }
+
+ if syncToSource {
+ if err := s.sourceClient.SetWatchedState(sourceRatingKey, destWatchedState.Watched); err != nil {
+ return fmt.Errorf("failed to sync watched state to source: %w", err)
+ }
+ s.logger.LogWatchedStateSync(sourceRatingKey, "", destWatchedState.Watched, sourceWatchedState.Watched)
+ }
+
+ return nil
+}
+
+// syncMovieMetadata synchronizes all movie-specific metadata fields
+func (s *Synchronizer) syncMovieMetadata(sourceMovie plex.Movie, destRatingKey string) error {
+ var errors []string
+
+ // We need the library ID for some operations - for now we'll skip operations that require it
+ // TODO: Pass library ID through the sync chain
+
+ // Sync user rating
+ if sourceMovie.UserRating.Value > 0 {
+ if err := s.destClient.SetUserRating(destRatingKey, sourceMovie.UserRating.Value); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync user rating")
+ errors = append(errors, fmt.Sprintf("user rating: %v", err))
+ }
+ }
+
+ // Sync labels (requires library ID - skip for now)
+ if len(sourceMovie.Label) > 0 {
+ s.logger.Debug("Label sync requires library ID - skipping for now")
+ // labels := s.extractMovieLabels(sourceMovie)
+ // if err := s.destClient.SetLabels(destRatingKey, libraryID, labels); err != nil {
+ // errors = append(errors, fmt.Sprintf("labels: %v", err))
+ // }
+ }
+
+ if len(errors) > 0 {
+ return fmt.Errorf("movie metadata sync errors: %v", errors)
+ }
+
+ s.logger.WithField("dest_rating_key", destRatingKey).Debug("Movie metadata sync completed")
+ return nil
+}
+
+// syncTVShowMetadata synchronizes all TV show-specific metadata fields
+func (s *Synchronizer) syncTVShowMetadata(sourceTVShow plex.TVShow, destRatingKey string) error {
+ var errors []string
+
+ // Sync user rating
+ if sourceTVShow.UserRating.Value > 0 {
+ if err := s.destClient.SetUserRating(destRatingKey, sourceTVShow.UserRating.Value); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync user rating")
+ errors = append(errors, fmt.Sprintf("user rating: %v", err))
+ }
+ }
+
+ // Sync labels (requires library ID - skip for now)
+ if len(sourceTVShow.Label) > 0 {
+ s.logger.Debug("Label sync requires library ID - skipping for now")
+ // labels := s.extractTVShowLabels(sourceTVShow)
+ // if err := s.destClient.SetLabels(destRatingKey, libraryID, labels); err != nil {
+ // errors = append(errors, fmt.Sprintf("labels: %v", err))
+ // }
+ }
+
+ if len(errors) > 0 {
+ return fmt.Errorf("TV show metadata sync errors: %v", errors)
+ }
+
+ s.logger.WithField("dest_rating_key", destRatingKey).Debug("TV show metadata sync completed")
+ return nil
+}
+
+// syncEnhancedMovieMetadata synchronizes all movie metadata fields with library context
+func (s *Synchronizer) syncEnhancedMovieMetadata(sourceMovie plex.Movie, destRatingKey, destLibraryID string) error {
+ var errors []string
+
+ // Sync user rating
+ if sourceMovie.UserRating.Value > 0 {
+ if err := s.destClient.SetUserRating(destRatingKey, sourceMovie.UserRating.Value); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync user rating")
+ errors = append(errors, fmt.Sprintf("user rating: %v", err))
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "rating_key": destRatingKey,
+ "rating": sourceMovie.UserRating.Value,
+ }).Debug("Synced user rating")
+ }
+ }
+
+ // Sync labels - now we have the library ID!
+ if len(sourceMovie.Label) > 0 {
+ labels := s.extractMovieLabels(sourceMovie)
+ if err := s.destClient.SetLabels(destRatingKey, destLibraryID, labels); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync labels")
+ errors = append(errors, fmt.Sprintf("labels: %v", err))
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "rating_key": destRatingKey,
+ "library_id": destLibraryID,
+ "labels": labels,
+ "label_count": len(labels),
+ }).Debug("Synced labels")
+ }
+ }
+
+ // Sync genres using the existing UpdateMediaField method
+ if len(sourceMovie.Genre) > 0 {
+ genres := s.extractMovieGenres(sourceMovie)
+ if err := s.destClient.UpdateMediaField(destRatingKey, destLibraryID, genres, "genre", "movie"); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync genres")
+ errors = append(errors, fmt.Sprintf("genres: %v", err))
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "rating_key": destRatingKey,
+ "library_id": destLibraryID,
+ "genres": genres,
+ "genre_count": len(genres),
+ }).Debug("Synced genres")
+ }
+ }
+
+ if len(errors) > 0 {
+ return fmt.Errorf("enhanced movie metadata sync errors: %v", errors)
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "dest_rating_key": destRatingKey,
+ "dest_library_id": destLibraryID,
+ }).Debug("Enhanced movie metadata sync completed")
+ return nil
+}
+
+// syncEnhancedTVShowMetadata synchronizes all TV show metadata fields with library context
+func (s *Synchronizer) syncEnhancedTVShowMetadata(sourceTVShow plex.TVShow, destRatingKey, destLibraryID string) error {
+ var errors []string
+
+ // Sync user rating
+ if sourceTVShow.UserRating.Value > 0 {
+ if err := s.destClient.SetUserRating(destRatingKey, sourceTVShow.UserRating.Value); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync user rating")
+ errors = append(errors, fmt.Sprintf("user rating: %v", err))
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "rating_key": destRatingKey,
+ "rating": sourceTVShow.UserRating.Value,
+ }).Debug("Synced user rating")
+ }
+ }
+
+ // Sync labels - now we have the library ID!
+ if len(sourceTVShow.Label) > 0 {
+ labels := s.extractTVShowLabels(sourceTVShow)
+ if err := s.destClient.SetLabels(destRatingKey, destLibraryID, labels); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync labels")
+ errors = append(errors, fmt.Sprintf("labels: %v", err))
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "rating_key": destRatingKey,
+ "library_id": destLibraryID,
+ "labels": labels,
+ "label_count": len(labels),
+ }).Debug("Synced labels")
+ }
+ }
+
+ // Sync genres using the existing UpdateMediaField method
+ if len(sourceTVShow.Genre) > 0 {
+ genres := s.extractTVShowGenres(sourceTVShow)
+ if err := s.destClient.UpdateMediaField(destRatingKey, destLibraryID, genres, "genre", "show"); err != nil {
+ s.logger.WithError(err).Debug("Failed to sync genres")
+ errors = append(errors, fmt.Sprintf("genres: %v", err))
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "rating_key": destRatingKey,
+ "library_id": destLibraryID,
+ "genres": genres,
+ "genre_count": len(genres),
+ }).Debug("Synced genres")
+ }
+ }
+
+ if len(errors) > 0 {
+ return fmt.Errorf("enhanced TV show metadata sync errors: %v", errors)
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "dest_rating_key": destRatingKey,
+ "dest_library_id": destLibraryID,
+ }).Debug("Enhanced TV show metadata sync completed")
+ return nil
+}
+
+// extractMovieLabels extracts label strings from a Movie
+func (s *Synchronizer) extractMovieLabels(movie plex.Movie) []string {
+ var labels []string
+ for _, label := range movie.Label {
+ labels = append(labels, label.Tag)
+ }
+ return labels
+}
+
+// extractTVShowLabels extracts label strings from a TV Show
+func (s *Synchronizer) extractTVShowLabels(tvshow plex.TVShow) []string {
+ var labels []string
+ for _, label := range tvshow.Label {
+ labels = append(labels, label.Tag)
+ }
+ return labels
+}
+
+// extractMovieGenres extracts genre strings from a Movie
+func (s *Synchronizer) extractMovieGenres(movie plex.Movie) []string {
+ var genres []string
+ for _, genre := range movie.Genre {
+ genres = append(genres, genre.Tag)
+ }
+ return genres
+}
+
+// extractTVShowGenres extracts genre strings from a TV Show
+func (s *Synchronizer) extractTVShowGenres(tvshow plex.TVShow) []string {
+ var genres []string
+ for _, genre := range tvshow.Genre {
+ genres = append(genres, genre.Tag)
+ }
+ return genres
+}
+
+// SyncBulkMetadata synchronizes metadata for multiple items using concrete plex types
+func (s *Synchronizer) SyncBulkMetadata(items []MetadataSync) error {
+ for i, item := range items {
+ itemTitle := s.getItemTitle(item.SourceItem)
+ sourceRatingKey := s.getItemRatingKey(item.SourceItem)
+
+ s.logger.WithFields(map[string]interface{}{
+ "progress": fmt.Sprintf("%d/%d", i+1, len(items)),
+ "title": itemTitle,
+ }).Debug("Processing metadata sync")
+
+ if err := s.SyncMetadata(item.SourceItem, item.DestRatingKey); err != nil {
+ s.logger.LogError(err, map[string]interface{}{
+ "source_rating_key": sourceRatingKey,
+ "dest_rating_key": item.DestRatingKey,
+ "title": itemTitle,
+ })
+ // Continue with other items even if one fails
+ }
+
+ // Small delay to avoid overwhelming the servers
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ return nil
+}
+
+// MetadataSync represents a metadata synchronization operation using concrete plex types
+type MetadataSync struct {
+ SourceItem interface{} `json:"sourceItem"` // Concrete plex types (Movie, TVShow, Episode)
+ DestRatingKey string `json:"destRatingKey"`
+}
+
+// ValidateMetadataConsistency checks if metadata is consistent between source and destination
+func (s *Synchronizer) ValidateMetadataConsistency(sourceRatingKey, destRatingKey string) (*ConsistencyReport, error) {
+ report := &ConsistencyReport{
+ SourceRatingKey: sourceRatingKey,
+ DestRatingKey: destRatingKey,
+ Issues: []string{},
+ Timestamp: time.Now(),
+ }
+
+ // Check watched state consistency
+ sourceWatched, err := s.sourceClient.GetWatchedState(sourceRatingKey)
+ if err != nil {
+ report.Issues = append(report.Issues, fmt.Sprintf("Failed to get source watched state: %v", err))
+ return report, nil
+ }
+
+ destWatched, err := s.destClient.GetWatchedState(destRatingKey)
+ if err != nil {
+ report.Issues = append(report.Issues, fmt.Sprintf("Failed to get destination watched state: %v", err))
+ return report, nil
+ }
+
+ if sourceWatched.Watched != destWatched.Watched {
+ report.Issues = append(report.Issues,
+ fmt.Sprintf("Watched state mismatch: source=%t, dest=%t",
+ sourceWatched.Watched, destWatched.Watched))
+ }
+
+ if abs(sourceWatched.ViewCount-destWatched.ViewCount) > 1 {
+ report.Issues = append(report.Issues,
+ fmt.Sprintf("View count mismatch: source=%d, dest=%d",
+ sourceWatched.ViewCount, destWatched.ViewCount))
+ }
+
+ report.IsConsistent = len(report.Issues) == 0
+ return report, nil
+}
+
+// ConsistencyReport represents the result of a metadata consistency check
+type ConsistencyReport struct {
+ SourceRatingKey string `json:"sourceRatingKey"`
+ DestRatingKey string `json:"destRatingKey"`
+ IsConsistent bool `json:"isConsistent"`
+ Issues []string `json:"issues"`
+ Timestamp time.Time `json:"timestamp"`
+}
+
+// Helper functions
+
+// getItemRatingKey safely extracts rating key from concrete plex types
+func (s *Synchronizer) getItemRatingKey(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.RatingKey.String()
+ case plex.TVShow:
+ return v.RatingKey.String()
+ case plex.Episode:
+ return v.RatingKey.String()
+ default:
+ return ""
+ }
+}
+
+// getItemTitle safely extracts title from concrete plex types
+func (s *Synchronizer) getItemTitle(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.Title
+ case plex.TVShow:
+ return v.Title
+ case plex.Episode:
+ return v.Title
+ default:
+ return "unknown"
+ }
+}
+
+func abs(x int) int {
+ if x < 0 {
+ return -x
+ }
+ return x
+}
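The heart of syncWatchedState is a three-way decision: an unwatched side inherits from a watched side when the watched side's timestamp is newer (or the unwatched side has never been viewed), and two watched sides tie-break first on view count, then on most recent view time. Restated as a pure function for clarity; the struct is a local stand-in for the WatchedState values used above, not the real type:

```go
package main

import "fmt"

type watchedState struct {
	Watched      bool
	ViewCount    int
	LastViewedAt int64
}

// resolve reports which side should be updated: "dest", "source", or "none".
// The branches mirror syncWatchedState above one-for-one.
func resolve(src, dst watchedState) string {
	switch {
	case src.Watched && !dst.Watched:
		if dst.LastViewedAt == 0 || src.LastViewedAt > dst.LastViewedAt {
			return "dest"
		}
	case !src.Watched && dst.Watched:
		if src.LastViewedAt == 0 || dst.LastViewedAt > src.LastViewedAt {
			return "source"
		}
	case src.Watched && dst.Watched:
		// Tie-break first by view count, then by most recent view time.
		switch {
		case src.ViewCount > dst.ViewCount:
			return "dest"
		case dst.ViewCount > src.ViewCount:
			return "source"
		case src.LastViewedAt > dst.LastViewedAt:
			return "dest"
		case dst.LastViewedAt > src.LastViewedAt:
			return "source"
		}
	}
	return "none"
}

func main() {
	fmt.Println(resolve(watchedState{Watched: true, LastViewedAt: 100}, watchedState{})) // dest
	fmt.Println(resolve(watchedState{}, watchedState{Watched: true, LastViewedAt: 50}))  // source
}
```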
diff --git a/internal/orchestrator/sync.go b/internal/orchestrator/sync.go
new file mode 100644
index 0000000..58ed09c
--- /dev/null
+++ b/internal/orchestrator/sync.go
@@ -0,0 +1,897 @@
+package orchestrator
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/nullable-eth/syncarr/internal/config"
+ "github.com/nullable-eth/syncarr/internal/discovery"
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/internal/metadata"
+ "github.com/nullable-eth/syncarr/internal/plex"
+ "github.com/nullable-eth/syncarr/internal/transfer"
+)
+
+// SyncOrchestrator coordinates the 6-phase synchronization process
+type SyncOrchestrator struct {
+ config *config.Config
+ logger *logger.Logger
+ sourceClient *plex.Client
+ destClient *plex.Client
+ contentDiscovery *discovery.ContentDiscovery
+ fileTransfer transfer.FileTransferrer
+ libraryManager *discovery.LibraryManager
+ contentMatcher *discovery.ContentMatcher
+ metadataSync *metadata.Synchronizer
+ lastSyncTime time.Time
+ syncedFiles map[string]bool // Track files that should exist on destination
+}
+
+// NewSyncOrchestrator creates a new sync orchestrator with all required components
+func NewSyncOrchestrator(cfg *config.Config, log *logger.Logger) (*SyncOrchestrator, error) {
+ orchestrator := &SyncOrchestrator{
+ config: cfg,
+ logger: log,
+ syncedFiles: make(map[string]bool),
+ }
+
+ // Initialize Plex clients
+ log.Info("Creating source Plex client")
+ sourceClient, err := plex.NewClient(&cfg.Source, log)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create source Plex client: %w", err)
+ }
+ orchestrator.sourceClient = sourceClient
+
+ log.Info("Creating destination Plex client")
+ destClient, err := plex.NewClient(&cfg.Destination, log)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create destination Plex client: %w", err)
+ }
+ orchestrator.destClient = destClient
+
+ // Initialize content discovery (Phase 1 & 2)
+ orchestrator.contentDiscovery = discovery.NewContentDiscovery(sourceClient, cfg.SyncLabel, log)
+
+ // Phase 3: Transfer Files - Auto-detect optimal transfer method
+ if isSSHConfigured(cfg.SSH, log) {
+ // Auto-detect optimal transfer method (rsync preferred for performance)
+ transferMethod := transfer.GetOptimalTransferMethod(log)
+ fileTransfer, err := transfer.NewTransferrer(transferMethod, cfg, log)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create file transferrer: %w", err)
+ }
+ orchestrator.fileTransfer = fileTransfer
+ log.WithField("transfer_method", string(transferMethod)).Info("High-performance file transfer enabled")
+ } else {
+ log.Info("SSH not configured - running in metadata-only sync mode")
+ }
+
+ // Initialize library manager (Phase 4)
+ orchestrator.libraryManager = discovery.NewLibraryManager(destClient, log)
+
+ // Initialize content matcher (Phase 5)
+ orchestrator.contentMatcher = discovery.NewContentMatcher(sourceClient, destClient, log)
+
+ // Initialize metadata synchronizer (Phase 6)
+ orchestrator.metadataSync = metadata.NewSynchronizer(sourceClient, destClient, log)
+
+ return orchestrator, nil
+}
+
+// Close closes all connections and resources
+func (s *SyncOrchestrator) Close() error {
+ var errs []error
+
+ if s.fileTransfer != nil {
+ if err := s.fileTransfer.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("failed to close transfer client: %w", err))
+ }
+ }
+
+ if len(errs) > 0 {
+ return fmt.Errorf("errors closing orchestrator: %v", errs)
+ }
+
+ return nil
+}
+
+// RunSyncCycle executes the complete 6-phase synchronization workflow
+func (s *SyncOrchestrator) RunSyncCycle() error {
+ startTime := time.Now()
+ s.logger.Info("Starting 6-phase synchronization cycle")
+
+ defer func() {
+ duration := time.Since(startTime)
+ s.logger.WithField("total_duration", duration).Info("Sync cycle completed successfully")
+ s.lastSyncTime = startTime
+ }()
+
+ // Pre-flight check: Test destination server availability
+ s.logger.Debug("Testing destination server availability")
+ if err := s.destClient.TestConnection(); err != nil {
+ s.logger.WithError(err).Warn("Destination Plex server is not available, skipping sync cycle")
+ return fmt.Errorf("destination server unavailable: %w", err)
+ }
+ s.logger.Info("Destination server is available, proceeding with sync")
+
+ // Phase 1 & 2: Content Discovery and Filtering with Full Metadata
+ s.logger.Info("Phase 1 & 2: Discovering and filtering syncable content with full metadata")
+ itemsToSync, err := s.contentDiscovery.DiscoverSyncableContent()
+ if err != nil {
+ return fmt.Errorf("content discovery failed: %w", err)
+ }
+ s.logger.WithField("item_count", len(itemsToSync)).Info("Enhanced content discovery complete")
+
+ if len(itemsToSync) == 0 {
+ s.logger.Info("No items found for synchronization")
+ return nil
+ }
+
+ // Phase 3: File Transfer (skip if SSH not configured)
+ if s.fileTransfer != nil {
+ s.logger.Info("Phase 3: Transferring files")
+
+ // Clear the synced files map for this cycle
+ s.syncedFiles = make(map[string]bool)
+
+ totalItems := len(itemsToSync)
+ var transferredCount, errorCount int
+
+ for i, enhancedItem := range itemsToSync {
+ s.logger.WithFields(map[string]interface{}{
+ "progress": fmt.Sprintf("%d/%d", i+1, totalItems),
+ "title": s.getEnhancedItemTitle(enhancedItem),
+ "library_id": enhancedItem.LibraryID,
+ }).Debug("Transferring enhanced item files")
+
+ if err := s.transferEnhancedItemFiles(enhancedItem); err != nil {
+ s.logger.WithError(err).WithField("item", s.getEnhancedItemTitle(enhancedItem)).Error("Failed to transfer enhanced item files")
+ errorCount++
+ continue
+ }
+ transferredCount++
+
+ // Log a progress summary every 100 items and on completion
+ if (i+1)%100 == 0 || (i+1) == totalItems {
+ s.logger.WithFields(map[string]interface{}{
+ "completed": i + 1,
+ "total": totalItems,
+ "progress": fmt.Sprintf("%.1f%%", float64(i+1)/float64(totalItems)*100),
+ }).Debug("File transfer progress")
+ }
+ }
+
+ // Log final transfer summary
+ s.logger.WithFields(map[string]interface{}{
+ "total_items": totalItems,
+ "transferred": transferredCount,
+ "errors": errorCount,
+ "success_rate": fmt.Sprintf("%.1f%%", float64(transferredCount)/float64(totalItems)*100),
+ }).Debug("File transfer phase complete")
+
+ // Phase 3.5: Cleanup - Remove files on destination that aren't in current sync list
+ s.logger.Info("Phase 3.5: Cleaning up orphaned files on destination")
+ if err := s.cleanupOrphanedFiles(); err != nil {
+ s.logger.WithError(err).Warn("Failed to cleanup orphaned files, continuing")
+ } else {
+ s.logger.Info("Cleanup phase complete")
+ }
+
+ // Phase 4: Library Refresh and Monitoring (only needed after file transfer)
+ s.logger.Info("Phase 4: Refreshing destination libraries")
+ if err := s.libraryManager.TriggerRefreshAndWait(); err != nil {
+ return fmt.Errorf("library refresh failed: %w", err)
+ }
+ s.logger.Info("Library refresh complete")
+ } else {
+ s.logger.Info("Phase 3: Skipping file transfer (SSH not configured)")
+ s.logger.Info("Phase 4: Skipping library refresh (no files transferred)")
+ }
+
+ // Phase 5: Content Matching
+ s.logger.Info("Phase 5: Matching items by filename")
+ matches, err := s.contentMatcher.MatchItemsByFilename(itemsToSync)
+ if err != nil {
+ return fmt.Errorf("content matching failed: %w", err)
+ }
+ s.logger.WithFields(map[string]interface{}{
+ "source_items": len(itemsToSync),
+ "matches": len(matches),
+ "success_rate": fmt.Sprintf("%.1f%%", float64(len(matches))/float64(len(itemsToSync))*100),
+ }).Info("Content matching complete")
+
+ // Phase 6: Metadata Synchronization
+ s.logger.Info("Phase 6: Synchronizing metadata")
+ if len(matches) == 0 {
+ s.logger.Info("No matches found, skipping metadata synchronization")
+ } else {
+ success, errors, skipped := s.syncAllMetadata(matches)
+ s.logger.WithFields(map[string]interface{}{
+ "total": len(matches),
+ "success": success,
+ "errors": errors,
+ "skipped": skipped,
+ }).Info("Metadata synchronization complete")
+ }
+
+ s.logger.Info("π Sync cycle completed successfully!")
+ return nil
+}
+
+// transferEnhancedItemFiles handles file transfer for an enhanced item with path mapping
+func (s *SyncOrchestrator) transferEnhancedItemFiles(enhancedItem *discovery.EnhancedMediaItem) error {
+ // Extract file paths based on item type from the enhanced item
+ var filePaths []string
+
+ switch v := enhancedItem.Item.(type) {
+ case plex.Movie:
+ filePaths = s.extractMovieFilePaths(v)
+ case plex.TVShow:
+ // For TV shows, get all episodes and their file paths
+ episodes, err := s.sourceClient.GetAllTVShowEpisodes(v.RatingKey.String())
+ if err != nil {
+ return fmt.Errorf("failed to get episodes for TV show %s: %w", v.Title, err)
+ }
+ for _, episode := range episodes {
+ episodePaths := s.extractEpisodeFilePaths(episode)
+ filePaths = append(filePaths, episodePaths...)
+ }
+ case plex.Episode:
+ filePaths = s.extractEpisodeFilePaths(v)
+ default:
+ s.logger.WithField("item_type", fmt.Sprintf("%T", enhancedItem.Item)).Warn("Unknown enhanced item type for file transfer")
+ return nil
+ }
+
+ // Transfer each file with path mapping
+ for _, sourcePath := range filePaths {
+ if sourcePath == "" {
+ continue
+ }
+
+ // Map source Plex path to local path
+ localPath, err := s.fileTransfer.MapSourcePathToLocal(sourcePath)
+ if err != nil {
+ s.logger.WithError(err).WithField("source_path", sourcePath).Error("Failed to map source path to local path")
+ continue
+ }
+
+ // Check if local file exists
+ if _, err := os.Stat(localPath); os.IsNotExist(err) {
+ s.logger.WithField("local_path", localPath).Warn("Local file does not exist, skipping transfer")
+ continue
+ }
+
+ // Map local path to destination path
+ destPath, err := s.fileTransfer.MapLocalPathToDest(localPath)
+ if err != nil {
+ s.logger.WithError(err).WithField("local_path", localPath).Error("Failed to map local path to destination path")
+ continue
+ }
+
+ // Track this file as synced (should exist on destination) before transfer
+ s.syncedFiles[destPath] = true
+
+ // Transfer the file
+ if err := s.fileTransfer.TransferFile(localPath, destPath); err != nil {
+ s.logger.WithError(err).WithFields(map[string]interface{}{
+ "local_path": localPath,
+ "dest_path": destPath,
+ }).Error("Failed to transfer file")
+ continue
+ }
+
+ // Transfer completed successfully (detailed logging handled in transfer layer)
+ }
+
+ return nil
+}
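transferEnhancedItemFiles walks each file through three namespaces: the path as the source Plex server reports it, the same file on the local filesystem, and its target on the destination. The two Map* calls are assumed to be prefix rewrites driven by configuration; a toy version with invented prefixes:

```go
package main

import (
	"fmt"
	"strings"
)

// mapPrefix is a stand-in for FileTransferrer.MapSourcePathToLocal and
// MapLocalPathToDest: swap one configured root prefix for another. The
// actual mapping logic lives in the transfer package and may differ.
func mapPrefix(path, from, to string) (string, error) {
	if !strings.HasPrefix(path, from) {
		return "", fmt.Errorf("path %q is outside mapped root %q", path, from)
	}
	return to + strings.TrimPrefix(path, from), nil
}

func main() {
	sourcePath := "/data/movies/Movie (2020)/Movie (2020).mkv" // as Plex reports it

	local, err := mapPrefix(sourcePath, "/data", "/mnt/local")
	if err != nil {
		panic(err)
	}
	dest, err := mapPrefix(local, "/mnt/local", "/mnt/dest")
	if err != nil {
		panic(err)
	}
	fmt.Println("local:", local) // /mnt/local/movies/...
	fmt.Println("dest: ", dest)  // /mnt/dest/movies/...
}
```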
+
+// findRelatedFiles finds all files in the same directory with the same prefix (up to first period)
+func (s *SyncOrchestrator) findRelatedFiles(mainFilePath string) []string {
+ var allPaths []string
+
+ // Always include the main file
+ allPaths = append(allPaths, mainFilePath)
+
+ // Get directory and filename
+ dir := filepath.Dir(mainFilePath)
+ filename := filepath.Base(mainFilePath)
+
+ // Extract prefix (up to first period)
+ dotIndex := strings.Index(filename, ".")
+ if dotIndex == -1 {
+		// No dot found; there is no prefix to search for, so return only the main file
+ return allPaths
+ }
+
+ prefix := filename[:dotIndex]
+
+ // Map source path to local path for directory listing
+ localDir, err := s.fileTransfer.MapSourcePathToLocal(dir)
+ if err != nil {
+ s.logger.WithError(err).WithField("source_dir", dir).Debug("Failed to map source directory to local path")
+ return allPaths
+ }
+
+ // List all files in the directory
+ entries, err := os.ReadDir(localDir)
+ if err != nil {
+ s.logger.WithError(err).WithField("local_dir", localDir).Debug("Failed to read directory for related files")
+ return allPaths
+ }
+
+ // Find files with matching prefix
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ entryName := entry.Name()
+ if strings.HasPrefix(entryName, prefix+".") && entryName != filename {
+ // Construct the full source path for the related file
+ relatedSourcePath := filepath.Join(dir, entryName)
+ allPaths = append(allPaths, relatedSourcePath)
+ s.logger.WithFields(map[string]interface{}{
+ "main_file": mainFilePath,
+ "related_file": relatedSourcePath,
+ }).Debug("Found related file")
+ }
+ }
+
+ return allPaths
+}
+
+// extractMovieFilePaths extracts file paths from a Movie and includes related files
+func (s *SyncOrchestrator) extractMovieFilePaths(movie plex.Movie) []string {
+ var paths []string
+ for _, media := range movie.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ relatedFiles := s.findRelatedFiles(part.File)
+ paths = append(paths, relatedFiles...)
+ }
+ }
+ }
+ return paths
+}
+
+// extractEpisodeFilePaths extracts file paths from an Episode and includes related files
+func (s *SyncOrchestrator) extractEpisodeFilePaths(episode plex.Episode) []string {
+ var paths []string
+ for _, media := range episode.Media {
+ for _, part := range media.Part {
+ if part.File != "" {
+ relatedFiles := s.findRelatedFiles(part.File)
+ paths = append(paths, relatedFiles...)
+ }
+ }
+ }
+ return paths
+}
+
+// cleanupOrphanedFiles removes files on the destination that aren't in the current sync list
+func (s *SyncOrchestrator) cleanupOrphanedFiles() error {
+ if s.config.DestRootDir == "" {
+ s.logger.Debug("No destination root directory configured, skipping cleanup")
+ return nil
+ }
+
+ s.logger.WithField("dest_root", s.config.DestRootDir).Info("Scanning destination directory for orphaned files")
+
+ // Get list of all files in destination directory
+ destFiles, err := s.fileTransfer.ListDirectoryContents(s.config.DestRootDir)
+ if err != nil {
+ return fmt.Errorf("failed to list destination directory contents: %w", err)
+ }
+
+ orphanedCount := 0
+ for _, destFile := range destFiles {
+ // Check if this file is in our current sync list
+ if !s.syncedFiles[destFile] {
+ s.logger.WithField("orphaned_file", destFile).Debug("Removing orphaned file from destination")
+
+ if err := s.fileTransfer.DeleteFile(destFile); err != nil {
+ s.logger.WithError(err).WithField("file", destFile).Warn("Failed to delete orphaned file")
+ continue
+ }
+ orphanedCount++
+ }
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "synced_files": len(s.syncedFiles),
+ "dest_files": len(destFiles),
+ "orphaned_files": orphanedCount,
+ }).Debug("Cleanup phase statistics")
+
+ return nil
+}
+
+// syncAllMetadata implements Phase 6: Complete metadata transfer with comparison
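+// It returns the number of items synced, the number of errors, and the number skipped.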
+func (s *SyncOrchestrator) syncAllMetadata(matches []discovery.ItemMatch) (int, int, int) {
+ var successCount, errorCount, skippedCount int
+
+ for i, match := range matches {
+ s.logger.WithFields(map[string]interface{}{
+ "progress": fmt.Sprintf("%d/%d", i+1, len(matches)),
+ "filename": match.Filename,
+ "source": s.getEnhancedItemTitle(match.SourceItem),
+ "dest": s.getEnhancedItemTitle(match.DestItem),
+ }).Debug("Checking enhanced item metadata")
+
+ // Get destination rating key from enhanced item
+ destRatingKey := s.getEnhancedItemRatingKey(match.DestItem)
+ if destRatingKey == "" {
+ s.logger.WithField("filename", match.Filename).Warn("Could not get destination rating key for enhanced metadata sync")
+ errorCount++
+ continue
+ }
+
+ // Compare enhanced metadata before syncing - now we have full metadata for both items
+ needsSync, err := s.compareEnhancedMetadata(match.SourceItem, match.DestItem)
+ if err != nil {
+ s.logger.WithError(err).WithField("filename", match.Filename).Debug("Failed to compare enhanced metadata, will sync anyway")
+ needsSync = true // Default to syncing if comparison fails
+ }
+
+ if !needsSync {
+ s.logger.WithFields(map[string]interface{}{
+ "filename": match.Filename,
+ "source_key": s.getEnhancedItemRatingKey(match.SourceItem),
+ "dest_key": destRatingKey,
+ }).Debug("Enhanced metadata already synchronized, skipping")
+ skippedCount++
+ } else {
+ // Sync metadata using the enhanced metadata synchronizer
+ s.logger.WithFields(map[string]interface{}{
+ "filename": match.Filename,
+ "source_key": s.getEnhancedItemRatingKey(match.SourceItem),
+ "dest_key": destRatingKey,
+ }).Debug("Syncing enhanced metadata differences")
+
+			// TODO: Enable once syncEnhancedItemMetadata is implemented:
+			// if err := s.syncEnhancedItemMetadata(match.SourceItem, match.DestItem); err != nil {
+			// 	s.logger.WithError(err).WithField("filename", match.Filename).Error("Failed to sync enhanced metadata")
+			// 	errorCount++
+			// 	continue
+			// }
+			successCount++ // counts items that would be synced once the call above is enabled
+ }
+
+		// Log a progress summary every 100 items and at the end
+		if (i+1)%100 == 0 || (i+1) == len(matches) {
+ s.logger.WithFields(map[string]interface{}{
+ "completed": i + 1,
+ "total": len(matches),
+ "progress": fmt.Sprintf("%.1f%%", float64(i+1)/float64(len(matches))*100),
+ }).Debug("Metadata sync progress")
+ }
+ }
+
+ // Log final metadata sync summary
+ s.logger.WithFields(map[string]interface{}{
+ "total_matches": len(matches),
+ "synced": successCount,
+ "skipped": skippedCount,
+ "errors": errorCount,
+ "sync_rate": fmt.Sprintf("%.1f%%", float64(successCount)/float64(len(matches))*100),
+ }).Debug("Metadata synchronization complete")
+
+ return successCount, errorCount, skippedCount
+}
+
+// findMetadataDifferences compares two metadata items and returns a list of differences
+func (s *SyncOrchestrator) findMetadataDifferences(sourceItem, destItem interface{}, sourceKey, destKey string) []string {
+ var differences []string
+
+ // Handle Movie comparison
+ if sourceMovie, ok := sourceItem.(plex.Movie); ok {
+ if destMovie, ok := destItem.(plex.Movie); ok {
+ differences = append(differences, s.compareMovieMetadata(sourceMovie, destMovie)...)
+ } else {
+ differences = append(differences, "item types differ (source: Movie, dest: not Movie)")
+ }
+ return differences
+ }
+
+ // Handle TVShow comparison
+ if sourceTVShow, ok := sourceItem.(plex.TVShow); ok {
+ if destTVShow, ok := destItem.(plex.TVShow); ok {
+ differences = append(differences, s.compareTVShowMetadata(sourceTVShow, destTVShow)...)
+ } else {
+ differences = append(differences, "item types differ (source: TVShow, dest: not TVShow)")
+ }
+ return differences
+ }
+
+ differences = append(differences, "unsupported item type for comparison")
+ return differences
+}
+
+// compareMovieMetadata compares all non-server-specific Movie fields
+func (s *SyncOrchestrator) compareMovieMetadata(source, dest plex.Movie) []string {
+ var differences []string
+
+ // Compare basic fields
+ if source.Title != dest.Title {
+ differences = append(differences, fmt.Sprintf("title differs: '%s' vs '%s'", source.Title, dest.Title))
+ }
+ if source.OriginalTitle != dest.OriginalTitle {
+ differences = append(differences, fmt.Sprintf("original title differs: '%s' vs '%s'", source.OriginalTitle, dest.OriginalTitle))
+ }
+ if source.Year != dest.Year {
+ differences = append(differences, fmt.Sprintf("year differs: %d vs %d", source.Year, dest.Year))
+ }
+ if source.Studio != dest.Studio {
+ differences = append(differences, fmt.Sprintf("studio differs: '%s' vs '%s'", source.Studio, dest.Studio))
+ }
+ if source.ContentRating != dest.ContentRating {
+ differences = append(differences, fmt.Sprintf("content rating differs: '%s' vs '%s'", source.ContentRating, dest.ContentRating))
+ }
+ if source.Summary != dest.Summary {
+ differences = append(differences, "summary differs")
+ }
+ if source.Tagline != dest.Tagline {
+ differences = append(differences, fmt.Sprintf("tagline differs: '%s' vs '%s'", source.Tagline, dest.Tagline))
+ }
+
+	// Compare ratings, tolerating small differences from float precision
+	if diff := source.UserRating.Value - dest.UserRating.Value; diff > 0.1 || diff < -0.1 {
+		differences = append(differences, fmt.Sprintf("user rating differs: %.1f vs %.1f", source.UserRating.Value, dest.UserRating.Value))
+	}
+
+ // Compare artwork
+ if source.Thumb != dest.Thumb {
+ differences = append(differences, "poster (thumb) differs")
+ }
+ if source.Art != dest.Art {
+ differences = append(differences, "background (art) differs")
+ }
+
+ // Compare arrays
+ if !s.compareTagArrays(source.Genre, dest.Genre) {
+ differences = append(differences, fmt.Sprintf("genres differ: %v vs %v", s.extractTags(source.Genre), s.extractTags(dest.Genre)))
+ }
+ if !s.compareTagArrays(source.Label, dest.Label) {
+ differences = append(differences, fmt.Sprintf("labels differ: %v vs %v", s.extractTags(source.Label), s.extractTags(dest.Label)))
+ }
+ if !s.compareCollectionArrays(source.Collection, dest.Collection) {
+ differences = append(differences, fmt.Sprintf("collections differ: %v vs %v", s.extractCollectionTags(source.Collection), s.extractCollectionTags(dest.Collection)))
+ }
+
+ // Compare watched state
+ if source.ViewCount != dest.ViewCount {
+ differences = append(differences, fmt.Sprintf("view count differs: %d vs %d", source.ViewCount, dest.ViewCount))
+ }
+
+ return differences
+}
+
+// compareTVShowMetadata compares all non-server-specific TV Show fields
+func (s *SyncOrchestrator) compareTVShowMetadata(source, dest plex.TVShow) []string {
+ var differences []string
+
+ // Compare basic fields
+ if source.Title != dest.Title {
+ differences = append(differences, fmt.Sprintf("title differs: '%s' vs '%s'", source.Title, dest.Title))
+ }
+ if source.OriginalTitle != dest.OriginalTitle {
+ differences = append(differences, fmt.Sprintf("original title differs: '%s' vs '%s'", source.OriginalTitle, dest.OriginalTitle))
+ }
+ if source.Year != dest.Year {
+ differences = append(differences, fmt.Sprintf("year differs: %d vs %d", source.Year, dest.Year))
+ }
+ if source.Studio != dest.Studio {
+ differences = append(differences, fmt.Sprintf("studio differs: '%s' vs '%s'", source.Studio, dest.Studio))
+ }
+ if source.Network != dest.Network {
+ differences = append(differences, fmt.Sprintf("network differs: '%s' vs '%s'", source.Network, dest.Network))
+ }
+ if source.ContentRating != dest.ContentRating {
+ differences = append(differences, fmt.Sprintf("content rating differs: '%s' vs '%s'", source.ContentRating, dest.ContentRating))
+ }
+ if source.Summary != dest.Summary {
+ differences = append(differences, "summary differs")
+ }
+ if source.Tagline != dest.Tagline {
+ differences = append(differences, fmt.Sprintf("tagline differs: '%s' vs '%s'", source.Tagline, dest.Tagline))
+ }
+
+	// Compare ratings, tolerating small differences from float precision
+	if diff := source.UserRating.Value - dest.UserRating.Value; diff > 0.1 || diff < -0.1 {
+		differences = append(differences, fmt.Sprintf("user rating differs: %.1f vs %.1f", source.UserRating.Value, dest.UserRating.Value))
+	}
+
+ // Compare artwork
+ if source.Thumb != dest.Thumb {
+ differences = append(differences, "poster (thumb) differs")
+ }
+ if source.Art != dest.Art {
+ differences = append(differences, "background (art) differs")
+ }
+
+ // Compare arrays
+ if !s.compareTagArrays(source.Genre, dest.Genre) {
+ differences = append(differences, fmt.Sprintf("genres differ: %v vs %v", s.extractTags(source.Genre), s.extractTags(dest.Genre)))
+ }
+ if !s.compareTagArrays(source.Label, dest.Label) {
+ differences = append(differences, fmt.Sprintf("labels differ: %v vs %v", s.extractTags(source.Label), s.extractTags(dest.Label)))
+ }
+ if !s.compareCollectionArrays(source.Collection, dest.Collection) {
+ differences = append(differences, fmt.Sprintf("collections differ: %v vs %v", s.extractCollectionTags(source.Collection), s.extractCollectionTags(dest.Collection)))
+ }
+
+ // Compare watched state
+ if source.ViewCount != dest.ViewCount {
+ differences = append(differences, fmt.Sprintf("view count differs: %d vs %d", source.ViewCount, dest.ViewCount))
+ }
+
+ return differences
+}
+
+// compareTagArrays compares arrays of tags (Genre/Label) as sets
+func (s *SyncOrchestrator) compareTagArrays(source, dest interface{}) bool {
+	return compareStringSets(s.extractTags(source), s.extractTags(dest))
+}
+
+// compareCollectionArrays compares arrays of collections as sets
+func (s *SyncOrchestrator) compareCollectionArrays(source, dest []plex.Collection) bool {
+	return compareStringSets(s.extractCollectionTags(source), s.extractCollectionTags(dest))
+}
+
+// compareStringSets reports whether two string slices contain the same set of values.
+// Comparing as sets keeps duplicate tags from skewing a raw length comparison.
+func compareStringSets(sourceTags, destTags []string) bool {
+	sourceMap := make(map[string]bool, len(sourceTags))
+	destMap := make(map[string]bool, len(destTags))
+
+	for _, tag := range sourceTags {
+		sourceMap[tag] = true
+	}
+	for _, tag := range destTags {
+		destMap[tag] = true
+	}
+
+	if len(sourceMap) != len(destMap) {
+		return false
+	}
+
+	// With equal set sizes, a one-way containment check implies set equality
+	for tag := range sourceMap {
+		if !destMap[tag] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// extractTags extracts tag strings from Genre or Label arrays
+func (s *SyncOrchestrator) extractTags(items interface{}) []string {
+ var tags []string
+
+ switch v := items.(type) {
+ case []plex.Genre:
+ for _, item := range v {
+ tags = append(tags, item.Tag)
+ }
+ case []plex.Label:
+ for _, item := range v {
+ tags = append(tags, item.Tag)
+ }
+ }
+
+ return tags
+}
+
+// extractCollectionTags extracts tag strings from Collection arrays
+func (s *SyncOrchestrator) extractCollectionTags(collections []plex.Collection) []string {
+ var tags []string
+ for _, collection := range collections {
+ tags = append(tags, collection.Tag)
+ }
+ return tags
+}
+
+// abs returns the absolute value of an int64
+func abs(x int64) int64 {
+ if x < 0 {
+ return -x
+ }
+ return x
+}
+
+// TODO: Uncomment when plexgo library implements complete metadata sync functions
+// func (s *SyncOrchestrator) syncItemMetadata(match discovery.ItemMatch) error {
+// sourceItem := match.SourceItem
+// destRatingKey := s.getDestRatingKey(match.DestItem)
+//
+// // Sync all metadata fields using plexgo library functions
+// if err := s.syncBasicMetadata(sourceItem, destRatingKey); err != nil {
+// return err
+// }
+//
+// // Sync user ratings
+// if err := s.sourceClient.SetUserRating(destRatingKey, sourceItem.UserRating); err != nil {
+// return err
+// }
+//
+// // Sync selected poster
+// if err := s.syncPoster(sourceItem, destRatingKey); err != nil {
+// return err
+// }
+//
+// // Sync custom titles and names
+// if err := s.syncCustomFields(sourceItem, destRatingKey); err != nil {
+// return err
+// }
+//
+// // Sync all labels
+// if err := s.sourceClient.SetItemLabels(destRatingKey, sourceItem.Labels); err != nil {
+// return err
+// }
+//
+// // Sync watched state
+// if err := s.syncWatchedState(sourceItem, destRatingKey); err != nil {
+// return err
+// }
+//
+// return nil
+// }
+
+// RunContinuous runs the sync process in a continuous loop
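+// It performs one immediate sync, then re-runs on the configured interval,
+// blocking for the lifetime of the process.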
+func (s *SyncOrchestrator) RunContinuous() error {
+ s.logger.WithField("interval", s.config.Interval.String()).Info("Starting continuous sync mode")
+
+ ticker := time.NewTicker(s.config.Interval)
+ defer ticker.Stop()
+
+ // Run initial sync
+ if err := s.RunSyncCycle(); err != nil {
+ s.logger.WithError(err).Error("Initial sync cycle failed")
+ }
+
+ // Run periodic syncs
+ for range ticker.C {
+ if err := s.RunSyncCycle(); err != nil {
+ s.logger.WithError(err).Error("Sync cycle failed")
+ }
+ }
+
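+	// Unreachable in practice: the ticker loop above never terminates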
+ return nil
+}
+
+// HandleForceFullSync clears all sync state and forces a complete re-sync
+func (s *SyncOrchestrator) HandleForceFullSync() error {
+ if !s.config.ForceFullSync {
+ return nil
+ }
+
+ s.logger.Info("Force full sync enabled - will perform complete synchronization")
+
+ // TODO: Clear sync state from database/storage when state management is implemented
+ s.logger.Info("Sync state cleared for force full sync")
+
+ return nil
+}
+
+// Helper methods for Enhanced Media Items
+
+// getEnhancedItemTitle safely extracts title from an enhanced media item
+func (s *SyncOrchestrator) getEnhancedItemTitle(enhancedItem *discovery.EnhancedMediaItem) string {
+ return s.getItemTitle(enhancedItem.Item)
+}
+
+// getEnhancedItemRatingKey safely extracts rating key from an enhanced media item
+func (s *SyncOrchestrator) getEnhancedItemRatingKey(enhancedItem *discovery.EnhancedMediaItem) string {
+ return s.getItemRatingKey(enhancedItem.Item)
+}
+
+// compareEnhancedMetadata compares metadata between enhanced source and destination items
+func (s *SyncOrchestrator) compareEnhancedMetadata(sourceEnhanced, destEnhanced *discovery.EnhancedMediaItem) (bool, error) {
+ // Now we have FULL metadata for both items, so we can do direct comparison
+ differences := s.findEnhancedMetadataDifferences(sourceEnhanced, destEnhanced)
+
+ if len(differences) > 0 {
+ s.logger.WithFields(map[string]interface{}{
+ "source_key": s.getEnhancedItemRatingKey(sourceEnhanced),
+ "dest_key": s.getEnhancedItemRatingKey(destEnhanced),
+ "differences": differences,
+ }).Debug("Enhanced metadata differences found")
+ return true, nil
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_key": s.getEnhancedItemRatingKey(sourceEnhanced),
+ "dest_key": s.getEnhancedItemRatingKey(destEnhanced),
+ }).Debug("Enhanced metadata is synchronized")
+
+ return false, nil
+}
+
+// findEnhancedMetadataDifferences compares two enhanced metadata items and returns differences
+func (s *SyncOrchestrator) findEnhancedMetadataDifferences(sourceEnhanced, destEnhanced *discovery.EnhancedMediaItem) []string {
+ // Direct comparison using full metadata
+ return s.findMetadataDifferences(sourceEnhanced.Item, destEnhanced.Item,
+ s.getEnhancedItemRatingKey(sourceEnhanced), s.getEnhancedItemRatingKey(destEnhanced))
+}
+
+// Legacy Helper methods (for backward compatibility)
+
+// getItemTitle safely extracts title from an item
+func (s *SyncOrchestrator) getItemTitle(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.Title
+ case plex.TVShow:
+ return v.Title
+ default:
+ return "unknown"
+ }
+}
+
+// getItemRatingKey safely extracts rating key from an item
+func (s *SyncOrchestrator) getItemRatingKey(item interface{}) string {
+ switch v := item.(type) {
+ case plex.Movie:
+ return v.RatingKey.String()
+ case plex.TVShow:
+ return v.RatingKey.String()
+ case plex.Episode:
+ return v.RatingKey.String()
+ default:
+ return ""
+ }
+}
+
+// isSSHConfigured checks if SSH is properly configured for username/password authentication
+func isSSHConfigured(sshConfig config.SSHConfig, log *logger.Logger) bool {
+ // Check if SSH user and password are provided
+ if sshConfig.User == "" || sshConfig.Password == "" {
+ log.Debug("SSH user or password not provided")
+ return false
+ }
+
+ // Check for common placeholder values (but not "nullable" since that's a real username)
+ if sshConfig.User == "your-ssh-username" ||
+ sshConfig.Password == "your-ssh-password" {
+ log.Info("SSH configuration contains placeholder values - skipping SSH setup")
+ return false
+ }
+
+ log.WithFields(map[string]interface{}{
+ "ssh_user": sshConfig.User,
+ "ssh_port": sshConfig.Port,
+ }).Debug("SSH configured for username/password authentication")
+
+ return true
+}
diff --git a/internal/plex/client.go b/internal/plex/client.go
new file mode 100644
index 0000000..fad4bf3
--- /dev/null
+++ b/internal/plex/client.go
@@ -0,0 +1,963 @@
+package plex
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/nullable-eth/syncarr/internal/config"
+ "github.com/nullable-eth/syncarr/internal/logger"
+)
+
+// Client represents a Plex API client
+type Client struct {
+ config *config.PlexServerConfig
+ logger *logger.Logger
+ httpClient *http.Client
+}
+
+// NewClient creates a new Plex client
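+//
+// A minimal usage sketch (cfg and log construction elided):
+//
+//	client, err := plex.NewClient(cfg, log)
+//	if err != nil {
+//		return err
+//	}
+//	libraries, err := client.GetLibraries()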
+func NewClient(cfg *config.PlexServerConfig, log *logger.Logger) (*Client, error) {
+ tr := &http.Transport{
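+		// Plex servers are often reached by IP with self-signed certificates,
+		// so TLS certificate verification is intentionally skipped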
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+
+ client := &Client{
+ config: cfg,
+ logger: log,
+ httpClient: &http.Client{Transport: tr},
+ }
+
+ // Test the connection
+ if err := client.TestConnection(); err != nil {
+ return nil, fmt.Errorf("failed to connect to Plex server: %w", err)
+ }
+
+ client.logger.WithFields(map[string]interface{}{
+ "server_url": client.buildURL(""),
+ "host": cfg.Host,
+ "port": cfg.Port,
+ "https": cfg.RequireHTTPS,
+ }).Info("Plex client created successfully")
+
+ return client, nil
+}
+
+// TestConnection tests if the Plex server is reachable by hitting the /identity endpoint
+func (c *Client) TestConnection() error {
+ url := c.buildURL("/identity")
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to connect to Plex server: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("plex server returned status %d", resp.StatusCode)
+ }
+
+ c.logger.WithField("server_url", c.config.Host).Debug("Successfully connected to Plex server")
+ return nil
+}
+
+// GetLibraries fetches all libraries from Plex
+func (c *Client) GetLibraries() ([]Library, error) {
+ librariesURL := c.buildURL("/library/sections")
+
+ req, err := http.NewRequest("GET", librariesURL, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch libraries: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("plex API returned status %d. Response: %s", resp.StatusCode, string(body))
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var libraryResponse LibraryResponse
+ if err := json.Unmarshal(body, &libraryResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse library response: %w. Response body: %s", err, string(body))
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "library_count": len(libraryResponse.MediaContainer.Directory),
+ }).Info("Retrieved libraries using labelarr-based client")
+
+ return libraryResponse.MediaContainer.Directory, nil
+}
+
+// GetMoviesFromLibrary fetches all movies from a specific library
+func (c *Client) GetMoviesFromLibrary(libraryID string) ([]Movie, error) {
+ moviesURL := c.buildURL(fmt.Sprintf("/library/sections/%s/all", libraryID))
+
+ req, err := http.NewRequest("GET", moviesURL, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch movies: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("plex API returned status %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var plexResponse PlexResponse
+ if err := json.Unmarshal(body, &plexResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse movies response: %w", err)
+ }
+
+	c.logger.WithFields(map[string]interface{}{
+		"library_id":  libraryID,
+		"movie_count": len(plexResponse.MediaContainer.Metadata),
+	}).Info("Retrieved movies from library")
+
+ return plexResponse.MediaContainer.Metadata, nil
+}
+
+// GetTVShowsFromLibrary fetches all TV shows from a specific library
+func (c *Client) GetTVShowsFromLibrary(libraryID string) ([]TVShow, error) {
+ tvShowsURL := c.buildURL(fmt.Sprintf("/library/sections/%s/all", libraryID))
+
+ req, err := http.NewRequest("GET", tvShowsURL, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch TV shows: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("plex API returned status %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var tvShowResponse TVShowResponse
+ if err := json.Unmarshal(body, &tvShowResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse TV shows response: %w", err)
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "item_count": len(tvShowResponse.MediaContainer.Metadata),
+ }).Info("Retrieved TV shows from library")
+
+ return tvShowResponse.MediaContainer.Metadata, nil
+}
+
+// GetAllTVShowEpisodes fetches ALL episodes for a specific TV show
+func (c *Client) GetAllTVShowEpisodes(ratingKey string) ([]Episode, error) {
+ episodesURL := c.buildURL(fmt.Sprintf("/library/metadata/%s/allLeaves", ratingKey))
+
+ req, err := http.NewRequest("GET", episodesURL, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch all TV show episodes: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("plex API returned status %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var episodeResponse EpisodeResponse
+ if err := json.Unmarshal(body, &episodeResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse episodes response: %w", err)
+ }
+
+ return episodeResponse.MediaContainer.Metadata, nil
+}
+
+// UpdateMediaField updates a media item's field (labels or genres) with new keywords
+func (c *Client) UpdateMediaField(mediaID, libraryID string, keywords []string, updateField string, mediaType string) error {
+ c.logger.WithFields(map[string]interface{}{
+ "media_id": mediaID,
+ "library_id": libraryID,
+ "update_field": updateField,
+ "keyword_count": len(keywords),
+ }).Debug("Making Plex API call to update media field")
+
+ return c.updateMediaField(mediaID, libraryID, keywords, updateField, c.getMediaTypeForLibraryType(mediaType))
+}
+
+// RemoveMediaFieldKeywords removes keywords from a media item's field
+func (c *Client) RemoveMediaFieldKeywords(mediaID, libraryID string, valuesToRemove []string, updateField string, lockField bool, mediaType string) error {
+ return c.removeMediaFieldKeywords(mediaID, libraryID, valuesToRemove, updateField, lockField, c.getMediaTypeForLibraryType(mediaType))
+}
+
+// TriggerLibraryScan triggers a scan of the specified library
+func (c *Client) TriggerLibraryScan(libraryID string) error {
+ url := c.buildURL(fmt.Sprintf("/library/sections/%s/refresh", libraryID))
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to trigger library scan: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("failed to trigger library scan, status code: %d", resp.StatusCode)
+ }
+
+ c.logger.WithField("library_id", libraryID).Debug("Triggered library scan")
+ return nil
+}
+
+// GetActivities retrieves all current activities from the Plex server
+func (c *Client) GetActivities() (*ActivitiesResponse, error) {
+ url := c.buildURL("/activities")
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/xml")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get activities: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to get activities, status code: %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var activitiesResponse ActivitiesResponse
+ if err := xml.Unmarshal(body, &activitiesResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse activities response: %w", err)
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "activity_count": activitiesResponse.Size,
+ "activities": len(activitiesResponse.Activities),
+ }).Debug("Retrieved server activities")
+
+ return &activitiesResponse, nil
+}
+
+// IsLibraryScanInProgress checks if any library scans are currently running
+func (c *Client) IsLibraryScanInProgress() (bool, []Activity, error) {
+ activities, err := c.GetActivities()
+ if err != nil {
+ return false, nil, err
+ }
+
+ var libraryScanActivities []Activity
+ for _, activity := range activities.Activities {
+ if activity.Type == "library.update.section" {
+ libraryScanActivities = append(libraryScanActivities, activity)
+ }
+ }
+
+ return len(libraryScanActivities) > 0, libraryScanActivities, nil
+}
+
+// GetWatchedState retrieves the watched state for a media item
+func (c *Client) GetWatchedState(ratingKey string) (*WatchedState, error) {
+ url := c.buildURL(fmt.Sprintf("/library/metadata/%s", ratingKey))
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get media metadata: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to get media metadata, status code: %d", resp.StatusCode)
+ }
+
+ // For now, return default state - TODO: Parse actual response
+ watchedState := &WatchedState{
+ Watched: false,
+ ViewCount: 0,
+ ViewOffset: 0,
+ LastViewedAt: 0,
+ }
+
+ c.logger.WithField("rating_key", ratingKey).Debug("Retrieved watched state (parsing not yet implemented)")
+ return watchedState, nil
+}
+
+// SetWatchedState sets the watched state for a media item
+func (c *Client) SetWatchedState(ratingKey string, watched bool) error {
+ var endpoint string
+ if watched {
+ endpoint = "/:/scrobble"
+ } else {
+ endpoint = "/:/unscrobble"
+ }
+
+ urlStr := c.buildURL(endpoint)
+
+ // Parse the URL to add query parameters
+ parsedURL, err := url.Parse(urlStr)
+ if err != nil {
+ return fmt.Errorf("failed to parse URL: %w", err)
+ }
+
+ params := parsedURL.Query()
+ params.Set("key", ratingKey)
+ params.Set("identifier", "com.plexapp.plugins.library")
+ params.Set("X-Plex-Token", c.config.Token)
+ parsedURL.RawQuery = params.Encode()
+
+ req, err := http.NewRequest("GET", parsedURL.String(), nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to set watched state: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("failed to set watched state, status code: %d", resp.StatusCode)
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "rating_key": ratingKey,
+ "watched": watched,
+ }).Debug("Set watched state")
+
+ return nil
+}
+
+// SetUserRating sets the user rating for a media item (0.0 to 10.0)
+func (c *Client) SetUserRating(ratingKey string, rating float64) error {
+ if rating < 0 || rating > 10 {
+ return fmt.Errorf("rating must be between 0 and 10, got %.1f", rating)
+ }
+
+ urlStr := c.buildURL("/:/rate")
+ parsedURL, err := url.Parse(urlStr)
+ if err != nil {
+ return fmt.Errorf("failed to parse URL: %w", err)
+ }
+
+ params := parsedURL.Query()
+ params.Set("key", ratingKey)
+ params.Set("rating", fmt.Sprintf("%.0f", rating))
+ params.Set("identifier", "com.plexapp.plugins.library")
+ params.Set("X-Plex-Token", c.config.Token)
+ parsedURL.RawQuery = params.Encode()
+
+ req, err := http.NewRequest("GET", parsedURL.String(), nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to set user rating: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("failed to set user rating, status code: %d", resp.StatusCode)
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "rating_key": ratingKey,
+ "rating": rating,
+ }).Debug("Set user rating")
+
+ return nil
+}
+
+// SetLabels sets labels for a media item
+func (c *Client) SetLabels(ratingKey, libraryID string, labels []string) error {
+ return c.UpdateMediaField(ratingKey, libraryID, labels, "label", "movie")
+}
+
+// SetTitle sets the title for a media item
+func (c *Client) SetTitle(ratingKey, libraryID, title string) error {
+ return c.updateBasicField(ratingKey, libraryID, "title", title)
+}
+
+// SetSummary sets the summary for a media item
+func (c *Client) SetSummary(ratingKey, libraryID, summary string) error {
+ return c.updateBasicField(ratingKey, libraryID, "summary", summary)
+}
+
+// updateBasicField updates basic text fields like title, summary, etc.
+func (c *Client) updateBasicField(ratingKey, libraryID, fieldName, value string) error {
+ baseURL := c.buildURL(fmt.Sprintf("/library/sections/%s/all", libraryID))
+
+ parsedURL, err := url.Parse(baseURL)
+ if err != nil {
+ return fmt.Errorf("failed to parse URL: %w", err)
+ }
+
+ params := parsedURL.Query()
+ params.Set("type", "1") // Assume movie for now, could be enhanced
+ params.Set("id", ratingKey)
+ params.Set(fieldName, value)
+ params.Set("X-Plex-Token", c.config.Token)
+ parsedURL.RawQuery = params.Encode()
+
+ req, err := http.NewRequest("PUT", parsedURL.String(), nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to update %s: %w", fieldName, err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("failed to update %s, status code: %d - Response: %s", fieldName, resp.StatusCode, string(body))
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "rating_key": ratingKey,
+ "field": fieldName,
+ "value": value,
+ }).Debug("Updated basic field")
+
+ return nil
+}
+
+// Helper Methods
+
+// updateMediaField is a generic function to update media fields (movies: type=1, TV shows: type=2)
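+// The resulting request has the shape:
+//
+//	PUT /library/sections/{libraryID}/all?type=1&id={mediaID}&label[0].tag.tag=Foo&label[1].tag.tag=Bar&label.locked=1
+//
+// (plus includeExternalMedia and the auth token).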
+func (c *Client) updateMediaField(mediaID, libraryID string, keywords []string, updateField string, mediaType int) error {
+ startTime := time.Now()
+
+ // Build the base URL
+ baseURL := c.buildURL(fmt.Sprintf("/library/sections/%s/all", libraryID))
+
+ // Parse the URL to add query parameters properly
+ parsedURL, err := url.Parse(baseURL)
+ if err != nil {
+ return fmt.Errorf("failed to parse URL: %w", err)
+ }
+
+ // Create query parameters
+ params := parsedURL.Query()
+ params.Set("type", fmt.Sprintf("%d", mediaType))
+ params.Set("id", mediaID)
+ params.Set("includeExternalMedia", "1")
+
+ // Add indexed label/genre parameters like label[0].tag.tag, label[1].tag.tag, etc.
+ for i, keyword := range keywords {
+ paramName := fmt.Sprintf("%s[%d].tag.tag", updateField, i)
+ params.Set(paramName, keyword)
+ }
+
+ params.Set(fmt.Sprintf("%s.locked", updateField), "1")
+ params.Set("X-Plex-Token", c.config.Token)
+
+ parsedURL.RawQuery = params.Encode()
+
+ req, err := http.NewRequest("PUT", parsedURL.String(), nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to update media field: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("plex API returned status %d when updating media field - Response: %s", resp.StatusCode, string(body))
+ }
+
+ duration := time.Since(startTime)
+ c.logger.WithField("duration", duration).Debug("Plex API call completed")
+
+ return nil
+}
+
+// removeMediaFieldKeywords is a generic function to remove keywords from media fields (movies: type=1, TV shows: type=2)
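+// Removal uses the trailing "-" parameter form with comma-joined values, e.g.
+// label[].tag.tag-=Horror,Thriller removes both labels in a single request.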
+func (c *Client) removeMediaFieldKeywords(mediaID, libraryID string, valuesToRemove []string, updateField string, lockField bool, mediaType int) error {
+ // Build the base URL
+ baseURL := c.buildURL(fmt.Sprintf("/library/sections/%s/all", libraryID))
+
+ // Parse the URL to add query parameters properly
+ parsedURL, err := url.Parse(baseURL)
+ if err != nil {
+ return fmt.Errorf("failed to parse URL: %w", err)
+ }
+
+ // Create query parameters
+ params := parsedURL.Query()
+ params.Set("type", fmt.Sprintf("%d", mediaType))
+ params.Set("id", mediaID)
+ params.Set("includeExternalMedia", "1")
+
+ // Join values with commas for the -= operator
+ combinedValues := strings.Join(valuesToRemove, ",")
+
+ // Add removal parameter using the -= operator
+ paramName := fmt.Sprintf("%s[].tag.tag-", updateField)
+ params.Set(paramName, combinedValues)
+
+ if lockField {
+ params.Set(fmt.Sprintf("%s.locked", updateField), "1")
+ } else {
+ params.Set(fmt.Sprintf("%s.locked", updateField), "0")
+ }
+ params.Set("X-Plex-Token", c.config.Token)
+
+ parsedURL.RawQuery = params.Encode()
+
+ req, err := http.NewRequest("PUT", parsedURL.String(), nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("failed to remove media field keywords: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("plex API returned status %d when removing media field keywords - Response: %s", resp.StatusCode, string(body))
+ }
+
+ return nil
+}
+
+// getMediaTypeForLibraryType converts library type strings to Plex API media type integers
+func (c *Client) getMediaTypeForLibraryType(libraryType string) int {
+ switch libraryType {
+ case "movie":
+ return 1
+ case "show":
+ return 2
+ default:
+ // Default to 1 for unknown types
+ return 1
+ }
+}
+
+// buildURL constructs a full URL for Plex API requests
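+// e.g. buildURL("/identity") yields "https://192.168.1.10:32400/identity" for an
+// illustrative host and port with RequireHTTPS set.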
+func (c *Client) buildURL(path string) string {
+ protocol := "http"
+ if c.config.RequireHTTPS {
+ protocol = "https"
+ }
+ return fmt.Sprintf("%s://%s:%s%s", protocol, c.config.Host, c.config.Port, path)
+}
+
+// GetLibraryContent retrieves all content from a specific library (movies and TV shows)
+func (c *Client) GetLibraryContent(libraryID string) ([]interface{}, error) {
+ libraries, err := c.GetLibraries()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get libraries: %w", err)
+ }
+
+ // Find the library type
+ var libraryType string
+ for _, lib := range libraries {
+ if lib.Key == libraryID {
+ libraryType = lib.Type
+ break
+ }
+ }
+
+ var allItems []interface{}
+
+ if libraryType == "movie" {
+ movies, err := c.GetMoviesFromLibrary(libraryID)
+ if err != nil {
+ return nil, err
+ }
+ for _, movie := range movies {
+ allItems = append(allItems, movie)
+ }
+ } else if libraryType == "show" {
+ shows, err := c.GetTVShowsFromLibrary(libraryID)
+ if err != nil {
+ return nil, err
+ }
+ for _, show := range shows {
+ allItems = append(allItems, show)
+ }
+ } else {
+ // Try both types for unknown library types
+ movies, err := c.GetMoviesFromLibrary(libraryID)
+ if err == nil {
+ for _, movie := range movies {
+ allItems = append(allItems, movie)
+ }
+ }
+ shows, err := c.GetTVShowsFromLibrary(libraryID)
+ if err == nil {
+ for _, show := range shows {
+ allItems = append(allItems, show)
+ }
+ }
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "item_count": len(allItems),
+ "library_type": libraryType,
+ }).Info("Retrieved library content")
+
+ return allItems, nil
+}
+
+// GetItemsWithLabelDirect efficiently retrieves items with a specific label using server-side filtering
+// and then fetches detailed metadata including labels for each item
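+// e.g. GET /library/sections/{libraryID}/all?label=syncarr returns only items carrying that label.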
+func (c *Client) GetItemsWithLabelDirect(libraryID, label string) ([]interface{}, error) {
+ // Use Plex API query parameters to filter by label server-side
+ // This is much more efficient than downloading all items and filtering client-side
+ url := c.buildURL(fmt.Sprintf("/library/sections/%s/all", libraryID))
+
+ // Add label filter query parameter (based on Python PlexAPI approach)
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating request: %w", err)
+ }
+
+ // Add query parameters for label filtering
+ q := req.URL.Query()
+ q.Add("label", label) // Server-side label filtering
+ req.URL.RawQuery = q.Encode()
+
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error making request: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("API request failed with status: %d", resp.StatusCode)
+ }
+
+ var result struct {
+ MediaContainer struct {
+ Metadata []json.RawMessage `json:"Metadata"`
+ } `json:"MediaContainer"`
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return nil, fmt.Errorf("error decoding response: %w", err)
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "label": label,
+ "filtered_items": len(result.MediaContainer.Metadata),
+ }).Info("Got basic items with label filter, fetching detailed metadata")
+
+ var items []interface{}
+ for _, rawItem := range result.MediaContainer.Metadata {
+ var basicItem struct {
+ Type string `json:"type"`
+ RatingKey string `json:"ratingKey"`
+ Title string `json:"title"`
+ }
+ if err := json.Unmarshal(rawItem, &basicItem); err != nil {
+ c.logger.WithError(err).Warn("Failed to parse basic item info")
+ continue
+ }
+
+ switch basicItem.Type {
+ case "movie":
+ // Get detailed movie metadata including labels
+ detailedMovie, err := c.GetMovieDetails(basicItem.RatingKey)
+ if err != nil {
+ c.logger.WithError(err).WithFields(map[string]interface{}{
+ "rating_key": basicItem.RatingKey,
+ "title": basicItem.Title,
+ }).Warn("Failed to fetch detailed metadata, using basic metadata")
+ continue
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "rating_key": detailedMovie.RatingKey.String(),
+ "title": detailedMovie.Title,
+ }).Debug("Successfully fetched detailed movie metadata")
+
+ items = append(items, *detailedMovie)
+
+ case "show":
+ // Get detailed TV show metadata including labels
+ detailedShow, err := c.GetTVShowDetails(basicItem.RatingKey)
+ if err != nil {
+ c.logger.WithError(err).WithFields(map[string]interface{}{
+ "rating_key": basicItem.RatingKey,
+ "title": basicItem.Title,
+ }).Warn("Failed to fetch detailed show metadata, using basic metadata")
+ continue
+ }
+
+ // Get all episodes for this TV show
+ episodes, err := c.GetAllTVShowEpisodes(basicItem.RatingKey)
+ if err != nil {
+ c.logger.WithError(err).WithFields(map[string]interface{}{
+ "rating_key": basicItem.RatingKey,
+ "title": basicItem.Title,
+ }).Warn("Failed to get TV show episodes, adding show without episodes")
+ items = append(items, *detailedShow)
+ continue
+ }
+
+			// Add the detailed show
+			items = append(items, *detailedShow)
+			c.logger.WithFields(map[string]interface{}{
+				"show_title":    detailedShow.Title,
+				"rating_key":    detailedShow.RatingKey.String(),
+				"label_count":   len(detailedShow.Label),
+				"episode_count": len(episodes),
+			}).Debug("Added detailed TV show metadata with episodes")
+
+ // Add all episodes
+ for _, episode := range episodes {
+ items = append(items, episode)
+ }
+ }
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "label": label,
+ "total_items": len(items),
+ }).Info("Completed detailed metadata fetch for labeled items")
+
+ return items, nil
+}
+
+// GetItemsWithLabel now uses the more efficient server-side filtering
+func (c *Client) GetItemsWithLabel(libraryID, label string) ([]interface{}, error) {
+ c.logger.WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "label": label,
+ }).Debug("Getting items with label using server-side filtering")
+
+ // Try the efficient server-side filtering first
+ items, err := c.GetItemsWithLabelDirect(libraryID, label)
+ if err != nil {
+ c.logger.WithError(err).WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "label": label,
+ }).Warn("Server-side filtering failed, falling back to client-side")
+ // Fallback to client-side filtering if server-side fails
+ return c.GetItemsWithLabelClientSide(libraryID, label)
+ }
+
+ c.logger.WithFields(map[string]interface{}{
+ "library_id": libraryID,
+ "label": label,
+ "item_count": len(items),
+ }).Debug("Found items with label using server-side filtering")
+ return items, nil
+}
+
+// GetItemsWithLabelClientSide provides fallback client-side filtering
+func (c *Client) GetItemsWithLabelClientSide(libraryID, label string) ([]interface{}, error) {
+ // Get all content from the library
+ allItems, err := c.GetLibraryContent(libraryID)
+ if err != nil {
+ return nil, err
+ }
+
+ // Filter items that have the specified label (client-side)
+ var filteredItems []interface{}
+ for _, item := range allItems {
+ if c.itemHasLabel(item, label) {
+ filteredItems = append(filteredItems, item)
+ }
+ }
+
+ return filteredItems, nil
+}
+
+// itemHasLabel checks if an item has a specific label
+func (c *Client) itemHasLabel(item interface{}, label string) bool {
+ switch v := item.(type) {
+ case Movie:
+ for _, lbl := range v.Label {
+ if lbl.Tag == label {
+ return true
+ }
+ }
+ case TVShow:
+ for _, lbl := range v.Label {
+ if lbl.Tag == label {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// GetMovieDetails fetches detailed metadata for a specific movie including labels
+func (c *Client) GetMovieDetails(ratingKey string) (*Movie, error) {
+ url := c.buildURL(fmt.Sprintf("/library/metadata/%s", ratingKey))
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch movie details: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("plex API returned status %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var movieResponse PlexResponse
+ if err := json.Unmarshal(body, &movieResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse movie details response: %w", err)
+ }
+
+ if len(movieResponse.MediaContainer.Metadata) == 0 {
+ return nil, fmt.Errorf("no movie found with rating key %s", ratingKey)
+ }
+
+ return &movieResponse.MediaContainer.Metadata[0], nil
+}
+
+// GetTVShowDetails fetches detailed metadata for a specific TV show including labels
+func (c *Client) GetTVShowDetails(ratingKey string) (*TVShow, error) {
+ url := c.buildURL(fmt.Sprintf("/library/metadata/%s", ratingKey))
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create request: %w", err)
+ }
+ req.Header.Set("X-Plex-Token", c.config.Token)
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch TV show details: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("plex API returned status %d", resp.StatusCode)
+ }
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response body: %w", err)
+ }
+
+ var tvShowResponse TVShowResponse
+ if err := json.Unmarshal(body, &tvShowResponse); err != nil {
+ return nil, fmt.Errorf("failed to parse TV show details response: %w", err)
+ }
+
+ if len(tvShowResponse.MediaContainer.Metadata) == 0 {
+ return nil, fmt.Errorf("no TV show found with rating key %s", ratingKey)
+ }
+
+ return &tvShowResponse.MediaContainer.Metadata[0], nil
+}
diff --git a/internal/plex/types.go b/internal/plex/types.go
new file mode 100644
index 0000000..4e13092
--- /dev/null
+++ b/internal/plex/types.go
@@ -0,0 +1,420 @@
+package plex
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+)
+
+// Library represents a Plex library
+type Library struct {
+ Key string `json:"key"`
+ Type string `json:"type"`
+ Title string `json:"title"`
+ Agent string `json:"agent"`
+}
+
+// LibraryContainer holds library directory information
+type LibraryContainer struct {
+ Size int `json:"size"`
+ Directory []Library `json:"Directory"`
+}
+
+// LibraryResponse represents the response from library endpoints
+type LibraryResponse struct {
+ MediaContainer LibraryContainer `json:"MediaContainer"`
+}
+
+// Movie represents a Plex movie
+type Movie struct {
+ RatingKey FlexibleRatingKey `json:"ratingKey"`
+ Title string `json:"title"`
+ TitleSort string `json:"titleSort,omitempty"`
+ OriginalTitle string `json:"originalTitle,omitempty"`
+ Year int `json:"year"`
+ Duration int `json:"duration,omitempty"`
+ ContentRating string `json:"contentRating,omitempty"`
+ Studio string `json:"studio,omitempty"`
+ Tagline string `json:"tagline,omitempty"`
+ Summary string `json:"summary,omitempty"`
+ Rating FlexibleRating `json:"rating,omitempty"`
+ AudienceRating FlexibleRating `json:"audienceRating,omitempty"`
+ RatingImage string `json:"ratingImage,omitempty"`
+ AudienceRatingImage string `json:"audienceRatingImage,omitempty"`
+ ViewCount int `json:"viewCount,omitempty"`
+ LastViewedAt int `json:"lastViewedAt,omitempty"`
+ ViewOffset int `json:"viewOffset,omitempty"`
+ UserRating FlexibleRating `json:"userRating,omitempty"`
+ OriginallyAvailableAt string `json:"originallyAvailableAt,omitempty"`
+ AddedAt int `json:"addedAt,omitempty"`
+ UpdatedAt int `json:"updatedAt,omitempty"`
+ Thumb string `json:"thumb,omitempty"`
+ Art string `json:"art,omitempty"`
+ Theme string `json:"theme,omitempty"`
+ ChapterSource string `json:"chapterSource,omitempty"`
+ PrimaryExtraKey string `json:"primaryExtraKey,omitempty"`
+ EditionTitle string `json:"editionTitle,omitempty"`
+ EnableCreditsMarkerGeneration int `json:"enableCreditsMarkerGeneration,omitempty"`
+ LanguageOverride string `json:"languageOverride,omitempty"`
+ UseOriginalTitle int `json:"useOriginalTitle,omitempty"`
+ Slug string `json:"slug,omitempty"`
+ SourceURI string `json:"sourceURI,omitempty"`
+ Label []Label `json:"Label,omitempty"`
+ Genre []Genre `json:"Genre,omitempty"`
+ Director []Director `json:"Director,omitempty"`
+ Writer []Writer `json:"Writer,omitempty"`
+ Producer []Producer `json:"Producer,omitempty"`
+ Role []Role `json:"Role,omitempty"`
+ Country []Country `json:"Country,omitempty"`
+ Collection []Collection `json:"Collection,omitempty"`
+ Guid FlexibleGuid `json:"Guid,omitempty"`
+ Media []Media `json:"Media,omitempty"`
+}
+
+// MediaItem interface implementation for Movie
+func (m Movie) GetRatingKey() string { return m.RatingKey.String() }
+func (m Movie) GetTitle() string { return m.Title }
+func (m Movie) GetYear() int { return m.Year }
+func (m Movie) GetGuid() []Guid { return []Guid(m.Guid) }
+func (m Movie) GetMedia() []Media { return m.Media }
+func (m Movie) GetLabel() []Label { return m.Label }
+func (m Movie) GetGenre() []Genre { return m.Genre }
+
+// TVShow represents a Plex TV show
+type TVShow struct {
+ RatingKey FlexibleRatingKey `json:"ratingKey"`
+ Title string `json:"title"`
+ TitleSort string `json:"titleSort,omitempty"`
+ OriginalTitle string `json:"originalTitle,omitempty"`
+ Year int `json:"year"`
+ Duration int `json:"duration,omitempty"`
+ ContentRating string `json:"contentRating,omitempty"`
+ Studio string `json:"studio,omitempty"`
+ Network string `json:"network,omitempty"`
+ Tagline string `json:"tagline,omitempty"`
+ Summary string `json:"summary,omitempty"`
+ Rating FlexibleRating `json:"rating,omitempty"`
+ AudienceRating FlexibleRating `json:"audienceRating,omitempty"`
+ RatingImage string `json:"ratingImage,omitempty"`
+ AudienceRatingImage string `json:"audienceRatingImage,omitempty"`
+ ViewCount int `json:"viewCount,omitempty"`
+ LastViewedAt int `json:"lastViewedAt,omitempty"`
+ ViewOffset int `json:"viewOffset,omitempty"`
+ UserRating FlexibleRating `json:"userRating,omitempty"`
+ OriginallyAvailableAt string `json:"originallyAvailableAt,omitempty"`
+ AddedAt int `json:"addedAt,omitempty"`
+ UpdatedAt int `json:"updatedAt,omitempty"`
+ Thumb string `json:"thumb,omitempty"`
+ Art string `json:"art,omitempty"`
+ Theme string `json:"theme,omitempty"`
+ Index int `json:"index,omitempty"`
+ ChildCount int `json:"childCount,omitempty"`
+ SeasonCount int `json:"seasonCount,omitempty"`
+ LeafCount int `json:"leafCount,omitempty"`
+ ViewedLeafCount int `json:"viewedLeafCount,omitempty"`
+ EnableCreditsMarkerGeneration int `json:"enableCreditsMarkerGeneration,omitempty"`
+ EpisodeSort int `json:"episodeSort,omitempty"`
+ FlattenSeasons int `json:"flattenSeasons,omitempty"`
+ ShowOrdering string `json:"showOrdering,omitempty"`
+ LanguageOverride string `json:"languageOverride,omitempty"`
+ UseOriginalTitle int `json:"useOriginalTitle,omitempty"`
+ AudioLanguage string `json:"audioLanguage,omitempty"`
+ SubtitleLanguage string `json:"subtitleLanguage,omitempty"`
+ SubtitleMode int `json:"subtitleMode,omitempty"`
+ AutoDeletionItemPolicyUnwatchedLibrary int `json:"autoDeletionItemPolicyUnwatchedLibrary,omitempty"`
+ AutoDeletionItemPolicyWatchedLibrary int `json:"autoDeletionItemPolicyWatchedLibrary,omitempty"`
+ Slug string `json:"slug,omitempty"`
+ SourceURI string `json:"sourceURI,omitempty"`
+ Label []Label `json:"Label,omitempty"`
+ Genre []Genre `json:"Genre,omitempty"`
+ Director []Director `json:"Director,omitempty"`
+ Writer []Writer `json:"Writer,omitempty"`
+ Producer []Producer `json:"Producer,omitempty"`
+ Role []Role `json:"Role,omitempty"`
+ Country []Country `json:"Country,omitempty"`
+ Collection []Collection `json:"Collection,omitempty"`
+ Guid FlexibleGuid `json:"Guid,omitempty"`
+ Media []Media `json:"Media,omitempty"`
+ Location []Location `json:"Location,omitempty"`
+}
+
+// MediaItem interface implementation for TVShow
+func (t TVShow) GetRatingKey() string { return t.RatingKey.String() }
+func (t TVShow) GetTitle() string { return t.Title }
+func (t TVShow) GetYear() int { return t.Year }
+func (t TVShow) GetGuid() []Guid { return []Guid(t.Guid) }
+func (t TVShow) GetMedia() []Media { return t.Media }
+func (t TVShow) GetLabel() []Label { return t.Label }
+func (t TVShow) GetGenre() []Genre { return t.Genre }
+
+// Label represents a Plex label
+type Label struct {
+ Tag string `json:"tag"`
+}
+
+// Genre represents a Plex genre
+type Genre struct {
+ Tag string `json:"tag"`
+}
+
+// Guid represents a Plex GUID
+type Guid struct {
+ ID string `json:"id"`
+}
+
+// Media represents Plex media information
+type Media struct {
+ Part []Part `json:"Part,omitempty"`
+}
+
+// Part represents a media part with file information
+type Part struct {
+ File string `json:"file,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+// FlexibleGuid handles both string and array formats from Plex API
+type FlexibleGuid []Guid
+
+func (fg *FlexibleGuid) UnmarshalJSON(data []byte) error {
+ // Try to unmarshal as array first
+ var guidArray []Guid
+ if err := json.Unmarshal(data, &guidArray); err == nil {
+ *fg = FlexibleGuid(guidArray)
+ return nil
+ }
+
+ // If that fails, try as single string
+ var guidString string
+ if err := json.Unmarshal(data, &guidString); err == nil {
+ *fg = FlexibleGuid([]Guid{{ID: guidString}})
+ return nil
+ }
+
+ // If both fail, try as single Guid object
+ var singleGuid Guid
+ if err := json.Unmarshal(data, &singleGuid); err == nil {
+ *fg = FlexibleGuid([]Guid{singleGuid})
+ return nil
+ }
+
+ return fmt.Errorf("cannot unmarshal Guid field")
+}
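+
+// Illustrative usage (hypothetical payloads): all three JSON shapes Plex is
+// known to emit decode to the same []Guid value.
+//
+//  var g1, g2, g3 FlexibleGuid
+//  _ = json.Unmarshal([]byte(`[{"id":"imdb://tt0111161"}]`), &g1) // array of objects
+//  _ = json.Unmarshal([]byte(`"imdb://tt0111161"`), &g2)          // bare string
+//  _ = json.Unmarshal([]byte(`{"id":"imdb://tt0111161"}`), &g3)   // single object
+//  // g1, g2 and g3 all equal FlexibleGuid{{ID: "imdb://tt0111161"}}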
+
+// FlexibleRatingKey can handle both string and integer rating key values
+type FlexibleRatingKey struct {
+ Value string
+}
+
+// UnmarshalJSON implements custom JSON unmarshaling for FlexibleRatingKey
+func (frk *FlexibleRatingKey) UnmarshalJSON(data []byte) error {
+ // Try to unmarshal as a string first
+ var stringValue string
+ if err := json.Unmarshal(data, &stringValue); err == nil {
+ frk.Value = stringValue
+ return nil
+ }
+
+ // If that fails, try to unmarshal as an integer and convert to string
+ var intValue int
+ if err := json.Unmarshal(data, &intValue); err == nil {
+ frk.Value = fmt.Sprintf("%d", intValue)
+ return nil
+ }
+
+ // If both fail, return error
+ return fmt.Errorf("cannot unmarshal %s into FlexibleRatingKey", string(data))
+}
+
+// MarshalJSON implements custom JSON marshaling for FlexibleRatingKey
+func (frk FlexibleRatingKey) MarshalJSON() ([]byte, error) {
+ return json.Marshal(frk.Value)
+}
+
+// String returns the string representation of the rating key
+func (frk FlexibleRatingKey) String() string {
+ return frk.Value
+}
+
+// FlexibleRating can handle both single rating values and arrays of ratings
+type FlexibleRating struct {
+ Value float64
+}
+
+// UnmarshalJSON implements custom JSON unmarshaling for FlexibleRating
+func (fr *FlexibleRating) UnmarshalJSON(data []byte) error {
+ // Try to unmarshal as a single float64 first
+ var singleValue float64
+ if err := json.Unmarshal(data, &singleValue); err == nil {
+ fr.Value = singleValue
+ return nil
+ }
+
+ // If that fails, try to unmarshal as an array and take the first value
+ var arrayValue []float64
+ if err := json.Unmarshal(data, &arrayValue); err == nil {
+ if len(arrayValue) > 0 {
+ fr.Value = arrayValue[0]
+ }
+ return nil
+ }
+
+ // If both fail, treat the rating as unset (0) rather than failing the whole decode
+ fr.Value = 0
+ return nil
+}
+
+// MarshalJSON implements custom JSON marshaling for FlexibleRating
+func (fr FlexibleRating) MarshalJSON() ([]byte, error) {
+ return json.Marshal(fr.Value)
+}
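+
+// Illustrative usage (hypothetical payloads): both flexible types normalize
+// the inconsistent shapes the Plex API can return.
+//
+//  var key FlexibleRatingKey
+//  _ = json.Unmarshal([]byte(`12345`), &key)   // key.Value == "12345"
+//  _ = json.Unmarshal([]byte(`"12345"`), &key) // key.Value == "12345"
+//
+//  var rating FlexibleRating
+//  _ = json.Unmarshal([]byte(`8.5`), &rating)   // rating.Value == 8.5
+//  _ = json.Unmarshal([]byte(`[8.5]`), &rating) // first element is used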
+
+// MediaContainer holds metadata for movies
+type MediaContainer struct {
+ Size int `json:"size"`
+ Metadata []Movie `json:"Metadata"`
+}
+
+// TVShowContainer holds metadata for TV shows
+type TVShowContainer struct {
+ Size int `json:"size"`
+ Metadata []TVShow `json:"Metadata"`
+}
+
+// PlexResponse represents a standard Plex API response for movies
+type PlexResponse struct {
+ MediaContainer MediaContainer `json:"MediaContainer"`
+}
+
+// TVShowResponse represents a Plex API response for TV shows
+type TVShowResponse struct {
+ MediaContainer TVShowContainer `json:"MediaContainer"`
+}
+
+// Episode represents a Plex episode
+type Episode struct {
+ RatingKey FlexibleRatingKey `json:"ratingKey"`
+ Title string `json:"title"`
+ TitleSort string `json:"titleSort,omitempty"`
+ Summary string `json:"summary,omitempty"`
+ Index int `json:"index"` // Episode number
+ ParentIndex int `json:"parentIndex"` // Season number
+ Year int `json:"year,omitempty"`
+ Duration int `json:"duration,omitempty"`
+ ContentRating string `json:"contentRating,omitempty"`
+ Rating FlexibleRating `json:"rating,omitempty"`
+ AudienceRating FlexibleRating `json:"audienceRating,omitempty"`
+ UserRating FlexibleRating `json:"userRating,omitempty"`
+ OriginallyAvailableAt string `json:"originallyAvailableAt,omitempty"`
+ AddedAt int `json:"addedAt,omitempty"`
+ UpdatedAt int `json:"updatedAt,omitempty"`
+ Thumb string `json:"thumb,omitempty"`
+ Art string `json:"art,omitempty"`
+ ChapterSource string `json:"chapterSource,omitempty"`
+ GrandparentTitle string `json:"grandparentTitle,omitempty"` // Show title
+ GrandparentThumb string `json:"grandparentThumb,omitempty"`
+ GrandparentArt string `json:"grandparentArt,omitempty"`
+ GrandparentKey string `json:"grandparentKey,omitempty"`
+ GrandparentRatingKey FlexibleRatingKey `json:"grandparentRatingKey,omitempty"`
+ GrandparentGuid string `json:"grandparentGuid,omitempty"`
+ GrandparentSlug string `json:"grandparentSlug,omitempty"`
+ GrandparentTheme string `json:"grandparentTheme,omitempty"`
+ ParentTitle string `json:"parentTitle,omitempty"` // Season title
+ ParentThumb string `json:"parentThumb,omitempty"`
+ ParentKey string `json:"parentKey,omitempty"`
+ ParentRatingKey FlexibleRatingKey `json:"parentRatingKey,omitempty"`
+ ParentGuid string `json:"parentGuid,omitempty"`
+ ParentYear int `json:"parentYear,omitempty"`
+ SkipParent bool `json:"skipParent,omitempty"`
+ SourceURI string `json:"sourceURI,omitempty"`
+ Label []Label `json:"Label,omitempty"`
+ Genre []Genre `json:"Genre,omitempty"`
+ Director []Director `json:"Director,omitempty"`
+ Writer []Writer `json:"Writer,omitempty"`
+ Producer []Producer `json:"Producer,omitempty"`
+ Role []Role `json:"Role,omitempty"`
+ Collection []Collection `json:"Collection,omitempty"`
+ Guid FlexibleGuid `json:"Guid,omitempty"`
+ Media []Media `json:"Media,omitempty"`
+}
+
+// EpisodeContainer holds metadata for episodes
+type EpisodeContainer struct {
+ Size int `json:"size"`
+ Metadata []Episode `json:"Metadata"`
+}
+
+// EpisodeResponse represents a Plex API response for episodes
+type EpisodeResponse struct {
+ MediaContainer EpisodeContainer `json:"MediaContainer"`
+}
+
+// WatchedState represents the watched state of a media item
+type WatchedState struct {
+ Watched bool `json:"watched"`
+ ViewCount int `json:"viewCount"`
+ ViewOffset int `json:"viewOffset"`
+ LastViewedAt int `json:"lastViewedAt"`
+}
+
+// Activity represents a Plex server activity (like library scanning)
+type Activity struct {
+ UUID string `xml:"uuid,attr" json:"uuid"`
+ Type string `xml:"type,attr" json:"type"`
+ Cancellable int `xml:"cancellable,attr" json:"cancellable"`
+ UserID int `xml:"userID,attr" json:"userID"`
+ Title string `xml:"title,attr" json:"title"`
+ Subtitle string `xml:"subtitle,attr" json:"subtitle"`
+ Progress int `xml:"progress,attr" json:"progress"`
+ Context *ActivityContext `xml:"Context" json:"context,omitempty"`
+}
+
+// ActivityContext provides additional context for activities
+type ActivityContext struct {
+ LibrarySectionID string `xml:"librarySectionID,attr" json:"librarySectionID"`
+}
+
+// ActivitiesResponse represents the response from /activities endpoint
+type ActivitiesResponse struct {
+ XMLName xml.Name `xml:"MediaContainer"`
+ Size int `xml:"size,attr" json:"size"`
+ Activities []Activity `xml:"Activity" json:"activities"`
+}
+
+// Director represents a Plex director
+type Director struct {
+ Tag string `json:"tag"`
+}
+
+// Writer represents a Plex writer
+type Writer struct {
+ Tag string `json:"tag"`
+}
+
+// Producer represents a Plex producer
+type Producer struct {
+ Tag string `json:"tag"`
+}
+
+// Role represents a Plex actor/role
+type Role struct {
+ Tag string `json:"tag"`
+ Role string `json:"role,omitempty"`
+ Thumb string `json:"thumb,omitempty"`
+}
+
+// Country represents a Plex country
+type Country struct {
+ Tag string `json:"tag"`
+}
+
+// Collection represents a Plex collection
+type Collection struct {
+ Tag string `json:"tag"`
+}
+
+// Location represents a Plex location
+type Location struct {
+ ID int `json:"id"`
+ Path string `json:"path"`
+}
diff --git a/internal/transfer/rsync.go b/internal/transfer/rsync.go
new file mode 100644
index 0000000..3d8d6d2
--- /dev/null
+++ b/internal/transfer/rsync.go
@@ -0,0 +1,489 @@
+package transfer
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/nullable-eth/syncarr/internal/config"
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/pkg/types"
+)
+
+// RsyncTransfer handles file transfers using rsync over SSH
+type RsyncTransfer struct {
+ sshConfig *config.SSHConfig
+ serverConfig *config.PlexServerConfig
+ sourceReplaceFrom string
+ sourceReplaceTo string
+ destRootDir string
+ logger *logger.Logger
+ compressionLevel int // 0-9, 0=none, 6=default, 9=max
+ parallelStreams int // Number of parallel rsync streams
+ checksumSkip bool // Skip checksum verification for speed
+}
+
+// NewRsyncTransfer creates a new rsync transfer instance
+func NewRsyncTransfer(cfg *config.Config, log *logger.Logger) (*RsyncTransfer, error) {
+ return &RsyncTransfer{
+ sshConfig: &cfg.SSH,
+ serverConfig: &cfg.Destination,
+ sourceReplaceFrom: cfg.SourceReplaceFrom,
+ sourceReplaceTo: cfg.SourceReplaceTo,
+ destRootDir: cfg.DestRootDir,
+ logger: log,
+ compressionLevel: 1, // Light compression for speed vs bandwidth balance
+ parallelStreams: 4, // Multiple parallel streams
+ checksumSkip: true, // Skip checksums for max speed (trust network)
+ }, nil
+}
+
+// TransferFile transfers a single file using rsync
+func (r *RsyncTransfer) TransferFile(sourcePath, destPath string) error {
+ startTime := time.Now()
+
+ // Get source file info
+ fileInfo, err := os.Stat(sourcePath)
+ if err != nil {
+ return fmt.Errorf("failed to stat source file: %w", err)
+ }
+
+ // Log transfer start
+ r.logger.LogTransferStarted(sourcePath, destPath, fileInfo.Size())
+
+ // Ensure destination directory exists
+ if err := r.ensureDestinationDir(destPath); err != nil {
+ return fmt.Errorf("failed to create destination directory: %w", err)
+ }
+
+ // Build rsync command with optimizations
+ args := r.buildRsyncArgs(sourcePath, destPath)
+
+ cmd := exec.Command("rsync", args...)
+
+ // Capture output for debugging
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ r.logger.WithFields(map[string]interface{}{
+ "source_path": sourcePath,
+ "dest_path": destPath,
+ "rsync_args": strings.Join(args, " "),
+ "output": string(output),
+ }).Error("Rsync command failed")
+ return fmt.Errorf("rsync failed: %w", err)
+ }
+
+ duration := time.Since(startTime)
+ r.logger.LogTransferCompleted(sourcePath, destPath, fileInfo.Size(), duration)
+
+ return nil
+}
+
+// TransferFiles transfers multiple files using rsync (can batch for efficiency)
+func (r *RsyncTransfer) TransferFiles(files []types.FileTransfer) error {
+ // For small numbers of files, transfer individually
+ if len(files) <= 3 {
+ for _, file := range files {
+ if err := r.TransferFile(file.SourcePath, file.DestPath); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // For larger batches, use rsync's batch capabilities
+ return r.transferFilesBatch(files)
+}
+
+// buildRsyncArgs builds optimized rsync arguments
+func (r *RsyncTransfer) buildRsyncArgs(sourcePath, destPath string) []string {
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+ remoteDest := fmt.Sprintf("%s:%s", remoteHost, destPath)
+
+ args := []string{
+ "-avz", // Archive mode, verbose, compression
+ "--progress", // Show progress
+ "--partial", // Keep partial transfers
+ "--inplace", // Update files in place (faster for large files)
+ }
+
+ // Compression settings
+ if r.compressionLevel > 0 {
+ args = append(args, fmt.Sprintf("--compress-level=%d", r.compressionLevel))
+ } else {
+ // Remove compression if level is 0
+ args[0] = "-av"
+ }
+
+ // Skip rsync's delta and quick-check work for speed: send whole files and
+ // compare by size only (trust the network)
+ if r.checksumSkip {
+  args = append(args, "--whole-file", "--size-only")
+ }
+
+ // SSH options for performance
+ sshOpts := []string{
+ "-o", "Compression=no", // Handle compression in rsync, not SSH
+ "-o", "TCPKeepAlive=yes",
+ "-o", "ServerAliveInterval=30",
+ "-o", "ServerAliveCountMax=6",
+ "-o", "StrictHostKeyChecking=no",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshOpts = append(sshOpts, "-p", r.sshConfig.Port)
+ }
+
+ // Build SSH command - use sshpass for password authentication
+ var sshCmd string
+ if r.sshConfig.Password != "" {
+ sshCmd = fmt.Sprintf("sshpass -p '%s' ssh %s", r.sshConfig.Password, strings.Join(sshOpts, " "))
+ r.logger.Debug("Using sshpass for SSH password authentication")
+ } else {
+ sshCmd = fmt.Sprintf("ssh %s", strings.Join(sshOpts, " "))
+ r.logger.Debug("Using SSH key-based authentication")
+ }
+
+ args = append(args, "-e", sshCmd)
+ args = append(args, sourcePath, remoteDest)
+
+ return args
+}
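+
+// For reference, with the defaults set in NewRsyncTransfer (compressionLevel=1,
+// checksumSkip=true) buildRsyncArgs produces a command roughly like the
+// following; host, user and paths are placeholders:
+//
+//  rsync -avz --progress --partial --inplace --compress-level=1 \
+//    --whole-file --size-only \
+//    -e "ssh -o Compression=no -o TCPKeepAlive=yes -o ServerAliveInterval=30 -o ServerAliveCountMax=6 -o StrictHostKeyChecking=no" \
+//    /media/source/file.mkv user@dest-host:/media/dest/file.mkv
+//
+// Note: embedding the password via sshpass -p '<password>' assumes the
+// password contains no single quotes; rsync parses the quoted -e string itself.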
+
+// transferFilesBatch transfers multiple files in batches for efficiency
+func (r *RsyncTransfer) transferFilesBatch(files []types.FileTransfer) error {
+ // Group files by directory for more efficient transfers
+ dirGroups := make(map[string][]types.FileTransfer)
+
+ for _, file := range files {
+ sourceDir := filepath.Dir(file.SourcePath)
+ dirGroups[sourceDir] = append(dirGroups[sourceDir], file)
+ }
+
+ // Transfer each directory group
+ for sourceDir, dirFiles := range dirGroups {
+ if err := r.transferDirectoryBatch(sourceDir, dirFiles); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// transferDirectoryBatch transfers all files in a directory efficiently
+func (r *RsyncTransfer) transferDirectoryBatch(sourceDir string, files []types.FileTransfer) error {
+ if len(files) == 0 {
+ return nil
+ }
+
+ // Create include file for specific files
+ includeFile, err := r.createIncludeFile(sourceDir, files)
+ if err != nil {
+ return fmt.Errorf("failed to create include file: %w", err)
+ }
+ defer os.Remove(includeFile)
+
+ // Use the first file's destination to determine the target directory
+ // (this assumes every file from one source directory shares a destination directory)
+ destDir := filepath.Dir(files[0].DestPath)
+
+ // Ensure destination directory exists for all files in batch
+ for _, file := range files {
+ if err := r.ensureDestinationDir(file.DestPath); err != nil {
+ return fmt.Errorf("failed to create destination directory for %s: %w", file.DestPath, err)
+ }
+ }
+
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+ remoteDest := fmt.Sprintf("%s:%s/", remoteHost, destDir)
+
+ args := []string{
+ "-avz",
+ "--progress",
+ "--partial",
+ "--inplace",
+ fmt.Sprintf("--include-from=%s", includeFile),
+ "--exclude=*", // Exclude everything not in include file
+ }
+
+ // Add SSH options
+ sshOpts := []string{
+ "-o", "Compression=no",
+ "-o", "TCPKeepAlive=yes",
+ "-o", "StrictHostKeyChecking=no",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshOpts = append(sshOpts, "-p", r.sshConfig.Port)
+ }
+
+ // Build SSH command - use sshpass for password authentication
+ var sshCmd string
+ if r.sshConfig.Password != "" {
+ sshCmd = fmt.Sprintf("sshpass -p '%s' ssh %s", r.sshConfig.Password, strings.Join(sshOpts, " "))
+ r.logger.Debug("Using sshpass for batch transfer with SSH password authentication")
+ } else {
+ sshCmd = fmt.Sprintf("ssh %s", strings.Join(sshOpts, " "))
+ r.logger.Debug("Using SSH key-based authentication for batch transfer")
+ }
+
+ args = append(args, "-e", sshCmd)
+ args = append(args, sourceDir+"/", remoteDest)
+
+ cmd := exec.Command("rsync", args...)
+ output, err := cmd.CombinedOutput()
+
+ if err != nil {
+ r.logger.WithFields(map[string]interface{}{
+ "source_dir": sourceDir,
+ "dest_dir": destDir,
+ "file_count": len(files),
+ "rsync_args": strings.Join(args, " "),
+ "output": string(output),
+ }).Error("Batch rsync failed")
+ return fmt.Errorf("batch rsync failed: %w", err)
+ }
+
+ return nil
+}
+
+// createIncludeFile creates a temporary file listing specific files to include
+func (r *RsyncTransfer) createIncludeFile(baseDir string, files []types.FileTransfer) (string, error) {
+ tmpFile, err := os.CreateTemp("", "rsync-include-*.txt")
+ if err != nil {
+  return "", err
+ }
+ defer tmpFile.Close()
+
+ for _, file := range files {
+  // Get path relative to the base directory (include patterns are relative)
+  relPath, err := filepath.Rel(baseDir, file.SourcePath)
+  if err != nil {
+   os.Remove(tmpFile.Name()) // don't leak the temp file on failure
+   return "", fmt.Errorf("failed to get relative path: %w", err)
+  }
+
+  // Write one include pattern per line
+  if _, err := fmt.Fprintln(tmpFile, relPath); err != nil {
+   os.Remove(tmpFile.Name()) // don't leak the temp file on failure
+   return "", err
+  }
+ }
+
+ return tmpFile.Name(), nil
+}
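+
+// For reference: because files are grouped by their containing directory, the
+// include file holds bare filenames relative to sourceDir, e.g.
+//
+//  Movie (2020).mkv
+//  Movie (2020).en.srt
+//
+// Combined with --exclude=*, rsync then transfers exactly these entries from
+// sourceDir and nothing else.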
+
+// Close is a no-op for rsync (no persistent connections)
+func (r *RsyncTransfer) Close() error {
+ return nil
+}
+
+// DeleteFile deletes a file on the remote server using SSH
+func (r *RsyncTransfer) DeleteFile(path string) error {
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+
+ sshCmd := []string{
+ "ssh",
+ "-o", "StrictHostKeyChecking=no",
+ "-o", "ConnectTimeout=10",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshCmd = append(sshCmd, "-p", r.sshConfig.Port)
+ }
+
+ sshCmd = append(sshCmd, remoteHost, fmt.Sprintf("rm -f '%s'", path))
+
+ cmd := exec.Command(sshCmd[0], sshCmd[1:]...)
+ return cmd.Run()
+}
+
+// ListDirectoryContents recursively lists all files in a directory
+func (r *RsyncTransfer) ListDirectoryContents(rootPath string) ([]string, error) {
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+
+ sshCmd := []string{
+ "ssh",
+ "-o", "StrictHostKeyChecking=no",
+ "-o", "ConnectTimeout=10",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshCmd = append(sshCmd, "-p", r.sshConfig.Port)
+ }
+
+ sshCmd = append(sshCmd, remoteHost, fmt.Sprintf("find '%s' -type f", rootPath))
+
+ cmd := exec.Command(sshCmd[0], sshCmd[1:]...)
+ output, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+
+ files := strings.Split(strings.TrimSpace(string(output)), "\n")
+ if len(files) == 1 && files[0] == "" {
+ return []string{}, nil
+ }
+
+ r.logger.WithFields(map[string]interface{}{
+ "root_path": rootPath,
+ "file_count": len(files),
+ }).Debug("Listed directory contents")
+
+ return files, nil
+}
+
+// FileExists checks if a file exists on the remote server using SSH
+func (r *RsyncTransfer) FileExists(path string) (bool, error) {
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+
+ sshCmd := []string{
+ "ssh",
+ "-o", "StrictHostKeyChecking=no",
+ "-o", "ConnectTimeout=10",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshCmd = append(sshCmd, "-p", r.sshConfig.Port)
+ }
+
+ sshCmd = append(sshCmd, remoteHost, fmt.Sprintf("test -f '%s'", path))
+
+ cmd := exec.Command(sshCmd[0], sshCmd[1:]...)
+ // Note: `test -f` exits non-zero both when the file is missing and when the
+ // SSH connection itself fails, so any error is reported as "not found"
+ err := cmd.Run()
+
+ return err == nil, nil
+}
+
+// GetFileSize returns the size of a remote file
+func (r *RsyncTransfer) GetFileSize(path string) (int64, error) {
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+
+ sshCmd := []string{
+ "ssh",
+ "-o", "StrictHostKeyChecking=no",
+ "-o", "ConnectTimeout=10",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshCmd = append(sshCmd, "-p", r.sshConfig.Port)
+ }
+
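+ // BSD/macOS stat reports size with -f%z while GNU coreutils stat uses -c%s,
+ // so try the BSD form first and fall back to the GNU form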
+ sshCmd = append(sshCmd, remoteHost, fmt.Sprintf("stat -f%%z '%s' 2>/dev/null || stat -c%%s '%s'", path, path))
+
+ cmd := exec.Command(sshCmd[0], sshCmd[1:]...)
+ output, err := cmd.Output()
+ if err != nil {
+ return 0, err
+ }
+
+ var size int64
+ if _, err := fmt.Sscanf(strings.TrimSpace(string(output)), "%d", &size); err != nil {
+ return 0, err
+ }
+
+ return size, nil
+}
+
+// MapSourcePathToLocal converts a source Plex server path to a local filesystem path
+func (r *RsyncTransfer) MapSourcePathToLocal(sourcePath string) (string, error) {
+ if sourcePath == "" {
+ return "", fmt.Errorf("source path is empty")
+ }
+
+ // If no source replacement configured, use the Plex path as-is
+ if r.sourceReplaceFrom == "" || r.sourceReplaceTo == "" {
+ return filepath.FromSlash(sourcePath), nil
+ }
+
+ // Apply source replacement pattern
+ sourcePathNorm := filepath.ToSlash(sourcePath)
+ sourceReplaceFromNorm := filepath.ToSlash(r.sourceReplaceFrom)
+
+ if !strings.HasPrefix(sourcePathNorm, sourceReplaceFromNorm) {
+ return "", fmt.Errorf("source path %s does not start with replacement pattern %s", sourcePath, r.sourceReplaceFrom)
+ }
+
+ relativePath := strings.TrimPrefix(sourcePathNorm, sourceReplaceFromNorm)
+ relativePath = strings.TrimPrefix(relativePath, "/")
+
+ localPath := filepath.Join(r.sourceReplaceTo, relativePath)
+ return localPath, nil
+}
+
+// MapLocalPathToDest converts a local filesystem path to a destination server path
+func (r *RsyncTransfer) MapLocalPathToDest(localPath string) (string, error) {
+ if localPath == "" {
+ return "", fmt.Errorf("local path is empty")
+ }
+
+ if r.destRootDir == "" {
+ return "", fmt.Errorf("destination root directory not configured")
+ }
+
+ var relativePath string
+
+ if r.sourceReplaceTo != "" {
+ localPathNorm := filepath.ToSlash(localPath)
+ sourceReplaceToNorm := filepath.ToSlash(r.sourceReplaceTo)
+
+ if !strings.HasPrefix(localPathNorm, sourceReplaceToNorm) {
+ return "", fmt.Errorf("local path %s does not start with source replacement root %s", localPath, r.sourceReplaceTo)
+ }
+
+ relativePath = strings.TrimPrefix(localPathNorm, sourceReplaceToNorm)
+ relativePath = strings.TrimPrefix(relativePath, "/")
+ } else {
+ relativePath = filepath.Base(localPath)
+ }
+
+ destPath := strings.TrimSuffix(r.destRootDir, "/") + "/" + relativePath
+ return destPath, nil
+}
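+
+// Worked example (hypothetical paths): with sourceReplaceFrom="/data",
+// sourceReplaceTo="/mnt/media" and destRootDir="/media/dest", a Plex path maps
+// through both helpers as:
+//
+//  "/data/movies/Movie.mkv"            // Plex source path
+//   -> "/mnt/media/movies/Movie.mkv"   // MapSourcePathToLocal
+//   -> "/media/dest/movies/Movie.mkv"  // MapLocalPathToDest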
+
+// ensureDestinationDir creates the destination directory on the remote server if it doesn't exist
+func (r *RsyncTransfer) ensureDestinationDir(destPath string) error {
+ // Extract directory from destination path
+ destDir := filepath.Dir(destPath)
+
+ // Build SSH command to create directory
+ remoteHost := fmt.Sprintf("%s@%s", r.sshConfig.User, r.serverConfig.Host)
+ mkdirCmd := fmt.Sprintf("mkdir -p '%s'", destDir)
+
+ // SSH options
+ sshOpts := []string{
+ "-o", "StrictHostKeyChecking=no",
+ "-o", "ConnectTimeout=10",
+ }
+
+ if r.sshConfig.Port != "" && r.sshConfig.Port != "22" {
+ sshOpts = append(sshOpts, "-p", r.sshConfig.Port)
+ }
+
+ // Build command with authentication
+ var cmd *exec.Cmd
+ if r.sshConfig.Password != "" {
+ // Use sshpass for password authentication
+ args := append([]string{"-p", r.sshConfig.Password, "ssh"}, sshOpts...)
+ args = append(args, remoteHost, mkdirCmd)
+ cmd = exec.Command("sshpass", args...)
+ r.logger.WithField("dest_dir", destDir).Debug("Creating remote directory with sshpass")
+ } else {
+ // Use SSH key-based authentication
+ args := append(sshOpts, remoteHost, mkdirCmd)
+ cmd = exec.Command("ssh", args...)
+ r.logger.WithField("dest_dir", destDir).Debug("Creating remote directory with SSH keys")
+ }
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ r.logger.WithFields(map[string]interface{}{
+ "dest_dir": destDir,
+ "output": string(output),
+ }).Warn("Failed to create remote directory (may already exist)")
+ // Don't return error - directory might already exist
+ } else {
+ r.logger.WithField("dest_dir", destDir).Debug("Remote directory created successfully")
+ }
+
+ return nil
+}
diff --git a/internal/transfer/scp.go b/internal/transfer/scp.go
new file mode 100644
index 0000000..a8df284
--- /dev/null
+++ b/internal/transfer/scp.go
@@ -0,0 +1,585 @@
+package transfer
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/nullable-eth/syncarr/internal/config"
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/pkg/types"
+ "github.com/pkg/sftp"
+ "golang.org/x/crypto/ssh"
+)
+
+// SCPTransfer handles file transfers over SSH using the SFTP subsystem
+type SCPTransfer struct {
+ sshConfig *config.SSHConfig
+ serverConfig *config.PlexServerConfig
+ sourceReplaceFrom string // Optional: Source path pattern to replace
+ sourceReplaceTo string // Optional: Local path replacement
+ destRootDir string // Required: Destination root directory
+ logger *logger.Logger
+ sshClient *ssh.Client
+ sftpClient *sftp.Client
+ bufferSize int // Buffer size for transfers
+ maxConcurrent int // Maximum concurrent transfers
+}
+
+// NewSCPTransfer creates a new SFTP-backed transfer instance
+func NewSCPTransfer(cfg *config.Config, log *logger.Logger) (*SCPTransfer, error) {
+ transfer := &SCPTransfer{
+ sshConfig: &cfg.SSH,
+ serverConfig: &cfg.Destination,
+ sourceReplaceFrom: cfg.SourceReplaceFrom,
+ sourceReplaceTo: cfg.SourceReplaceTo,
+ destRootDir: cfg.DestRootDir,
+ logger: log,
+ bufferSize: 1024 * 1024, // 1MB buffer for better performance
+ maxConcurrent: 3, // Allow up to 3 concurrent transfers
+ }
+
+ // Establish SSH connection
+ if err := transfer.connect(); err != nil {
+ return nil, fmt.Errorf("failed to establish SSH connection: %w", err)
+ }
+
+ return transfer, nil
+}
+
+// connect establishes SSH and SFTP connections
+func (s *SCPTransfer) connect() error {
+ // Create SSH client configuration with password authentication and optimized settings
+ sshClientConfig := &ssh.ClientConfig{
+ User: s.sshConfig.User,
+ Auth: []ssh.AuthMethod{
+ ssh.Password(s.sshConfig.Password),
+ },
+  HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Host key verification is skipped for convenience; this allows MITM on untrusted networks
+ Timeout: 30 * time.Second,
+ // Optimize for high throughput
+ Config: ssh.Config{
+ Ciphers: []string{
+ "aes128-ctr", "aes192-ctr", "aes256-ctr", // Faster AES-CTR ciphers
+ "aes128-gcm@openssh.com", "aes256-gcm@openssh.com",
+ },
+ },
+ }
+
+ // Connect to SSH server using destination Plex server host
+ sshAddr := fmt.Sprintf("%s:%s", s.serverConfig.Host, s.sshConfig.Port)
+ sshClient, err := ssh.Dial("tcp", sshAddr, sshClientConfig)
+ if err != nil {
+ return fmt.Errorf("failed to connect to SSH server %s: %w", sshAddr, err)
+ }
+ s.sshClient = sshClient
+
+ // Create SFTP client
+ sftpClient, err := sftp.NewClient(sshClient)
+ if err != nil {
+ if closeErr := sshClient.Close(); closeErr != nil {
+ s.logger.WithError(closeErr).Warn("Failed to close SSH client after SFTP creation error")
+ }
+ return fmt.Errorf("failed to create SFTP client: %w", err)
+ }
+ s.sftpClient = sftpClient
+
+ s.logger.WithField("ssh_host", sshAddr).Info("Successfully connected to SSH/SFTP server")
+ return nil
+}
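+
+// connect currently supports password authentication only. A minimal sketch of
+// adding key-based auth, assuming the key path comes from configuration (the
+// keyPath variable below is hypothetical):
+//
+//  keyBytes, err := os.ReadFile(keyPath)
+//  if err != nil {
+//      return fmt.Errorf("failed to read private key: %w", err)
+//  }
+//  signer, err := ssh.ParsePrivateKey(keyBytes)
+//  if err != nil {
+//      return fmt.Errorf("failed to parse private key: %w", err)
+//  }
+//  sshClientConfig.Auth = append(sshClientConfig.Auth, ssh.PublicKeys(signer))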
+
+// Close closes the SSH and SFTP connections
+func (s *SCPTransfer) Close() error {
+ var errs []error
+
+ if s.sftpClient != nil {
+ if err := s.sftpClient.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("failed to close SFTP client: %w", err))
+ }
+ }
+
+ if s.sshClient != nil {
+ if err := s.sshClient.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("failed to close SSH client: %w", err))
+ }
+ }
+
+ if len(errs) > 0 {
+ return fmt.Errorf("errors closing connections: %v", errs)
+ }
+
+ return nil
+}
+
+// TransferFile transfers a single file from source to destination
+func (s *SCPTransfer) TransferFile(sourcePath, destPath string) error {
+ startTime := time.Now()
+
+ // Get source file info first
+ fileInfo, err := os.Stat(sourcePath)
+ if err != nil {
+ return fmt.Errorf("failed to stat source file: %w", err)
+ }
+
+ // Check if destination file already exists
+ destExists, err := s.FileExists(destPath)
+ if err != nil {
+ s.logger.WithError(err).WithField("dest_path", destPath).Warn("Failed to check if destination file exists, proceeding with transfer")
+ } else if destExists {
+ // Check if sizes match - if so, skip transfer entirely
+ destSize, err := s.GetFileSize(destPath)
+ if err != nil {
+ s.logger.WithError(err).WithField("dest_path", destPath).Warn("Failed to get destination file size, proceeding with transfer")
+ } else if destSize == fileInfo.Size() {
+ // Files are the same size, return early without any transfer logging
+ return nil
+ }
+ }
+
+ // If we get here, we're actually going to transfer the file
+ s.logger.LogTransferStarted(sourcePath, destPath, fileInfo.Size())
+
+ // Create destination directory if it doesn't exist
+ // Use forward slashes for remote paths (SFTP always uses Unix-style paths)
+ lastSlash := strings.LastIndex(destPath, "/")
+ if lastSlash == -1 {
+ return fmt.Errorf("invalid destination path format: %s", destPath)
+ }
+ destDir := destPath[:lastSlash]
+
+ s.logger.WithFields(map[string]interface{}{
+ "dest_dir": destDir,
+ "dest_path": destPath,
+ }).Debug("Creating destination directory")
+
+ if err := s.sftpClient.MkdirAll(destDir); err != nil {
+ return fmt.Errorf("failed to create destination directory %s: %w", destDir, err)
+ }
+
+ // Verify directory was created successfully
+ if dirInfo, err := s.sftpClient.Stat(destDir); err != nil {
+ return fmt.Errorf("destination directory %s was not created successfully: %w", destDir, err)
+ } else if !dirInfo.IsDir() {
+ return fmt.Errorf("destination path %s exists but is not a directory", destDir)
+ }
+
+ s.logger.WithField("dest_dir", destDir).Debug("Destination directory verified")
+
+ // Test write permissions by trying to create a temporary test file
+ testFilePath := destDir + "/.sync_test_" + fmt.Sprintf("%d", time.Now().UnixNano())
+ if testFile, testErr := s.sftpClient.Create(testFilePath); testErr != nil {
+ s.logger.WithFields(map[string]interface{}{
+ "dest_dir": destDir,
+ "test_file_path": testFilePath,
+ "test_error": testErr.Error(),
+ }).Error("Cannot create test file in destination directory - permissions issue?")
+ } else {
+ testFile.Close()
+ if removeErr := s.sftpClient.Remove(testFilePath); removeErr != nil {
+ s.logger.WithFields(map[string]interface{}{
+ "test_file_path": testFilePath,
+ "remove_error": removeErr.Error(),
+ }).Warn("Failed to clean up test file")
+ }
+ s.logger.WithField("dest_dir", destDir).Debug("Write permissions verified with test file")
+ }
+
+ // Open source file
+ srcFile, err := os.Open(sourcePath)
+ if err != nil {
+ return fmt.Errorf("failed to open source file: %w", err)
+ }
+ defer srcFile.Close()
+
+ // Create destination file with more detailed error context
+ s.logger.WithFields(map[string]interface{}{
+ "dest_path": destPath,
+ "dest_dir": destDir,
+ }).Debug("Creating destination file")
+
+ dstFile, err := s.sftpClient.Create(destPath)
+ if err != nil {
+ // Check if directory exists to provide better error context
+ if dirInfo, dirErr := s.sftpClient.Stat(destDir); dirErr != nil {
+ return fmt.Errorf("failed to create destination file %s: destination directory %s does not exist or is not accessible: %w (original error: %v)", destPath, destDir, dirErr, err)
+ } else if !dirInfo.IsDir() {
+ return fmt.Errorf("failed to create destination file %s: %s exists but is not a directory: %w", destPath, destDir, err)
+ } else {
+ // Directory exists, let's get more debugging info
+ s.logger.WithFields(map[string]interface{}{
+ "dest_dir": destDir,
+ "dir_mode": dirInfo.Mode().String(),
+ "dir_size": dirInfo.Size(),
+ "create_error": err.Error(),
+ }).Error("Directory exists but file creation failed")
+
+ // Try to list directory contents for debugging
+ if entries, listErr := s.sftpClient.ReadDir(destDir); listErr != nil {
+ s.logger.WithError(listErr).WithField("dest_dir", destDir).Debug("Could not list directory contents")
+ } else {
+ s.logger.WithFields(map[string]interface{}{
+ "dest_dir": destDir,
+ "entry_count": len(entries),
+ }).Debug("Directory contents listed successfully")
+ }
+
+ return fmt.Errorf("failed to create destination file %s in existing directory %s (mode: %s): %w", destPath, destDir, dirInfo.Mode().String(), err)
+ }
+ }
+ defer dstFile.Close()
+
+ // Copy file contents with optimized buffer
+ buffer := make([]byte, s.bufferSize)
+ bytesTransferred, err := io.CopyBuffer(dstFile, srcFile, buffer)
+ if err != nil {
+ return fmt.Errorf("failed to copy file contents: %w", err)
+ }
+
+ // Verify file size
+ if bytesTransferred != fileInfo.Size() {
+ return fmt.Errorf("file size mismatch: expected %d, transferred %d",
+ fileInfo.Size(), bytesTransferred)
+ }
+
+ duration := time.Since(startTime)
+ s.logger.LogTransferCompleted(sourcePath, destPath, bytesTransferred, duration)
+
+ return nil
+}
+
+// TransferItemFiles implements Phase 3: Directory-Based File Transfer
+// Copy all files in the containing directories (including subtitles) to the destination server
+func (s *SCPTransfer) TransferItemFiles(item *types.SyncableItem) error {
+ s.logger.WithField("item", s.getItemIdentifier(item)).Info("Phase 3: Starting directory-based file transfer")
+
+ // TODO: Uncomment when plexgo library implements proper Media and Part structures for file paths
+ // filePaths, err := s.getItemFilePaths(item)
+ // if err != nil {
+ // return fmt.Errorf("failed to get file paths for item: %w", err)
+ // }
+
+ // PLACEHOLDER: Generate placeholder file paths until proper implementation
+ filePaths := s.generatePlaceholderFilePaths(item)
+
+ if len(filePaths) == 0 {
+ s.logger.WithField("item", s.getItemIdentifier(item)).Warn("No file paths found for item")
+ return nil
+ }
+
+ // Transfer entire directories containing the files
+ processedDirs := make(map[string]bool)
+
+ for _, filePath := range filePaths {
+ sourceDir := filepath.Dir(filePath)
+
+ // Skip if we've already processed this directory
+ if processedDirs[sourceDir] {
+ continue
+ }
+ processedDirs[sourceDir] = true
+
+ destDir := s.calculateDestPath(sourceDir)
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_dir": sourceDir,
+ "dest_dir": destDir,
+ }).Info("Transferring entire directory (includes subtitles, extras, etc.)")
+
+ // Copy entire directory (includes subtitles, extras, etc.)
+ if err := s.CopyDirectory(sourceDir, destDir); err != nil {
+ return fmt.Errorf("failed to copy directory %s to %s: %w", sourceDir, destDir, err)
+ }
+ }
+
+ s.logger.WithField("item", s.getItemIdentifier(item)).Info("Phase 3: Directory-based file transfer complete")
+ return nil
+}
+
+// TransferFiles transfers multiple files (legacy method, kept for compatibility)
+func (s *SCPTransfer) TransferFiles(files []types.FileTransfer) error {
+ for _, file := range files {
+ if err := s.TransferFile(file.SourcePath, file.DestPath); err != nil {
+ s.logger.LogError(err, map[string]interface{}{
+ "source_path": file.SourcePath,
+ "dest_path": file.DestPath,
+ })
+ return err
+ }
+ }
+ return nil
+}
+
+// TransferFilesParallel transfers multiple files in parallel for better performance
+func (s *SCPTransfer) TransferFilesParallel(files []types.FileTransfer) error {
+ if len(files) == 0 {
+ return nil
+ }
+
+ // Use a semaphore to limit concurrent transfers
+ semaphore := make(chan struct{}, s.maxConcurrent)
+ errChan := make(chan error, len(files))
+
+ var wg sync.WaitGroup
+
+ for _, file := range files {
+ wg.Add(1)
+ go func(f types.FileTransfer) {
+ defer wg.Done()
+
+ // Acquire semaphore
+ semaphore <- struct{}{}
+ defer func() { <-semaphore }()
+
+ if err := s.TransferFile(f.SourcePath, f.DestPath); err != nil {
+ s.logger.LogError(err, map[string]interface{}{
+ "source_path": f.SourcePath,
+ "dest_path": f.DestPath,
+ })
+ errChan <- err
+ return
+ }
+ }(file)
+ }
+
+ // Wait for all transfers to complete
+ wg.Wait()
+ close(errChan)
+
+ // Return the first error encountered; thanks to the WaitGroup above, all
+ // transfers have already run to completion by this point
+ for err := range errChan {
+  if err != nil {
+   return err
+  }
+ }
+
+ return nil
+}
+
+// FileExists checks if a file exists on the remote server
+func (s *SCPTransfer) FileExists(path string) (bool, error) {
+ _, err := s.sftpClient.Stat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
+// GetFileSize returns the size of a remote file
+func (s *SCPTransfer) GetFileSize(path string) (int64, error) {
+ stat, err := s.sftpClient.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ return stat.Size(), nil
+}
+
+// DeleteFile deletes a file on the remote server
+func (s *SCPTransfer) DeleteFile(path string) error {
+ return s.sftpClient.Remove(path)
+}
+
+// CreateDirectory creates a directory on the remote server
+func (s *SCPTransfer) CreateDirectory(path string) error {
+ return s.sftpClient.MkdirAll(path)
+}
+
+// ListDirectoryContents recursively lists all files in a directory
+func (s *SCPTransfer) ListDirectoryContents(rootPath string) ([]string, error) {
+ var allFiles []string
+
+ err := s.walkDirectory(rootPath, func(path string, info os.FileInfo) error {
+ if !info.IsDir() {
+ allFiles = append(allFiles, path)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to walk directory: %w", err)
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "root_path": rootPath,
+ "file_count": len(allFiles),
+ }).Debug("Listed directory contents")
+
+ return allFiles, nil
+}
+
+// walkDirectory recursively walks a directory tree on the remote server
+func (s *SCPTransfer) walkDirectory(path string, walkFunc func(path string, info os.FileInfo) error) error {
+ entries, err := s.sftpClient.ReadDir(path)
+ if err != nil {
+ return fmt.Errorf("failed to read directory %s: %w", path, err)
+ }
+
+ for _, entry := range entries {
+ entryPath := strings.TrimRight(path, "/") + "/" + entry.Name()
+
+ // Call the walk function for this entry
+ if err := walkFunc(entryPath, entry); err != nil {
+ return err
+ }
+
+ // If it's a directory, recursively walk it
+ if entry.IsDir() {
+ if err := s.walkDirectory(entryPath, walkFunc); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// CopyDirectory copies an entire directory from source to destination
+func (s *SCPTransfer) CopyDirectory(sourceDir, destDir string) error {
+ s.logger.WithFields(map[string]interface{}{
+ "source_dir": sourceDir,
+ "dest_dir": destDir,
+ }).Info("Starting directory copy")
+
+ // TODO: Implement recursive directory copying
+ // This would involve:
+ // 1. Walking the source directory tree
+ // 2. Creating destination directories
+ // 3. Copying all files including subtitles, extras, etc.
+
+ // PLACEHOLDER: Just create the destination directory for now
+ if err := s.sftpClient.MkdirAll(destDir); err != nil {
+ return fmt.Errorf("failed to create destination directory: %w", err)
+ }
+
+ s.logger.WithField("dest_dir", destDir).Warn("Directory copy not fully implemented - only created destination directory")
+ return nil
+}
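+
+// A minimal sketch of the recursive copy described in the TODO above, assuming
+// the local tree is walked with filepath.WalkDir (this would require the io/fs
+// and path imports) and each file reuses TransferFile:
+//
+//  err := filepath.WalkDir(sourceDir, func(p string, d fs.DirEntry, walkErr error) error {
+//      if walkErr != nil || d.IsDir() {
+//          return walkErr
+//      }
+//      rel, err := filepath.Rel(sourceDir, p)
+//      if err != nil {
+//          return err
+//      }
+//      // Remote paths are always forward-slashed
+//      return s.TransferFile(p, path.Join(destDir, filepath.ToSlash(rel)))
+//  })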
+
+// calculateDestPath generates the destination path for a source directory
+func (s *SCPTransfer) calculateDestPath(sourceDir string) string {
+ // TODO: Implement proper path mapping based on configuration
+ // This should map source paths to destination paths based on:
+ // - DEST_MEDIA_PATH configuration
+ // - Library-specific path mappings
+ // - Volume mount configurations
+
+ // PLACEHOLDER: Simple path mapping
+ baseName := filepath.Base(sourceDir)
+ destPath := filepath.Join("/media/sync", baseName)
+
+ s.logger.WithFields(map[string]interface{}{
+ "source_dir": sourceDir,
+ "dest_path": destPath,
+ }).Debug("Generated destination path (placeholder logic)")
+
+ return destPath
+}
+
+// generatePlaceholderFilePaths generates placeholder file paths for testing
+func (s *SCPTransfer) generatePlaceholderFilePaths(item *types.SyncableItem) []string {
+ // PLACEHOLDER: Generate fake file paths until proper file path extraction is available
+ title := item.Title
+ if title == "" {
+ title = "unknown"
+ }
+
+ // Generate placeholder paths
+ placeholderPaths := []string{
+ fmt.Sprintf("/media/source/%s/%s.mkv", title, title),
+ fmt.Sprintf("/media/source/%s/%s.srt", title, title), // subtitle file
+ }
+
+ s.logger.WithFields(map[string]interface{}{
+ "item_title": title,
+ "placeholder_paths": placeholderPaths,
+ }).Debug("Generated placeholder file paths - not real file paths")
+
+ return placeholderPaths
+}
+
+// getItemIdentifier returns a string identifier for the item for logging purposes
+func (s *SCPTransfer) getItemIdentifier(item *types.SyncableItem) string {
+ identifier := item.RatingKey
+ if identifier == "" {
+ identifier = item.Title
+ }
+ if identifier == "" {
+ identifier = "unknown"
+ }
+
+ return identifier
+}
+
+// MapSourcePathToLocal converts a source Plex server path to a local filesystem path
+// If source replacement is configured, applies the pattern replacement
+// Otherwise, uses the Plex path as-is (useful for mounted volumes)
+func (s *SCPTransfer) MapSourcePathToLocal(sourcePath string) (string, error) {
+ if sourcePath == "" {
+ return "", fmt.Errorf("source path is empty")
+ }
+
+ // If no source replacement configured, use the Plex path as-is
+ if s.sourceReplaceFrom == "" || s.sourceReplaceTo == "" {
+ // Convert to local path separators for the current OS
+ localPath := filepath.FromSlash(sourcePath)
+ return localPath, nil
+ }
+
+ // Apply source replacement pattern
+ // Normalize paths for comparison (always use forward slashes)
+ sourcePathNorm := filepath.ToSlash(sourcePath)
+ sourceReplaceFromNorm := filepath.ToSlash(s.sourceReplaceFrom)
+
+ // Check if the source path starts with the replacement pattern
+ if !strings.HasPrefix(sourcePathNorm, sourceReplaceFromNorm) {
+ return "", fmt.Errorf("source path %s does not start with replacement pattern %s", sourcePath, s.sourceReplaceFrom)
+ }
+
+ // Remove the source pattern and replace with local pattern
+ relativePath := strings.TrimPrefix(sourcePathNorm, sourceReplaceFromNorm)
+ relativePath = strings.TrimPrefix(relativePath, "/") // Remove leading slash
+
+ // Build local path with proper separators for the target OS
+ localPath := filepath.Join(s.sourceReplaceTo, relativePath)
+ return localPath, nil
+}
+
+// MapLocalPathToDest converts a local filesystem path to a destination server path
+func (s *SCPTransfer) MapLocalPathToDest(localPath string) (string, error) {
+ if localPath == "" {
+ return "", fmt.Errorf("local path is empty")
+ }
+
+ if s.destRootDir == "" {
+ return "", fmt.Errorf("destination root directory not configured")
+ }
+
+ // Extract the relative path from local path
+ var relativePath string
+
+ if s.sourceReplaceTo != "" {
+ // If source replacement is configured, extract relative path from the replacement root
+ localPathNorm := filepath.ToSlash(localPath)
+ sourceReplaceToNorm := filepath.ToSlash(s.sourceReplaceTo)
+
+ if !strings.HasPrefix(localPathNorm, sourceReplaceToNorm) {
+ return "", fmt.Errorf("local path %s does not start with source replacement root %s", localPath, s.sourceReplaceTo)
+ }
+
+ relativePath = strings.TrimPrefix(localPathNorm, sourceReplaceToNorm)
+ relativePath = strings.TrimPrefix(relativePath, "/") // Remove leading slash
+ } else {
+ // If no source replacement, extract filename from the full path
+ relativePath = filepath.Base(localPath)
+ }
+
+ // Build destination path (always use forward slashes for remote paths)
+ destPath := strings.TrimSuffix(s.destRootDir, "/") + "/" + relativePath
+ return destPath, nil
+}
diff --git a/internal/transfer/transfer.go b/internal/transfer/transfer.go
new file mode 100644
index 0000000..ec8bc3f
--- /dev/null
+++ b/internal/transfer/transfer.go
@@ -0,0 +1,159 @@
+package transfer
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/nullable-eth/syncarr/internal/config"
+ "github.com/nullable-eth/syncarr/internal/logger"
+ "github.com/nullable-eth/syncarr/pkg/types"
+)
+
+// TransferMethod represents different transfer methods
+type TransferMethod string
+
+const (
+ TransferMethodSFTP TransferMethod = "sftp"
+ TransferMethodRsync TransferMethod = "rsync"
+)
+
+// FileTransferrer defines the interface for file transfer implementations
+type FileTransferrer interface {
+ TransferFile(sourcePath, destPath string) error
+ TransferFiles(files []types.FileTransfer) error
+ Close() error
+ FileExists(path string) (bool, error)
+ GetFileSize(path string) (int64, error)
+ DeleteFile(path string) error
+ ListDirectoryContents(rootPath string) ([]string, error)
+ MapSourcePathToLocal(sourcePath string) (string, error)
+ MapLocalPathToDest(localPath string) (string, error)
+}
+
+// NewTransferrer creates a new file transferrer based on the specified method
+func NewTransferrer(method TransferMethod, cfg *config.Config, log *logger.Logger) (FileTransferrer, error) {
+ switch method {
+ case TransferMethodSFTP:
+ return NewSCPTransfer(cfg, log)
+ case TransferMethodRsync:
+ return NewRsyncTransfer(cfg, log)
+ default:
+ return nil, fmt.Errorf("unsupported transfer method: %s", method)
+ }
+}
+
+// GetOptimalTransferMethod returns the recommended transfer method based on system capabilities
+func GetOptimalTransferMethod(log *logger.Logger) TransferMethod {
+ // Check if rsync is available
+ if IsRsyncAvailable(log) {
+ log.Info("rsync detected - using high-performance rsync transfers")
+ return TransferMethodRsync
+ }
+
+ log.Info("rsync not available - falling back to SFTP transfers")
+ return TransferMethodSFTP
+}
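+
+// Typical call-site usage (illustrative; cfg, log and the paths are assumed to
+// come from the caller):
+//
+//  method := transfer.GetOptimalTransferMethod(log)
+//  t, err := transfer.NewTransferrer(method, cfg, log)
+//  if err != nil {
+//      return err
+//  }
+//  defer t.Close()
+//  return t.TransferFile(srcPath, destPath)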
+
+// ForceTransferMethod forces a specific transfer method (useful for testing)
+func ForceTransferMethod(method TransferMethod, log *logger.Logger) TransferMethod {
+ log.WithField("forced_method", string(method)).Info("Using forced transfer method")
+ return method
+}
+
+// IsRsyncAvailable checks if rsync is installed and available locally
+func IsRsyncAvailable(log *logger.Logger) bool {
+ // Enhanced debugging for Windows rsync detection
+ log.Debug("Starting rsync availability check")
+
+ // Log PATH environment for debugging
+ pathEnv := os.Getenv("PATH")
+ pathDirs := strings.Split(pathEnv, string(os.PathListSeparator))
+ log.WithField("path_dir_count", len(pathDirs)).Debug("PATH environment variable loaded")
+
+ // Check for common rsync locations on Windows
+ rsyncDirs := []string{}
+ for _, dir := range pathDirs {
+ if strings.Contains(strings.ToLower(dir), "rsync") {
+ rsyncDirs = append(rsyncDirs, dir)
+ }
+ }
+ if len(rsyncDirs) > 0 {
+ log.WithField("rsync_dirs_in_path", rsyncDirs).Debug("Found rsync-related directories in PATH")
+ }
+
+ // Try different rsync executable names (Windows compatibility)
+ rsyncNames := []string{"rsync", "rsync.exe"}
+
+ for _, name := range rsyncNames {
+ log.WithField("checking_name", name).Debug("Checking for rsync executable")
+
+ rsyncPath, err := exec.LookPath(name)
+ if err != nil {
+ log.WithError(err).WithField("executable_name", name).Debug("LookPath failed for rsync name")
+ continue
+ }
+
+ log.WithField("rsync_path", rsyncPath).Info("rsync found locally via LookPath")
+
+ // Test if rsync actually runs (quick version check)
+ if testRsyncExecution(rsyncPath, log) {
+ return true
+ }
+ }
+
+ // Check common Windows rsync install locations, including C:\rsyncd\bin
+ specificPaths := []string{
+ "C:\\rsyncd\\bin\\rsync.exe",
+ "C:\\rsyncd\\bin\\rsync",
+ "C:\\Program Files\\Git\\usr\\bin\\rsync.exe",
+ "C:\\msys64\\usr\\bin\\rsync.exe",
+ }
+
+ log.Debug("LookPath failed, checking specific common Windows rsync locations")
+ for _, specificPath := range specificPaths {
+ if _, err := os.Stat(specificPath); err == nil {
+ log.WithField("rsync_path", specificPath).Info("rsync found at specific Windows location")
+ if testRsyncExecution(specificPath, log) {
+ return true
+ }
+ } else {
+ log.WithField("path", specificPath).Debug("Specific rsync path does not exist")
+ }
+ }
+
+ log.Warn("rsync not found in PATH or common Windows locations")
+ log.WithField("search_names", rsyncNames).Debug("Searched for these rsync executable names")
+ log.WithField("specific_paths", specificPaths).Debug("Also checked these specific Windows paths")
+ log.Info("rsync requires installation on both local system and destination system")
+ log.Info("On Windows: ensure rsync is installed and available in PATH (current search: rsync, rsync.exe)")
+ return false
+}
+
+// testRsyncExecution tests if a found rsync executable actually works
+func testRsyncExecution(rsyncPath string, log *logger.Logger) bool {
+ log.WithField("rsync_path", rsyncPath).Debug("Testing rsync execution with --version")
+ cmd := exec.Command(rsyncPath, "--version")
+
+ output, err := cmd.Output()
+ if err != nil {
+ log.WithError(err).WithFields(map[string]interface{}{
+ "rsync_path": rsyncPath,
+ "command": rsyncPath + " --version",
+ }).Warn("rsync found but failed to execute --version command")
+ return false
+ }
+
+ versionText := string(output)
+ if len(versionText) > 100 {
+ versionText = versionText[:100] + "..."
+ }
+
+ log.WithFields(map[string]interface{}{
+ "rsync_path": rsyncPath,
+ "rsync_version": versionText,
+ }).Info("rsync version check successful - rsync is available")
+
+ return true
+}
diff --git a/pkg/types/types.go b/pkg/types/types.go
new file mode 100644
index 0000000..d66d1c4
--- /dev/null
+++ b/pkg/types/types.go
@@ -0,0 +1,114 @@
+package types
+
+import (
+ "time"
+)
+
+// SyncableItem represents an item that can be synchronized (kept for backward compatibility)
+type SyncableItem struct {
+ RatingKey string `json:"ratingKey"`
+ Title string `json:"title"`
+ LibraryID string `json:"libraryId"`
+ FilePaths []string `json:"filePaths"`
+ CustomFields map[string]interface{} `json:"customFields"`
+ Metadata interface{} `json:"metadata"` // Can hold Movie, TVShow, or Episode
+}
+
+// WatchedState represents the watch status of a media item
+type WatchedState struct {
+ Watched bool `json:"watched"`
+ ViewCount int `json:"viewCount"`
+ LastViewedAt time.Time `json:"lastViewedAt"`
+ ViewOffset int64 `json:"viewOffset"` // Resume position in milliseconds
+}
+
+// Library represents a Plex library (kept for backward compatibility)
+type Library struct {
+ ID string `json:"id"`
+ Title string `json:"title"`
+ Type string `json:"type"`
+ Path string `json:"path"`
+}
+
+// FileTransfer represents a file transfer operation
+type FileTransfer struct {
+ SourcePath string `json:"sourcePath"`
+ DestPath string `json:"destPath"`
+ Size int64 `json:"size"`
+}
+
+// SyncError represents a synchronization error
+type SyncError struct {
+ Type string `json:"type"`
+ Message string `json:"message"`
+ Item string `json:"item"`
+ LibraryID string `json:"libraryId"`
+ Timestamp time.Time `json:"timestamp"`
+ Details map[string]interface{} `json:"details"`
+ Recoverable bool `json:"recoverable"`
+}
+
+// SyncStats represents synchronization statistics
+type SyncStats struct {
+ StartTime time.Time `json:"startTime"`
+ EndTime time.Time `json:"endTime"`
+ Duration time.Duration `json:"duration"`
+ ItemsProcessed int `json:"itemsProcessed"`
+ ItemsSkipped int `json:"itemsSkipped"`
+ ItemsFailures int `json:"itemsFailures"`
+ Errors int `json:"errors"` // Alias for ItemsFailures for backward compatibility
+ FilesTransferred int `json:"filesTransferred"`
+ BytesTransferred int64 `json:"bytesTransferred"`
+ WatchedStatesSynced int `json:"watchedStatesSynced"`
+ MetadataFieldsSynced int `json:"metadataFieldsSynced"`
+}
+
+// FailedItem represents an item that failed processing
+type FailedItem struct {
+ ID string `json:"id"` // RatingKey for backward compatibility
+ Item SyncableItem `json:"item"`
+ Error string `json:"error"`
+ Timestamp time.Time `json:"timestamp"`
+ RetryCount int `json:"retryCount"`
+ MaxRetries int `json:"maxRetries"`
+ NextRetryTime time.Time `json:"nextRetryTime"`
+ Permanent bool `json:"permanent"`
+}
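+
+// Illustrative helper (hypothetical, not defined in this package): a
+// FailedItem is due for retry once its backoff window has elapsed, it is not
+// permanent, and retries remain.
+//
+//  func shouldRetry(f FailedItem) bool {
+//      return !f.Permanent &&
+//          f.RetryCount < f.MaxRetries &&
+//          time.Now().After(f.NextRetryTime)
+//  }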
+
+// NewSyncableItem creates a SyncableItem from labelarr plex types (convenience function)
+func NewSyncableItem(ratingKey, title, libraryID string, filePaths []string) SyncableItem {
+ return SyncableItem{
+ RatingKey: ratingKey,
+ Title: title,
+ LibraryID: libraryID,
+ FilePaths: filePaths,
+ CustomFields: make(map[string]interface{}),
+ Metadata: nil,
+ }
+}
+
+// NewSyncableItemWithMetadata creates a SyncableItem with metadata from labelarr plex types
+func NewSyncableItemWithMetadata(ratingKey, title, libraryID string, filePaths []string, metadata interface{}) SyncableItem {
+ return SyncableItem{
+ RatingKey: ratingKey,
+ Title: title,
+ LibraryID: libraryID,
+ FilePaths: filePaths,
+ CustomFields: make(map[string]interface{}),
+ Metadata: metadata,
+ }
+}
+
+// NewFailedItem creates a FailedItem with proper ID field
+func NewFailedItem(item SyncableItem, errMsg string) FailedItem {
+ return FailedItem{
+  ID: item.RatingKey, // Use RatingKey as ID
+  Item: item,
+  Error: errMsg,
+ Timestamp: time.Now(),
+ RetryCount: 0,
+ MaxRetries: 3,
+ NextRetryTime: time.Now().Add(time.Minute * 5),
+ Permanent: false,
+ }
+}