diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 8e3a4a8..d2b5d24 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,12 +1,3 @@ -# These are supported funding model platforms - -github: [willfarrell]# Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] -patreon: # Replace with a single Patreon username -open_collective: # Replace with a single Open Collective username -ko_fi: # Replace with a single Ko-fi username -tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel -community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry -liberapay: # Replace with a single Liberapay username -issuehunt: # Replace with a single IssueHunt username -otechie: # Replace with a single Otechie username -custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] +github: [SimplicityGuy] +ko_fi: robertwlodarczyk +custom: [paypal.me/RWlodarczyk] diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..ad450a0 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,45 @@ +--- +version: 2 +updates: + # GitHub Actions dependencies + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + day: "monday" + time: "09:00" + timezone: "America/Los_Angeles" + commit-message: + prefix: "ci" + include: "scope" + labels: + - "dependencies" + - "ci" + assignees: + - "SimplicityGuy" + groups: + actions: + patterns: + - "*" + + # Docker base images + # Monitors Dockerfile for base image updates (docker:dind-alpine) + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" + day: "monday" + time: "09:00" + timezone: "America/Los_Angeles" + commit-message: + prefix: "docker" + include: "scope" + labels: + - "dependencies" + - "docker" + assignees: + - "SimplicityGuy" + groups: + docker-images: + patterns: + - "*" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b80b36c..0696360 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,49 +1,78 @@ -name: build +--- +name: Build on: + workflow_dispatch: push: branches: - main - tags: - - '*' + pull_request: + branches: + - main schedule: - - cron: '0 0 * * *' + - cron: '0 1 * * 6' + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.actor }}/crontab jobs: - multi: + build-crontab: runs-on: ubuntu-latest + timeout-minutes: 90 + + permissions: + contents: read + packages: write + steps: - - - name: Checkout - uses: actions/checkout@v2 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 + - name: Checkout repository. + uses: actions/checkout@v6 + with: + submodules: true + + - name: Log in to the GitHub Container Registry. + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GHCR_TOKEN }} - - if: github.ref == 'refs/heads/main' - name: Conditional(Set tag as `latest`) - run: echo "tag=willfarrell/crontab:latest" >> $GITHUB_ENV + - name: Extract metadata (tags, labels) for Docker. 
+ id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=raw,value=latest,enable={{is_default_branch}} + type=ref,event=branch + type=ref,event=pr + type=schedule,pattern={{date 'YYYYMMDD'}} - - if: startsWith(github.ref, 'refs/tags/') - name: Conditional(Set tag as `{version}`) - run: echo "tag=willfarrell/crontab:${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU. + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx. + uses: docker/setup-buildx-action@v3 + with: + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 - - - name: Build and push - uses: docker/build-push-action@v2 + - name: Build and push Docker image to GitHub Container Registry. + uses: docker/build-push-action@v6 with: context: . - file: ./Dockerfile - push: true - tags: | - ${{ env.tag }} + platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + provenance: true + sbom: true + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Send notification to Discord. + uses: sarisia/actions-status-discord@b8381b25576cb341b2af39926ab42c5056cc44ed # v1.15.5 + if: always() + with: + webhook: ${{ secrets.DISCORD_WEBHOOK }} diff --git a/.github/workflows/cleanup-cache.yaml b/.github/workflows/cleanup-cache.yaml new file mode 100644 index 0000000..b16b706 --- /dev/null +++ b/.github/workflows/cleanup-cache.yaml @@ -0,0 +1,36 @@ +--- +name: Cleanup Cache + +on: + pull_request: + types: + - closed + +concurrency: + group: cleanup-cache-${{ github.event.pull_request.number }} + cancel-in-progress: true + +jobs: + cleanup: + runs-on: ubuntu-latest + timeout-minutes: 10 + permissions: + actions: write + steps: + - name: ๐Ÿงน Cleanup Cache + run: | + echo "Fetching list of cache keys" + cacheKeysForPR=$(gh cache list --ref "$BRANCH" --limit 100 --json id --jq ".[].id") + + ## Setting this to not fail the workflow while deleting cache keys. + set +e + echo "Deleting caches..." 
+ for cacheKey in $cacheKeysForPR + do + gh cache delete "$cacheKey" + done + echo "Done" + env: + GH_TOKEN: ${{ github.token }} + GH_REPO: ${{ github.repository }} + BRANCH: refs/pull/${{ github.event.pull_request.number }}/merge diff --git a/.github/workflows/cleanup-images.yml b/.github/workflows/cleanup-images.yml new file mode 100644 index 0000000..3371016 --- /dev/null +++ b/.github/workflows/cleanup-images.yml @@ -0,0 +1,29 @@ +--- +name: Cleanup Docker Images + +on: + workflow_dispatch: + schedule: + - cron: "0 0 15 * *" # Monthly on the 15th at midnight UTC + +concurrency: + group: cleanup-images + cancel-in-progress: false + +jobs: + cleanup: + runs-on: ubuntu-latest + timeout-minutes: 30 + permissions: + packages: write # Required to delete packages + contents: read + steps: + - name: ๐Ÿงน Cleanup Docker Images + uses: dataaxiom/ghcr-cleanup-action@cd0cdb900b5dbf3a6f2cc869f0dbb0b8211f50c4 # v1.0.16 + with: + package: crontab + delete-partial-images: true + delete-untagged: true + keep-n-tagged: 2 + older-than: 30 days + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/update-dependencies.yml b/.github/workflows/update-dependencies.yml new file mode 100644 index 0000000..8e57f3c --- /dev/null +++ b/.github/workflows/update-dependencies.yml @@ -0,0 +1,93 @@ +--- +name: Update Dependencies + +on: + schedule: + - cron: '0 9 * * 1' # Weekly on Mondays at 9am UTC + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +jobs: + update-base-image: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository. + uses: actions/checkout@v4 + + - name: Set up Docker Buildx. + uses: docker/setup-buildx-action@v3 + + - name: Check for base image updates. + id: check-updates + run: | + # Get current base images from Dockerfile + BUILDER_IMAGE=$(grep "^FROM alpine:latest AS builder" Dockerfile | awk '{print $2}') + RELEASE_IMAGE=$(grep "^FROM docker:.* AS release" Dockerfile | awk '{print $2}') + + echo "Current builder image: $BUILDER_IMAGE" + echo "Current release image: $RELEASE_IMAGE" + + # Pull latest versions + docker pull "$BUILDER_IMAGE" --quiet + docker pull "$RELEASE_IMAGE" --quiet + + # Get digests + BUILDER_DIGEST=$(docker inspect --format='{{.RepoDigests}}' "$BUILDER_IMAGE" | grep -oP 'sha256:[a-f0-9]{64}' | head -1) + RELEASE_DIGEST=$(docker inspect --format='{{.RepoDigests}}' "$RELEASE_IMAGE" | grep -oP 'sha256:[a-f0-9]{64}' | head -1) + + echo "builder_digest=$BUILDER_DIGEST" >> $GITHUB_OUTPUT + echo "release_digest=$RELEASE_DIGEST" >> $GITHUB_OUTPUT + + # Check if we need to rebuild + if [ -z "$BUILDER_DIGEST" ] || [ -z "$RELEASE_DIGEST" ]; then + echo "updates_available=true" >> $GITHUB_OUTPUT + else + echo "updates_available=false" >> $GITHUB_OUTPUT + fi + + - name: Build test image. + if: steps.check-updates.outputs.updates_available == 'true' + uses: docker/build-push-action@v6 + with: + context: . + push: false + tags: test-image:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Create Pull Request. + if: steps.check-updates.outputs.updates_available == 'true' + uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5 + with: + token: ${{ secrets.GITHUB_TOKEN }} + commit-message: 'chore(deps): update base image dependencies' + title: 'chore(deps): update base image dependencies' + body: | + ## Automated Base Image Update + + This PR updates the base Docker images to their latest versions. 
+ + ### Changes + - Builder image digest: `${{ steps.check-updates.outputs.builder_digest }}` + - Release image digest: `${{ steps.check-updates.outputs.release_digest }}` + + ### Testing + - Build test completed successfully + - Please review and test the updated images before merging + + --- + *This PR was automatically generated by the update-dependencies workflow.* + branch: chore/update-base-images + delete-branch: true + assignees: SimplicityGuy + labels: dependencies,automated,docker + + - name: Send notification to Discord. + uses: sarisia/actions-status-discord@b8381b25576cb341b2af39926ab42c5056cc44ed # v1.15.5 + if: always() + with: + webhook: ${{ secrets.DISCORD_WEBHOOK }} diff --git a/.gitignore b/.gitignore index f8a3aa0..d3ea90f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,13 @@ .idea *.iml - -config.json .vscode .DS_Store + +config.json +config.working.json + +jobs/ +projects/ + +# Backup files created by update script +*.backup-* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..6c8c303 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,32 @@ +--- +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0 + hooks: + - id: check-added-large-files + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-yaml + - id: detect-aws-credentials + - id: detect-private-key + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: b035497fb64e3f9faa91e833331688cc185891e6 # frozen: 0.36.0 + hooks: + - id: check-github-workflows + + - repo: https://github.com/executablebooks/mdformat + rev: 2d496dbc18e31b83a1596685347ffe0b6041daf0 # frozen: 1.0.0 + hooks: + - id: mdformat + additional_dependencies: + - mdformat-gfm + + - repo: https://github.com/hadolint/hadolint + rev: 4e697ba704fd23b2409b947a319c19c3ee54d24f # frozen: v2.14.0 + hooks: + - id: hadolint diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..ab6e48d --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,156 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +This is `docker-crontab`, a Docker-based cron job scheduler that allows running complex cron jobs in other containers. It's a lightweight alternative to mcuadros/ofelia with enterprise features. 
+ +## Key Architecture + +### Core Components + +- **Dockerfile**: Multi-stage build using Alpine Linux base with Docker-in-Docker capability + + - Builder stage: Downloads and compresses `rq` tool for config parsing + - Release stage: Based on `docker:dind-alpine` with cron and Docker client + - Uses `su-exec` for proper user privilege handling + - Configurable Docker group ID via `DOCKER_GID` build arg (default: 999) + +- **entrypoint.sh**: Main orchestration script that: + + - Normalizes config files (JSON/YAML/TOML) using `rq` and `jq` + - Processes shared settings via `~~shared-settings` key + - Generates crontab entries and executable scripts + - Installs crontab files in user-writable directory (`/opt/crontab/crontabs`) + - Supports both `image` (docker run) and `container` (docker exec) execution modes + - Handles trigger chains and onstart commands + - Drops privileges to `docker` user for security + +### Configuration System + +- Supports JSON, YAML, and TOML config formats +- Config can be array or mapping (top-level keys ignored for organization) +- Special `~~shared-settings` key for shared configuration +- Key fields: `schedule`, `command`, `image`/`container`, `dockerargs`, `trigger`, `onstart` +- Schedule supports standard crontab syntax plus shortcuts (@hourly, @daily, @every 2m, etc.) +- Additional fields: `comment`, `name`, `environment`, `expose`, `networks`, `ports`, `volumes` + +### Job Execution Flow + +1. Config normalization: All formats converted to working JSON +1. Script generation: Each job becomes executable shell script in `/opt/crontab/jobs/` +1. Crontab creation: Standard crontab file generated with proper scheduling +1. Trigger processing: Post-job triggers executed in sequence +1. Onstart handling: Jobs marked with `onstart: true` run immediately + +## Development Commands + +### Building + +```bash +# Basic build +docker build -t crontab . + +# Build with custom Docker group ID +docker build --build-arg DOCKER_GID=$(stat -c '%g' /var/run/docker.sock) -t crontab . +``` + +### Running + +```bash +# Command line execution +docker run -d \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v ./config-samples/config.sample.json:/opt/crontab/config.json:ro \ + -v ./logs:/var/log/crontab:rw \ + crontab + +# With host directory for persistent config/logs +# Container will create directories with proper permissions +docker run -d \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v $PWD/crontab-config:/opt/crontab:rw \ + -v $PWD/crontab-logs:/var/log/crontab:rw \ + crontab + +# Docker Compose +docker-compose up +``` + +### Testing + +```bash +# Test with sample configuration +docker run -d \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v ./config-samples/config.sample.json:/opt/crontab/config.json:ro \ + crontab + +# Debug mode - view generated crontab and scripts +docker run --rm \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v ./config-samples/config.sample.json:/opt/crontab/config.json:ro \ + -e TEST_MODE=1 \ + crontab bash -c "cat /tmp/crontab-docker-testing/test && ls -la /tmp/crontab-docker-testing/jobs/" +``` + +The repository includes sample configurations in `config-samples/` for testing different scenarios. 
+ +## Important Configuration Notes + +- **Docker Socket Access**: Container requires read-only access to `/var/run/docker.sock` +- **User Permissions**: Uses `docker` user with configurable GID to match host Docker group +- **Volume Mounts**: Config and log directories should be mounted as volumes +- **Network Access**: For docker-compose usage, containers need network connectivity via `--network` in `dockerargs` + +## Troubleshooting + +### Common Issues + +- **"failed switching to 'docker': operation not permitted"**: Docker group GID mismatch + + - Solution: Rebuild with correct GID using `--build-arg DOCKER_GID=$(stat -c '%g' /var/run/docker.sock)` + +- **"Permission denied" creating directories**: Volume mount permissions issue + + - Solution: Ensure host directories have correct ownership before mounting + - Quick fix: `sudo chown -R $(id -u):$(id -g) /path/to/host/directory` + - Or let container create directories (it runs as root initially, then drops privileges to `docker` user) + +- **Jobs not executing**: Check crontab generation and script permissions + + - Debug: Use `TEST_MODE=1` environment variable to inspect generated files + - Verify crontab file exists: Check `/opt/crontab/crontabs/docker` inside container + - Check logs: Container outputs cron job execution to stdout/stderr + +- **Container networking issues**: Ensure proper network configuration in `dockerargs` + + - For docker-compose: Add `--network ` to dockerargs + +### File Locations + +- Generated scripts: `/opt/crontab/jobs/` +- Working config: `/opt/crontab/config.working.json` +- Crontab directory: `/opt/crontab/crontabs/` +- Crontab file: `/opt/crontab/crontabs/docker` +- Logs: Container stdout/stderr (configure external logging as needed) + +## Security Considerations + +- Container runs as non-root `docker` user for security +- Docker socket access is read-only to prevent container escape +- Uses `su-exec` for privilege dropping instead of `sudo` +- Multi-stage build minimizes attack surface +- SBOM and provenance generation enabled in CI/CD + +## CI/CD + +GitHub Actions workflow (`.github/workflows/build.yml`): + +- Builds on push to main and PRs +- Multi-platform support (linux/amd64) +- Publishes to GitHub Container Registry (`ghcr.io`) +- Includes security scanning with SBOM and provenance +- Discord notifications for build status +- Weekly scheduled builds for base image security updates diff --git a/Dockerfile b/Dockerfile index 0685c21..fa11dcb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,26 +1,115 @@ -FROM alpine:3.12 as rq-build +#hadolint ignore=DL3007 +FROM alpine:3.23 AS builder + +LABEL org.opencontainers.image.title="crontab builder" \ + org.opencontainers.image.description="crontab builder" \ + org.opencontainers.image.authors="robert@simplicityguy.com" \ + org.opencontainers.image.source="https://github.com/SimplicityGuy/docker-crontab/blob/main/Dockerfile" \ + org.opencontainers.image.licenses="MIT" \ + org.opencontainers.image.created="$(date +'%Y-%m-%d')" \ + org.opencontainers.image.base.name="docker.io/library/alpine" + +# Platform arguments provided by Docker Buildx +ARG TARGETPLATFORM +ARG TARGETOS +ARG TARGETARCH +ARG TARGETVARIANT ENV RQ_VERSION=1.0.2 -WORKDIR /root/ +WORKDIR /usr/bin/rq/ -RUN apk --update add upx \ - && wget https://github.com/dflemstr/rq/releases/download/v${RQ_VERSION}/rq-v${RQ_VERSION}-x86_64-unknown-linux-musl.tar.gz \ - && tar -xvf rq-v1.0.2-x86_64-unknown-linux-musl.tar.gz \ - && upx --brute rq +#hadolint ignore=DL3018,SC2086 +RUN apk update --quiet && \ + 
apk upgrade --quiet && \ + apk add --quiet --no-cache \ + upx && \ + rm /var/cache/apk/* && \ + # Map Docker platform to rq release platform + case "${TARGETPLATFORM}" in \ + "linux/amd64") \ + RQ_PLATFORM="x86_64-unknown-linux-musl" \ + ;; \ + "linux/arm64") \ + RQ_PLATFORM="aarch64-unknown-linux-gnu" \ + ;; \ + "linux/arm/v7") \ + RQ_PLATFORM="armv7-unknown-linux-gnueabihf" \ + ;; \ + "linux/arm/v6") \ + RQ_PLATFORM="arm-unknown-linux-gnueabi" \ + ;; \ + *) \ + echo "Warning: Unknown platform ${TARGETPLATFORM}, defaulting to x86_64-unknown-linux-musl" && \ + RQ_PLATFORM="x86_64-unknown-linux-musl" \ + ;; \ + esac && \ + wget --quiet https://github.com/dflemstr/rq/releases/download/v${RQ_VERSION}/rq-v${RQ_VERSION}-${RQ_PLATFORM}.tar.gz && \ + tar -xvf rq-v${RQ_VERSION}-${RQ_PLATFORM}.tar.gz && \ + upx --brute rq -FROM library/docker:stable +#hadolint ignore=DL3007 +FROM docker:29.1.3-dind-alpine3.23 AS release -COPY --from=rq-build /root/rq /usr/local/bin +LABEL org.opencontainers.image.title="crontab" \ + org.opencontainers.image.description="A docker job scheduler (aka crontab for docker)." \ + org.opencontainers.image.authors="robert@simplicityguy.com" \ + org.opencontainers.image.source="https://github.com/SimplicityGuy/docker-crontab/blob/main/Dockerfile" \ + org.opencontainers.image.licenses="MIT" \ + org.opencontainers.image.created="$(date +'%Y-%m-%d')" \ + org.opencontainers.image.base.name="docker.io/library/docker" + +# Build argument for docker group ID, default to 999 which is common +ARG DOCKER_GID=999 ENV HOME_DIR=/opt/crontab -RUN apk add --no-cache --virtual .run-deps gettext jq bash tini \ - && mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/projects \ - && adduser -S docker -D -COPY docker-entrypoint / -ENTRYPOINT ["/sbin/tini", "--", "/docker-entrypoint"] +# Set shell with pipefail option to ensure pipe failures are caught +SHELL ["/bin/ash", "-o", "pipefail", "-c"] + +#hadolint ignore=DL3018 +RUN apk update --quiet && \ + apk upgrade --quiet && \ + apk add --quiet --no-cache \ + bash \ + coreutils \ + curl \ + gettext \ + jq \ + su-exec \ + tini \ + wget \ + shadow \ + python3 \ + py3-flask \ + py3-supervisor && \ + rm /var/cache/apk/* && \ + rm -rf /etc/periodic /etc/crontabs/root && \ + # Set SUID on crontab command so it can modify crontab files + chmod u+s /usr/bin/crontab && \ + # Remove docker group if it exists + getent group docker > /dev/null && delgroup docker || true && \ + # Check if GID is in use, if so use a different one + (getent group | grep -q ":${DOCKER_GID}:" && addgroup docker || addgroup -g ${DOCKER_GID} docker) && \ + # Create docker user and add to docker group + adduser -S docker -D -G docker && \ + mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/crontabs ${HOME_DIR}/data && \ + chown -R docker:docker ${HOME_DIR} + +COPY --from=builder /usr/bin/rq/rq /usr/local/bin +COPY entrypoint.sh /opt +COPY supervisord.conf /opt/crontab/ +COPY webapp/ /opt/crontab/webapp/ + +# Expose web UI port +EXPOSE 8080 + +ENTRYPOINT ["/sbin/tini", "--", "/opt/entrypoint.sh"] HEALTHCHECK --interval=5s --timeout=3s \ CMD ps aux | grep '[c]rond' || exit 1 -CMD ["crond", "-f", "-d", "6", "-c", "/etc/crontabs"] +# Run crond with custom crontabs directory owned by docker user +# -f: foreground mode +# -d 7: debug level 7 (highest) +# -c: crontabs directory +CMD ["crond", "-f", "-d", "7", "-c", "/opt/crontab/crontabs"] diff --git a/README.md b/README.md index 8fc98e0..17eda33 100644 --- a/README.md +++ b/README.md @@ -1,65 +1,220 @@ -# docker-crontab +# crontab -A simple wrapper over `docker` 
to all complex cron job to be run in other containers. - -## Supported tags and Dockerfile links - -- [`latest` (*Dockerfile*)](https://github.com/willfarrell/docker-crontab/blob/master/Dockerfile) -- [`1.0.0` (*Dockerfile*)](https://github.com/willfarrell/docker-crontab/blob/1.0.0/Dockerfile) -- [`0.6.0` (*Dockerfile*)](https://github.com/willfarrell/docker-crontab/blob/0.6.0/Dockerfile) +![crontab](https://github.com/SimplicityGuy/docker-crontab/actions/workflows/build.yml/badge.svg) ![License: MIT](https://img.shields.io/github/license/SimplicityGuy/docker-crontab) [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit)](https://github.com/pre-commit/pre-commit) -![](https://img.shields.io/docker/pulls/willfarrell/crontab "Total docker pulls") [![](https://images.microbadger.com/badges/image/willfarrell/crontab.svg)](http://microbadger.com/images/willfarrell/crontab "Get your own image badge on microbadger.com") +A simple wrapper over `docker` to all complex cron job to be run in other containers. ## Why? -Yes, I'm aware of [mcuadros/ofelia](https://github.com/mcuadros/ofelia) (>250MB when this was created), it was the main inspiration for this project. + +Yes, I'm aware of [mcuadros/ofelia](https://github.com/mcuadros/ofelia) (>250MB when this was created), it was the main inspiration for this project. A great project, don't get me wrong. It was just missing certain key enterprise features I felt were required to support where docker is heading. ## Features + - Easy to read schedule syntax allowed. - Allows for comments, cause we all need friendly reminders of what `update_script.sh` actually does. - Start an image using `image`. - Run command in a container using `container`. -- Run command on a instances of a scaled container using `project`. - Ability to trigger scripts in other containers on completion cron job using `trigger`. +- Ability to share settings between cron jobs using `~~shared-settings` as a key. +- **Web Dashboard UI** for monitoring and controlling cron jobs. + +## Web Dashboard + +The crontab container includes a built-in web dashboard for monitoring and managing your cron jobs. + +### Features + +- ๐Ÿ“Š **Job Monitoring**: View all scheduled jobs with their current status +- ๐Ÿ“… **Schedule Information**: See when jobs last ran and when they'll run next +- ๐Ÿ“ **Execution History**: Browse past executions with timestamps and exit codes +- ๐Ÿ” **Log Viewer**: View stdout and stderr output from job executions +- โ–ถ๏ธ **Manual Triggering**: Run jobs on-demand with a single click +- ๐Ÿ“ˆ **Dashboard Stats**: Overview of total jobs, failures, and recent activity +- ๐Ÿ”„ **Auto-Refresh**: Dashboard automatically updates every 30 seconds + +### Accessing the Web UI + +The web dashboard is available on port **8080** by default. + +**Docker Run:** + +```bash +docker run -d \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v ./config.json:/opt/crontab/config.json:ro \ + -v crontab-data:/opt/crontab/data \ + -p 8080:8080 \ + crontab +``` + +Then open http://localhost:8080 in your browser. + +**Docker Compose:** + +```yaml +services: + crontab: + build: . 
+ ports: + - "8080:8080" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + - "./config.json:/opt/crontab/config.json:ro" + - "crontab-data:/opt/crontab/data" # Persistent database + environment: + - WEB_UI_PORT=8080 + - JOB_HISTORY_RETENTION_DAYS=30 + - JOB_HISTORY_RETENTION_COUNT=1000 + +volumes: + crontab-data: +``` + +### Configuration + +Configure the web UI using environment variables: + +| Variable | Default | Description | +| ----------------------------- | ------- | ----------------------------------------- | +| `WEB_UI_PORT` | `8080` | Port for the web dashboard | +| `JOB_HISTORY_RETENTION_DAYS` | `30` | Keep execution history for this many days | +| `JOB_HISTORY_RETENTION_COUNT` | `1000` | Keep at least this many recent executions | + +### Data Persistence + +Job execution history is stored in a SQLite database at `/opt/crontab/data/crontab.db`. To persist this data across container restarts, mount a volume: + +```bash +-v crontab-data:/opt/crontab/data +``` + +### Health Check + +The web UI includes a health check endpoint at `/api/health`: + +```bash +curl http://localhost:8080/api/health +``` + +Response: + +```json +{ + "status": "healthy", + "crond_running": true, + "database_accessible": true, + "uptime_seconds": 86400 +} +``` + +### API Endpoints + +The dashboard exposes a REST API for programmatic access: + +- `GET /api/jobs` - List all jobs +- `GET /api/executions/` - Get execution history for a job +- `POST /api/trigger/` - Manually trigger a job +- `GET /api/stats` - Get dashboard statistics +- `GET /api/health` - Health check + +### Security Considerations + +The web UI does **not** include authentication by default. For production deployments: + +1. **Reverse Proxy**: Use a reverse proxy (nginx, Traefik) with authentication +1. **Network Isolation**: Run on a private network, not exposed to the internet +1. **Firewall Rules**: Restrict access to trusted IP addresses + +Example nginx reverse proxy with basic auth: + +```nginx +location /crontab/ { + auth_basic "Crontab Dashboard"; + auth_basic_user_file /etc/nginx/.htpasswd; + proxy_pass http://crontab:8080/; +} +``` ## Config file -The config file can be specifed in any of `json`, `toml`, or `yaml`, and can be defined as either an array or mapping (top-level keys will be ignored; can be useful for organizing commands) +The config file can be specified in any of `json`, `toml`, or `yaml`, and can be defined as either an array or mapping (top-level keys will be ignored; can be useful for organizing commands) - `name`: Human readable name that will be used as the job filename. Will be converted into a slug. Optional. - `comment`: Comments to be included with crontab entry. Optional. -- `schedule`: Crontab schedule syntax as described in https://en.wikipedia.org/wiki/Cron. Ex `@hourly`, `@every 1h30m`, `* * * * *`. Required. +- `schedule`: Crontab schedule syntax as described in https://en.wikipedia.org/wiki/Cron. Examples: `@hourly`, `@every 1h30m`, `* * * * *`. Required. - `command`: Command to be run on in crontab container or docker container/image. Required. - `image`: Docker images name (ex `library/alpine:3.5`). Optional. -- `project`: Docker Compose/Swarm project name. Optional, only applies when `contain` is included. -- `container`: Full container name or container alias if `project` is set. Ignored if `image` is included. Optional. +- `container`: Full container name. Ignored if `image` is included. Optional. - `dockerargs`: Command line docker `run`/`exec` arguments for full control. 
Defaults to ` `. -- `trigger`: Array of docker-crontab subset objects. Subset includes: `image`,`project`,`container`,`command`,`dockerargs` -- `onstart`: Run the command on `crontab` container start, set to `true`. Optional, defaults to falsey. +- `trigger`: Array of docker-crontab subset objects. Sub-set includes: `image`, `container`, `command`, `dockerargs` +- `onstart`: Run the command on `crontab` container start, set to `true`. Optional, defaults to false. See [`config-samples`](config-samples) for examples. ```json -[{ - "schedule":"@every 5m", - "command":"/usr/sbin/logrotate /etc/logrotate.conf" - },{ - "comment":"Regenerate Certificate then reload nginx", - "schedule":"43 6,18 * * *", - "command":"sh -c 'dehydrated --cron --out /etc/ssl --domain ${LE_DOMAIN} --challenge dns-01 --hook dehydrated-dns'", - "dockerargs":"--env-file /opt/crontab/env/letsencrypt.env -v webapp_nginx_tls_cert:/etc/ssl -v webapp_nginx_acme_challenge:/var/www/.well-known/acme-challenge", - "image":"willfarrell/letsencrypt", - "trigger":[{ - "command":"sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'", - "project":"conduit", - "container":"nginx" - }], - "onstart":true - }] +{ + "logrotate": { + "schedule":"@every 5m", + "command":"/usr/sbin/logrotate /etc/logrotate.conf" + }, + "cert-regen": { + "comment":"Regenerate Certificate then reload nginx", + "schedule":"43 6,18 * * *", + "command":"sh -c 'dehydrated --cron --out /etc/ssl --domain ${LE_DOMAIN} --challenge dns-01 --hook dehydrated-dns'", + "dockerargs":"--it --env-file /opt/crontab/env/letsencrypt.env", + "volumes":["webapp_nginx_tls_cert:/etc/ssl", "webapp_nginx_acme_challenge:/var/www/.well-known/acme-challenge"], + "image":"willfarrell/letsencrypt", + "trigger":[{ + "command":"sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'", + "container":"nginx" + }], + "onstart":true + } +} ``` +## Architecture & Security + +### Security Model + +The container is designed with security best practices: + +- **Non-root execution**: Container runs as the `docker` user (not root) for security +- **Privilege separation**: Starts as root to set up directories, then drops to `docker` user via `su-exec` +- **Read-only Docker socket**: Docker socket is mounted read-only to prevent container escape +- **User-writable directories**: Crontab and job files stored in `/opt/crontab/` owned by `docker` user +- **SUID crontab**: The `crontab` command has SUID bit set for proper crontab file management + +### Directory Structure + +- `/opt/crontab/` - Main working directory (can be volume mounted) + - `config.json` (or `.yaml`, `.toml`) - Your configuration file + - `config.working.json` - Normalized configuration (auto-generated) + - `jobs/` - Generated shell scripts for each cron job + - `crontabs/` - Crontab files for BusyBox crond + - `docker` - Crontab file for the `docker` user + ## How to use +### Docker Group ID Configuration + +This container needs to access the Docker socket to manage other containers. To do this, the `docker` user inside the container must have the same group ID (GID) as the `docker` group on the host system. + +By default, the Dockerfile uses GID 999, which is common for the `docker` group on many systems. 
If your host system uses a different GID, you need to specify it during the build: + +```bash +# Find your host's docker group ID +getent group docker | cut -d: -f3 +# Or alternatively +stat -c '%g' /var/run/docker.sock + +# Then build with the correct GID +docker build --build-arg DOCKER_GID= -t crontab . +``` + +If you encounter the error `failed switching to "docker": operation not permitted`, it means the GIDs don't match. Rebuild the image with the correct GID. + ### Command Line ```bash @@ -75,45 +230,96 @@ docker run -d \ ### Use with docker-compose 1. Figure out which network name used for your docker-compose containers - * use `docker network ls` to see existing networks - * if your `docker-compose.yml` is in `my_dir` directory, you probably has network `my_dir_default` - * otherwise [read the docker-compose docs](https://docs.docker.com/compose/networking/) -2. Add `dockerargs` to your docker-crontab `config.json` - * use `--network NETWORK_NAME` to connect new container into docker-compose network - * use `--rm --name NAME` to use named container - * e.g. `"dockerargs": "--network my_dir_default --rm --name my-best-cron-job"` + - use `docker network ls` to see existing networks + - if your `docker-compose.yml` is in `my_dir` directory, you probably has network `my_dir_default` + - otherwise [read the docker-compose docs](https://docs.docker.com/compose/networking/) +1. Add `dockerargs` to your docker-crontab `config.json` + - use `--network NETWORK_NAME` to connect new container into docker-compose network + - use `--name NAME` to use named container + - e.g. `"dockerargs": "--it"` ### Dockerfile ```Dockerfile -FROM willfarrell/crontab +FROM registry.gitlab.com/simplicityguy/docker/crontab COPY config.json ${HOME_DIR}/ - ``` ### Logrotate Dockerfile +This example shows how to extend the crontab image for custom use cases: + ```Dockerfile -FROM willfarrell/crontab +FROM ghcr.io/simplicityguy/crontab RUN apk add --no-cache logrotate -RUN echo "*/5 * * * * /usr/sbin/logrotate /etc/logrotate.conf" >> /etc/crontabs/logrotate COPY logrotate.conf /etc/logrotate.conf +# Use the config.json approach instead of manually editing crontab files +COPY config.json ${HOME_DIR}/ +``` + +## Troubleshooting + +### Permission Errors + +**Issue**: `failed switching to 'docker': operation not permitted` + +**Cause**: Docker group GID mismatch between host and container. + +**Solution**: Rebuild the image with the correct Docker group ID: -CMD ["crond", "-f"] +```bash +# Find your host's docker group ID +stat -c '%g' /var/run/docker.sock + +# Rebuild with the correct GID +docker build --build-arg DOCKER_GID=$(stat -c '%g' /var/run/docker.sock) -t crontab . ``` -### Logging - In Dev +### Jobs Not Executing + +**Issue**: Cron jobs defined in config but not running. + +**Troubleshooting steps**: + +1. Check if crontab file was generated: -All `stdout` is captured, formatted, and saved to `/var/log/crontab/jobs.log`. Set `LOG_FILE` to `/dev/null` to disable logging. + ```bash + docker exec cat /opt/crontab/crontabs/docker + ``` -example: `e6ced859-1563-493b-b1b1-5a190b29e938 2017-06-18T01:27:10+0000 [info] Start Cronjob **map-a-vol** map a volume` +1. Verify job scripts exist: -grok: `CRONTABLOG %{DATA:request_id} %{TIMESTAMP_ISO8601:timestamp} \[%{LOGLEVEL:severity}\] %{GREEDYDATA:message}` + ```bash + docker exec ls -la /opt/crontab/jobs/ + ``` + +1. Check crond is running: + + ```bash + docker exec ps aux | grep crond + ``` + +1. 
View container logs for cron execution output: + + ```bash + docker logs + ``` + +### Directory Permission Issues + +**Issue**: Container can't create directories when using volume mounts. + +**Solution**: Ensure the host directory has correct permissions before mounting: + +```bash +# Create directory and set ownership +mkdir -p /path/to/crontab +chown -R $(id -u):$(id -g) /path/to/crontab + +# Then run container with volume mount +docker run -v /path/to/crontab:/opt/crontab:rw ... +``` -## TODO -- [ ] Have ability to auto regenerate crontab on file change (signal HUP?) -- [ ] Run commands on host machine (w/ --privileged?) -- [ ] Write tests -- [ ] Setup TravisCI +Alternatively, let the container create the directories on first run (it starts as root, creates directories, then drops to `docker` user). diff --git a/config-samples/config.sample.toml b/config-samples/config.sample.toml index 5320c3f..b185048 100644 --- a/config-samples/config.sample.toml +++ b/config-samples/config.sample.toml @@ -47,4 +47,3 @@ onstart = true command = "sh -c '/etc/scripts/make_hpkp ${NGINX_DOMAIN} && /usr/sbin/nginx -t && /usr/sbin/nginx -s reload'" project = "conduit" container = "nginx" - diff --git a/docker-compose.yml b/docker-compose.yml index a7cf566..e8aee80 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "2.1" +version: "3.8" services: myapp: @@ -7,8 +7,24 @@ services: command: "sh -c 'while :; do sleep 1; done'" crontab: - build: . + build: + context: . + args: + # Set this to match your host's docker group ID + # You can find it with: getent group docker | cut -d: -f3 + # Or alternatively: stat -c '%g' /var/run/docker.sock + DOCKER_GID: 999 restart: always + ports: + - "8080:8080" volumes: - "/var/run/docker.sock:/var/run/docker.sock:ro" - - "${PWD}/config-samples/config.sample.mapping.json:/opt/crontab/config.json:rw" + - "${PWD}/config-samples/config.sample.mapping.json:/opt/crontab/config.json:ro" + - "crontab-data:/opt/crontab/data" + environment: + - WEB_UI_PORT=8080 + - JOB_HISTORY_RETENTION_DAYS=30 + - JOB_HISTORY_RETENTION_COUNT=1000 + +volumes: + crontab-data: diff --git a/docker-entrypoint b/docker-entrypoint deleted file mode 100755 index 2696ee5..0000000 --- a/docker-entrypoint +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env bash -set -e - -if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then - export DOCKER_HOST='tcp://docker:2375' -fi - -# for local testing only -#HOME_DIR=. - -if [ "${LOG_FILE}" == "" ]; then - LOG_DIR=/var/log/crontab - LOG_FILE=${LOG_DIR}/jobs.log - mkdir -p ${LOG_DIR} - touch ${LOG_FILE} -fi - -get_config() { - if [ -f "${HOME_DIR}/config.json" ]; then - jq 'map(.)' ${HOME_DIR}/config.json > ${HOME_DIR}/config.working.json - elif [ -f "${HOME_DIR}/config.toml" ]; then - rq -t <<< $(cat ${HOME_DIR}/config.toml) | jq 'map(.)' > ${HOME_DIR}/config.json - elif [ -f "${HOME_DIR}/config.yml" ]; then - rq -y <<< $(cat ${HOME_DIR}/config.yml) | jq 'map(.)' > ${HOME_DIR}/config.json - elif [ -f "${HOME_DIR}/config.yaml" ]; then - rq -y <<< $(cat ${HOME_DIR}/config.yaml) | jq 'map(.)' > ${HOME_DIR}/config.json - fi -} - -DOCKER_SOCK=/var/run/docker.sock -CRONTAB_FILE=/etc/crontabs/docker - -# Ensure dir exist - in case of volume mapping -mkdir -p ${HOME_DIR}/jobs ${HOME_DIR}/projects - -ensure_docker_socket_accessible() { - if ! 
grep -q "^docker:" /etc/group; then - # Ensure 'docker' user has permissions for docker socket (without changing permissions) - DOCKER_GID=$(stat -c '%g' ${DOCKER_SOCK}) - if [ "${DOCKER_GID}" != "0" ]; then - if ! grep -qE "^[^:]+:[^:]+:${DOCKER_GID}:" /etc/group; then - # No group with such gid exists - create group docker - addgroup -g ${DOCKER_GID} docker - adduser docker docker - else - # Group with such gid exists - add user "docker" to this group - DOCKER_GROUP_NAME=`getent group "${DOCKER_GID}" | awk -F':' '{{ print $1 }}'` - adduser docker $DOCKER_GROUP_NAME - fi - else - # Docker socket belongs to "root" group - add user "docker" to this group - adduser docker root - fi - fi -} - -slugify() { - echo "$@" | iconv -t ascii | sed -r s/[~\^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr A-Z a-z -} - -make_image_cmd() { - DOCKERARGS=$(echo ${1} | jq -r .dockerargs) - VOLUMES=$(echo ${1} | jq -r '.volumes | map(" -v " + .) | join("")') - PORTS=$(echo ${1} | jq -r '.ports | map(" -p " + .) | join("")') - EXPOSE=$(echo ${1} | jq -r '.expose | map(" --expose " + .) | join("")') - # We'll add name in, if it exists - NAME=$(echo ${1} | jq -r 'select(.name != null) | .name') - NETWORK=$(echo ${1} | jq -r 'select(.network != null) | .network') - ENVIRONMENT=$(echo ${1} | jq -r '.environment | map(" -e " + .) | join("")') - # echo ${1} | jq -r '.environment | join("\n")' > ${PWD}/${NAME}.env - # ENVIRONMENT=" --env-file ${PWD}/${NAME}.env" - if [ "${DOCKERARGS}" == "null" ]; then DOCKERARGS=; fi - if [ ! -z "${NAME}" ]; then DOCKERARGS="${DOCKERARGS} --rm --name ${NAME} "; fi - if [ ! -z "${NETWORK}" ]; then DOCKERARGS="${DOCKERARGS} --network ${NETWORK} "; fi - if [ ! -z "${VOLUMES}" ]; then DOCKERARGS="${DOCKERARGS}${VOLUMES}"; fi - if [ ! -z "${ENVIRONMENT}" ]; then DOCKERARGS="${DOCKERARGS}${ENVIRONMENT}"; fi - if [ ! -z "${PORTS}" ]; then DOCKERARGS="${DOCKERARGS}${PORTS}"; fi - if [ ! 
-z "${EXPOSE}" ]; then DOCKERARGS="${DOCKERARGS}${EXPOSE}"; fi - IMAGE=$(echo ${1} | jq -r .image | envsubst) - TMP_COMMAND=$(echo ${1} | jq -r .command) - echo "docker run ${DOCKERARGS} ${IMAGE} ${TMP_COMMAND}" -} - -make_container_cmd() { - DOCKERARGS=$(echo ${1} | jq -r .dockerargs) - if [ "${DOCKERARGS}" == "null" ]; then DOCKERARGS=; fi - SCRIPT_NAME=$(echo ${1} | jq -r .name) - SCRIPT_NAME=$(slugify $SCRIPT_NAME) - PROJECT=$(echo ${1} | jq -r .project) - CONTAINER=$(echo ${1} | jq -r .container | envsubst) - TMP_COMMAND=$(echo ${1} | jq -r .command) - - if [ "${PROJECT}" != "null" ]; then - - # create bash script to detect all running containers - if [ "${SCRIPT_NAME}" == "null" ]; then - SCRIPT_NAME=$(cat /proc/sys/kernel/random/uuid) - fi -cat << EOF > ${HOME_DIR}/projects/${SCRIPT_NAME}.sh -#!/usr/bin/env bash -set -e - -CONTAINERS=\$(docker ps --format '{{.Names}}' | grep -E "^${PROJECT}_${CONTAINER}.[0-9]+") -for CONTAINER_NAME in \$CONTAINERS; do - docker exec ${DOCKERARGS} \${CONTAINER_NAME} ${TMP_COMMAND} -done -EOF - echo "/bin/bash ${HOME_DIR}/projects/${SCRIPT_NAME}.sh" - # cat "/bin/bash ${HOME_DIR}/projects/${SCRIPT_NAME}.sh" - else - echo "docker exec ${DOCKERARGS} ${CONTAINER} ${TMP_COMMAND}" - fi -} - -#make_host_cmd() { -# HOST_BINARY=$(echo ${1} | jq -r .host) -# TMP_COMMAND=$(echo ${1} | jq -r .command) -# echo "${HOST_BINARY} ${TMP_COMMAND}" -#} - -make_cmd() { - if [ "$(echo ${1} | jq -r .image)" != "null" ]; then - make_image_cmd "$1" - elif [ "$(echo ${1} | jq -r .container)" != "null" ]; then - make_container_cmd "$1" - #elif [ "$(echo ${1} | jq -r .host)" != "null" ]; then - # make_host_cmd "$1" - else - echo ${1} | jq -r .command - fi -} - -parse_schedule() { - case $1 in - "@yearly") - echo "0 0 1 1 *" - ;; - "@annually") - echo "0 0 1 1 *" - ;; - "@monthly") - echo "0 0 1 * *" - ;; - "@weekly") - echo "0 0 * * 0" - ;; - "@daily") - echo "0 0 * * *" - ;; - "@midnight") - echo "0 0 * * *" - ;; - "@hourly") - echo "0 * * * *" - ;; - "@every") - TIME=$2 - TOTAL=0 - - M=$(echo $TIME | grep -o '[0-9]\+m') - H=$(echo $TIME | grep -o '[0-9]\+h') - D=$(echo $TIME | grep -o '[0-9]\+d') - - if [ -n "${M}" ]; then - TOTAL=$(($TOTAL + ${M::-1})) - fi - if [ -n "${H}" ]; then - TOTAL=$(($TOTAL + ${H::-1} * 60)) - fi - if [ -n "${D}" ]; then - TOTAL=$(($TOTAL + ${D::-1} * 60 * 24)) - fi - - echo "*/${TOTAL} * * * *" - ;; - *) - echo "${@}" - ;; - esac -} - -function build_crontab() { - - rm -rf ${CRONTAB_FILE} - - ONSTART=() - while read i ; do - - SCHEDULE=$(jq -r .[$i].schedule ${CONFIG} | sed 's/\*/\\*/g') - if [ "${SCHEDULE}" == "null" ]; then - echo "Schedule Missing: $(jq -r .[$i].schedule ${CONFIG})" - continue - fi - SCHEDULE=$(parse_schedule ${SCHEDULE} | sed 's/\\//g') - - if [ "$(jq -r .[$i].command ${CONFIG})" == "null" ]; then - echo "Command Missing: $(jq -r .[$i].command ${CONFIG})" - continue - fi - - COMMENT=$(jq -r .[$i].comment ${CONFIG}) - if [ "${COMMENT}" != "null" ]; then - echo "# ${COMMENT}" >> ${CRONTAB_FILE} - fi - - SCRIPT_NAME=$(jq -r .[$i].name ${CONFIG}) - SCRIPT_NAME=$(slugify $SCRIPT_NAME) - if [ "${SCRIPT_NAME}" == "null" ]; then - SCRIPT_NAME=$(cat /proc/sys/kernel/random/uuid) - fi - - COMMAND="/bin/bash ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh" -cat << EOF > ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh -#!/usr/bin/env bash -set -e - -# TODO find workaround -# [error] write /dev/stdout: broken pipe <- when using docker commands -#UUID=\$(cat /proc/sys/kernel/random/uuid) -#exec > >(read message; echo "\${UUID} \$(date -Iseconds) [info] \$message" | 
tee -a ${LOG_FILE} ) -#exec 2> >(read message; echo "\${UUID} \$(date -Iseconds) [error] \$message" | tee -a ${LOG_FILE} >&2) - -echo "Start Cronjob **${SCRIPT_NAME}** ${COMMENT}" - -$(make_cmd "$(jq -c .[$i] ${CONFIG})") -EOF - - - - if [ "$(jq -r .[$i].trigger ${CONFIG})" != "null" ]; then - while read j ; do - if [ "$(jq .[$i].trigger[$j].command ${CONFIG})" == "null" ]; then - echo "Command Missing: $(jq -r .[$i].trigger[$j].command ${CONFIG})" - continue - fi - #TRIGGER_COMMAND=$(make_cmd "$(jq -c .[$i].trigger[$j] ${CONFIG})") - echo "$(make_cmd "$(jq -c .[$i].trigger[$j] ${CONFIG})")" >> ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh - #COMMAND="${COMMAND} && ${TRIGGER_COMMAND}" - done < <(jq -r '.['$i'].trigger|keys[]' ${CONFIG}) - fi - - echo "echo \"End Cronjob **${SCRIPT_NAME}** ${COMMENT}\"" >> ${HOME_DIR}/jobs/${SCRIPT_NAME}.sh - - echo "${SCHEDULE} ${COMMAND}" >> ${CRONTAB_FILE} - - if [ "$(jq -r .[$i].onstart ${CONFIG})" == "true" ]; then - ONSTART+=("${COMMAND}") - fi - done < <(jq -r '.|keys[]' ${CONFIG}) - - echo "##### crontab generation complete #####" - cat ${CRONTAB_FILE} - - echo "##### run commands with onstart #####" - for COMMAND in "${ONSTART[@]}"; do - echo "${COMMAND}" - ${COMMAND} & - done -} - - -ensure_docker_socket_accessible - -start_app() { - get_config - if [ -f "${HOME_DIR}/config.working.json" ]; then - export CONFIG=${HOME_DIR}/config.working.json - elif [ -f "${HOME_DIR}/config.json" ]; then - export CONFIG=${HOME_DIR}/config.json - else - echo "NO CONFIG FILE FOUND" - fi - if [ "$1" = "crond" ]; then - if [ -f ${CONFIG} ]; then - build_crontab - else - echo "Unable to find ${CONFIG}" - fi - fi - echo "$@" - exec "$@" -} - -start_app "$@" diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100755 index 0000000..5db84a5 --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,337 @@ +#!/bin/bash + +set -e + +CRONTAB_FILE="${HOME_DIR}"/crontab + +if [ -z "${HOME_DIR}" ] && [ -n "${TEST_MODE}" ]; then + HOME_DIR=/tmp/crontab-docker-testing + CRONTAB_FILE=${HOME_DIR}/test +elif [ -z "${HOME_DIR}" ]; then + echo "HOME_DIR not set." + exit 1 +fi + +# Ensure dir exist - in case of volume mapping. +# This needs to run as root to set proper permissions +if [ "$(id -u)" = "0" ]; then + mkdir -p "${HOME_DIR}"/jobs "${HOME_DIR}"/crontabs + # Only chown the directories we create, not the entire HOME_DIR (to avoid issues with read-only mounts) + chown docker:docker "${HOME_DIR}"/jobs "${HOME_DIR}"/crontabs 2>/dev/null || true + # Try to chown HOME_DIR itself, but ignore errors for read-only mounts + chown docker:docker "${HOME_DIR}" 2>/dev/null || true +else + # If not root, try to create directories (may fail if permissions are wrong) + mkdir -p "${HOME_DIR}"/jobs "${HOME_DIR}"/crontabs 2>/dev/null || { + echo "Warning: Cannot create ${HOME_DIR} directories. Ensure proper volume permissions." 
+ echo "Run: sudo chown -R $(id -u docker):$(id -g docker) /path/to/host/directory" + } +fi + +if [ -z "${DOCKER_HOST}" ] && [ -a "${DOCKER_PORT_2375_TCP}" ]; then + export DOCKER_HOST="tcp://docker:2375" +fi + +normalize_config() { + JSON_CONFIG={} + if [ -f "${HOME_DIR}/config.json" ]; then + JSON_CONFIG="$(cat "${HOME_DIR}"/config.json)" + elif [ -f "${HOME_DIR}/config.toml" ]; then + JSON_CONFIG="$(rq -t <<< "$(cat "${HOME_DIR}"/config.toml)")" + elif [ -f "${HOME_DIR}/config.yml" ]; then + JSON_CONFIG="$(rq -y <<< "$(cat "${HOME_DIR}"/config.yml)")" + elif [ -f "${HOME_DIR}/config.yaml" ]; then + JSON_CONFIG="$(rq -y <<< "$(cat "${HOME_DIR}"/config.yaml)")" + fi + + jq -S -r '."~~shared-settings" as $shared | del(."~~shared-settings") | to_entries | map_values(.value + { name: .key } + $shared)' <<< "${JSON_CONFIG}" > "${HOME_DIR}"/config.working.json +} + +slugify() { + echo "${@}" | iconv -t ascii | sed -r s/[~^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr '[:upper:]' '[:lower:]' +} + +make_image_cmd() { + DOCKERARGS=$(echo "${1}" | jq -r .dockerargs) + ENVIRONMENT=$(echo "${1}" | jq -r 'select(.environment != null) | .environment | map("--env " + .) | join(" ")') + EXPOSE=$(echo "${1}" | jq -r 'select(.expose != null) | .expose | map("--expose " + .) | join(" ")' ) + NAME=$(echo "${1}" | jq -r 'select(.name != null) | .name') + NETWORKS=$(echo "${1}" | jq -r 'select(.networks != null) | .networks | map("--network " + .) | join(" ")') + PORTS=$(echo "${1}" | jq -r 'select(.ports != null) | .ports | map("--publish " + .) | join(" ")') + VOLUMES=$(echo "${1}" | jq -r 'select(.volumes != null) | .volumes | map("--volume " + .) | join(" ")') + + if [ "${DOCKERARGS}" == "null" ]; then DOCKERARGS=; fi + DOCKERARGS+=" " + if [ -n "${ENVIRONMENT}" ]; then DOCKERARGS+="${ENVIRONMENT} "; fi + if [ -n "${EXPOSE}" ]; then DOCKERARGS+="${EXPOSE} "; fi + if [ -n "${NAME}" ]; then DOCKERARGS+="--name ${NAME} "; fi + if [ -n "${NETWORKS}" ]; then DOCKERARGS+="${NETWORKS} "; fi + if [ -n "${PORTS}" ]; then DOCKERARGS+="${PORTS} "; fi + if [ -n "${VOLUMES}" ]; then DOCKERARGS+="${VOLUMES} "; fi + + IMAGE=$(echo "${1}" | jq -r .image | envsubst) + if [ "${IMAGE}" == "null" ]; then return; fi + + COMMAND=$(echo "${1}" | jq -r .command) + + echo "docker run ${DOCKERARGS} ${IMAGE} ${COMMAND}" +} + +make_container_cmd() { + DOCKERARGS=$(echo "${1}" | jq -r .dockerargs) + if [ "${DOCKERARGS}" == "null" ]; then DOCKERARGS=; fi + + CONTAINER=$(echo "${1}" | jq -r .container | envsubst) + if [ "${CONTAINER}" == "null" ]; then return; fi + + COMMAND=$(echo "${1}" | jq -r .command ) + if [ "${COMMAND}" == "null" ]; then return; fi + + echo "docker exec ${DOCKERARGS} ${CONTAINER} ${COMMAND}" +} + +make_cmd() { + if [ "$(echo "${1}" | jq -r .image)" != "null" ]; then + make_image_cmd "${1}" + elif [ "$(echo "${1}" | jq -r .container)" != "null" ]; then + make_container_cmd "${1}" + else + echo "${1}" | jq -r .command + fi +} + +parse_schedule() { + IFS=" " + read -r -a params <<< "$@" + + case ${params[0]} in + "@yearly" | "@annually") + echo "0 0 1 1 *" + ;; + "@monthly") + echo "0 0 1 * *" + ;; + "@weekly") + echo "0 0 * * 0" + ;; + "@daily") + echo "0 0 * * *" + ;; + "@midnight") + echo "0 0 * * *" + ;; + "@hourly") + echo "0 * * * *" + ;; + "@random") + M="*" + H="*" + D="*" + + for when in "${params[@]:1}" + do + case $when in + "@m") + M=$(shuf -i 0-59 -n 1) + ;; + "@h") + H=$(shuf -i 0-23 -n 1) + ;; + "@d") + D=$(shuf -i 0-6 -n 1) + ;; + esac + done + + echo "${M} ${H} * * ${D}" + ;; + *) 
+ echo "${params[@]}" + ;; + esac +} + +function build_crontab() { + rm -rf "${CRONTAB_FILE}" + + ONSTART=() + while read -r i ; do + KEY=$(jq -r .["$i"] "${CONFIG}") + + SCHEDULE=$(echo "${KEY}" | jq -r '.schedule' | sed 's/\*/\\*/g') + if [ "${SCHEDULE}" == "null" ]; then + echo "'schedule' missing: '${KEY}" + continue + fi + SCHEDULE=$(parse_schedule "${SCHEDULE}" | sed 's/\\//g') + + COMMAND=$(echo "${KEY}" | jq -r '.command') + if [ "${COMMAND}" == "null" ]; then + echo "'command' missing: '${KEY}'" + continue + fi + + COMMENT=$(echo "${KEY}" | jq -r '.comment') + + SCRIPT_NAME=$(echo "${KEY}" | jq -r '.name') + SCRIPT_NAME=$(slugify "${SCRIPT_NAME}") + if [ "${SCRIPT_NAME}" == "null" ]; then + SCRIPT_NAME=$(cat /proc/sys/kernel/random/uuid) + fi + + CRON_COMMAND=$(make_cmd "${KEY}") + + SCRIPT_PATH="${HOME_DIR}/jobs/${SCRIPT_NAME}.sh" + + touch "${SCRIPT_PATH}" + chmod +x "${SCRIPT_PATH}" + + { + echo "#\!/usr/bin/env bash" + echo "set -e" + echo "" + echo "JOB_NAME=\"${SCRIPT_NAME}\"" + echo "TIMESTAMP=\$(date -u +%Y-%m-%dT%H:%M:%SZ)" + echo "PID=\$\$" + echo "" + echo "# Log job start to database" + echo "python3 /opt/crontab/webapp/db_logger.py start \"\${JOB_NAME}\" \"\${TIMESTAMP}\" \"cron\" \"\${PID}\" 2>&1 || true" + echo "" + echo "# Capture output to temp files" + echo "STDOUT_FILE=\"/tmp/job-\${JOB_NAME}-\$\$.stdout\"" + echo "STDERR_FILE=\"/tmp/job-\${JOB_NAME}-\$\$.stderr\"" + echo "" + echo "echo \"start cron job __${SCRIPT_NAME}__\"" + echo "set +e" + echo "${CRON_COMMAND} > \"\${STDOUT_FILE}\" 2> \"\${STDERR_FILE}\"" + echo "EXIT_CODE=\$?" + echo "set -e" + echo "" + echo "# Log job completion to database" + echo "END_TIMESTAMP=\$(date -u +%Y-%m-%dT%H:%M:%SZ)" + echo "python3 /opt/crontab/webapp/db_logger.py end \"\${JOB_NAME}\" \"\${END_TIMESTAMP}\" \"\${EXIT_CODE}\" \"\${STDOUT_FILE}\" \"\${STDERR_FILE}\" 2>&1 || true" + echo "" + echo "# Output to container logs" + echo "cat \"\${STDOUT_FILE}\" 2>/dev/null || true" + echo "cat \"\${STDERR_FILE}\" >&2 2>/dev/null || true" + echo "" + echo "# Clean up temp files" + echo "rm -f \"\${STDOUT_FILE}\" \"\${STDERR_FILE}\"" + } > "${SCRIPT_PATH}" + + TRIGGER=$(echo "${KEY}" | jq -r '.trigger') + if [ "${TRIGGER}" != "null" ]; then + while read -r j ; do + TRIGGER_KEY=$(echo "${KEY}" | jq -r .trigger["$j"]) + + TRIGGER_COMMAND=$(echo "${TRIGGER_KEY}" | jq -r '.command') + if [ "${TRIGGER_COMMAND}" == "null" ]; then + continue + fi + + make_cmd "${TRIGGER_KEY}" >> "${SCRIPT_PATH}" + done < <(echo "${KEY}" | jq -r '.trigger | keys[]') + fi + + echo "echo \"end cron job __${SCRIPT_NAME}__\"" >> "${SCRIPT_PATH}" + + if [ "${COMMENT}" != "null" ]; then + echo "# ${COMMENT}" >> "${CRONTAB_FILE}" + fi + # Redirect job output to container's stdout (fd/1) and stderr (fd/2) + echo "${SCHEDULE} ${SCRIPT_PATH} >> /proc/1/fd/1 2>> /proc/1/fd/2" >> "${CRONTAB_FILE}" + + ONSTART_COMMAND=$(echo "${KEY}" | jq -r '.onstart') + if [ "${ONSTART_COMMAND}" == "true" ]; then + ONSTART+=("${SCRIPT_PATH}") + fi + done < <(jq -r '. 
| keys[]' "${CONFIG}") + + printf "##### crontab generated #####\n" + cat "${CRONTAB_FILE}" + + # Copy crontab file to a directory owned by docker user + # BusyBox crond expects files in the crontabs directory to be named after the user + CRONTABS_DIR="${HOME_DIR}/crontabs" + mkdir -p "${CRONTABS_DIR}" + cp "${CRONTAB_FILE}" "${CRONTABS_DIR}/docker" + chmod 600 "${CRONTABS_DIR}/docker" + # Ensure ownership is correct + if [ "$(id -u)" = "0" ]; then + chown docker:docker "${CRONTABS_DIR}" "${CRONTABS_DIR}/docker" + fi + + printf "##### run commands with onstart #####\n" + for ONSTART_COMMAND in "${ONSTART[@]}"; do + printf "%s\n" "${ONSTART_COMMAND}" + ${ONSTART_COMMAND} >> /proc/1/fd/1 2>> /proc/1/fd/2 & + done + + printf "##### cron running #####\n" +} + +init_webapp() { + printf "##### initializing web app #####\n" + + # Initialize database schema + python3 /opt/crontab/webapp/init_db.py + + # Sync jobs from config to database + python3 /opt/crontab/webapp/sync_jobs.py "${CONFIG}" + + printf "##### web app initialized #####\n" +} + +start_app() { + normalize_config + export CONFIG=${HOME_DIR}/config.working.json + if [ ! -f "${CONFIG}" ]; then + printf "missing generated %s. exiting.\n" "${CONFIG}" + exit 1 + fi + if [ "${1}" == "crond" ]; then + build_crontab + init_webapp + fi + + # Use supervisord to manage crond and Flask if we're starting crond + if [ "${1}" == "crond" ]; then + if [ "$(id -u)" = "0" ]; then + exec su-exec docker supervisord -c /opt/crontab/supervisord.conf + else + exec supervisord -c /opt/crontab/supervisord.conf + fi + fi + + # Filter out invalid crond flags + # BusyBox crond doesn't support -s flag + local filtered_args=() + local skip_next=false + + for arg in "$@"; do + if [ "$skip_next" = true ]; then + skip_next=false + continue + fi + + # Skip -s flag if it appears (was used in previous versions but not supported by BusyBox) + if [ "$arg" = "-s" ]; then + echo "Warning: Skipping unsupported -s flag for BusyBox crond" + continue + fi + + filtered_args+=("$arg") + done + + printf "%s\n" "${filtered_args[@]}" + + # Run as docker user for security + if [ "$(id -u)" = "0" ]; then + exec su-exec docker "${filtered_args[@]}" + else + exec "${filtered_args[@]}" + fi +} + +printf "โœจ starting crontab container โœจ\n" +start_app "${@}" diff --git a/scripts/update-dependencies.sh b/scripts/update-dependencies.sh new file mode 100755 index 0000000..1b6a389 --- /dev/null +++ b/scripts/update-dependencies.sh @@ -0,0 +1,334 @@ +#!/usr/bin/env bash +# +# Dependency Update Script for docker-crontab +# +# This script updates various dependencies in the project: +# - Docker base images in Dockerfile +# - rq binary version +# - GitHub Actions versions +# +# Usage: +# ./scripts/update-dependencies.sh [OPTIONS] +# +# Options: +# --no-backup Skip creating backup files +# --dry-run Show what would be updated without making changes +# --help Show this help message +# + +set -euo pipefail + +# Script directory and project root +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" + +# Configuration +BACKUP_SUFFIX=".backup-$(date +%Y%m%d-%H%M%S)" +DRY_RUN=false +NO_BACKUP=false + +# Color output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { echo -e "${BLUE}โ„น${NC} $*"; } +log_success() { echo -e "${GREEN}โœ“${NC} $*"; } +log_warning() { echo -e "${YELLOW}โš ${NC} $*"; } +log_error() { echo -e "${RED}โœ—${NC} $*" >&2; } + +# Help message 
+show_help() { + cat << 'EOF' +Dependency Update Script for docker-crontab + +This script updates various dependencies in the project: +- Docker base images in Dockerfile +- rq binary version +- GitHub Actions versions + +Usage: + ./scripts/update-dependencies.sh [OPTIONS] + +Options: + --no-backup Skip creating backup files + --dry-run Show what would be updated without making changes + --help Show this help message + +Examples: + ./scripts/update-dependencies.sh --dry-run + ./scripts/update-dependencies.sh --no-backup +EOF + exit 0 +} + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --no-backup) + NO_BACKUP=true + shift + ;; + --dry-run) + DRY_RUN=true + shift + ;; + --help|-h) + show_help + ;; + *) + log_error "Unknown option: $1" + show_help + ;; + esac +done + +# Backup file function +backup_file() { + local file="$1" + if [[ "$NO_BACKUP" == "false" ]] && [[ "$DRY_RUN" == "false" ]]; then + cp "$file" "${file}${BACKUP_SUFFIX}" + log_info "Created backup: ${file}${BACKUP_SUFFIX}" + fi +} + +# Update file function +update_file() { + local file="$1" + local old_pattern="$2" + local new_value="$3" + local description="$4" + + if [[ "$DRY_RUN" == "true" ]]; then + log_info "[DRY RUN] Would update $description in $file" + return 0 + fi + + if grep -q "$old_pattern" "$file"; then + backup_file "$file" + sed -i.tmp "s|$old_pattern|$new_value|g" "$file" && rm "${file}.tmp" + log_success "Updated $description in $file" + return 0 + else + log_warning "Pattern not found for $description in $file" + return 1 + fi +} + +# Check for required tools +check_requirements() { + local missing=() + + for cmd in curl jq sed grep; do + if ! command -v "$cmd" &> /dev/null; then + missing+=("$cmd") + fi + done + + if [[ ${#missing[@]} -gt 0 ]]; then + log_error "Missing required tools: ${missing[*]}" + log_error "Please install missing tools and try again" + exit 1 + fi +} + +# Get latest Docker image tag +get_latest_docker_tag() { + local image="$1" + local repo="${image%%:*}" + + # Query Docker Hub API for official images + if [[ "$repo" == "alpine" ]]; then + # For Alpine, get the latest stable version + curl -s "https://hub.docker.com/v2/repositories/library/alpine/tags?page_size=100" | \ + jq -r '.results[].name' | \ + grep -E '^[0-9]+\.[0-9]+$' | \ + sort -V | \ + tail -1 + elif [[ "$repo" == "docker" ]]; then + # For Docker dind, get the latest version matching pattern + curl -s "https://hub.docker.com/v2/repositories/library/docker/tags?page_size=100" | \ + jq -r '.results[].name' | \ + grep -E '^[0-9]+\.[0-9]+\.[0-9]+-dind-alpine[0-9.]+$' | \ + sort -V | \ + tail -1 + else + log_warning "Unknown image repository: $repo" + return 1 + fi +} + +# Get latest rq version from GitHub +get_latest_rq_version() { + curl -s "https://api.github.com/repos/dflemstr/rq/releases/latest" | \ + jq -r '.tag_name' | \ + sed 's/^v//' +} + +# Get latest GitHub Action version (major version only) +get_latest_action_version() { + local repo="$1" + + # Get the latest tag and extract major version only (e.g., v6.0.1 -> v6) + local latest_tag + latest_tag=$(curl -s "https://api.github.com/repos/${repo}/releases/latest" | jq -r '.tag_name') + + # Extract major version (e.g., v6 from v6.0.1) + echo "$latest_tag" | sed -E 's/^(v[0-9]+).*/\1/' +} + +# Update summary tracking +declare -a UPDATE_SUMMARY=() + +add_to_summary() { + UPDATE_SUMMARY+=("$1") +} + +# Main update functions + +update_dockerfile() { + log_info "Checking Dockerfile dependencies..." 
+ + local dockerfile="$PROJECT_ROOT/Dockerfile" + + # Update Alpine base image + local current_alpine + current_alpine=$(grep 'FROM alpine:' "$dockerfile" | head -1 | sed -E 's/.*FROM alpine:([^ ]+).*/\1/') + local latest_alpine + latest_alpine=$(get_latest_docker_tag "alpine") + + if [[ "$current_alpine" != "$latest_alpine" ]]; then + log_info "Alpine: $current_alpine โ†’ $latest_alpine" + update_file "$dockerfile" "FROM alpine:${current_alpine}" "FROM alpine:${latest_alpine}" "Alpine base image" + add_to_summary "โ€ข Alpine: $current_alpine โ†’ $latest_alpine" + else + log_success "Alpine is up to date: $current_alpine" + fi + + # Update Docker dind image + local current_docker + current_docker=$(grep 'FROM docker:' "$dockerfile" | head -1 | sed -E 's/.*FROM docker:([^ ]+).*/\1/') + local latest_docker + latest_docker=$(get_latest_docker_tag "docker") + + if [[ "$current_docker" != "$latest_docker" ]]; then + log_info "Docker: $current_docker โ†’ $latest_docker" + update_file "$dockerfile" "FROM docker:${current_docker}" "FROM docker:${latest_docker}" "Docker dind image" + add_to_summary "โ€ข Docker: $current_docker โ†’ $latest_docker" + else + log_success "Docker is up to date: $current_docker" + fi + + # Update RQ version + local current_rq + current_rq=$(grep 'ENV RQ_VERSION=' "$dockerfile" | sed -E 's/.*ENV RQ_VERSION=([^ ]+).*/\1/') + local latest_rq + latest_rq=$(get_latest_rq_version) + + if [[ "$current_rq" != "$latest_rq" ]]; then + log_info "rq: $current_rq โ†’ $latest_rq" + update_file "$dockerfile" "ENV RQ_VERSION=${current_rq}" "ENV RQ_VERSION=${latest_rq}" "rq version" + add_to_summary "โ€ข rq: $current_rq โ†’ $latest_rq" + else + log_success "rq is up to date: $current_rq" + fi +} + +update_github_actions() { + log_info "Checking GitHub Actions dependencies..." + + local workflow_file="$PROJECT_ROOT/.github/workflows/build.yml" + + # Define actions to update as space-separated pairs: "repo:current_version" + local actions=( + "actions/checkout:v4" + "docker/login-action:v3" + "docker/metadata-action:v5" + "docker/setup-qemu-action:v3" + "docker/setup-buildx-action:v3" + "docker/build-push-action:v6" + ) + + for action in "${actions[@]}"; do + local repo="${action%%:*}" + local current_version="${action##*:}" + local latest_version + latest_version=$(get_latest_action_version "$repo") + + if [[ -n "$latest_version" ]] && [[ "$current_version" != "$latest_version" ]]; then + log_info "$repo: $current_version โ†’ $latest_version" + update_file "$workflow_file" "uses: ${repo}@${current_version}" "uses: ${repo}@${latest_version}" "$repo action" + add_to_summary "โ€ข $repo: $current_version โ†’ $latest_version" + else + log_success "$repo is up to date: $current_version" + fi + done +} + +# Print update summary +print_summary() { + echo "" + echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" + echo "Update Summary" + echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" + + if [[ ${#UPDATE_SUMMARY[@]} -eq 0 ]]; then + log_success "All dependencies are already up to date!" 
+ else + echo "" + echo "The following updates were made:" + echo "" + for update in "${UPDATE_SUMMARY[@]}"; do + echo "$update" + done + echo "" + + if [[ "$DRY_RUN" == "false" ]]; then + if [[ "$NO_BACKUP" == "false" ]]; then + echo "Backup files created with suffix: $BACKUP_SUFFIX" + echo "" + fi + + log_info "Next Steps:" + echo " 1. Review the changes with: git diff" + echo " 2. Test the build: docker build -t crontab ." + echo " 3. Commit changes: git commit -am 'chore: update dependencies'" + fi + fi + + echo "โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”" +} + +# Main execution +main() { + log_info "Starting dependency update process..." + + # Check requirements + check_requirements + + # Change to project root + cd "$PROJECT_ROOT" + + if [[ "$DRY_RUN" == "true" ]]; then + log_warning "Running in DRY RUN mode - no changes will be made" + fi + + # Run updates + update_dockerfile + update_github_actions + + # Print summary + print_summary + + if [[ ${#UPDATE_SUMMARY[@]} -gt 0 ]]; then + exit 0 + else + exit 0 + fi +} + +# Run main function +main diff --git a/supervisord.conf b/supervisord.conf new file mode 100644 index 0000000..1d8981b --- /dev/null +++ b/supervisord.conf @@ -0,0 +1,28 @@ +[supervisord] +nodaemon=true +user=docker +logfile=/dev/null +logfile_maxbytes=0 +loglevel=info +pidfile=/tmp/supervisord.pid + +[program:crond] +command=crond -f -d 7 -c /opt/crontab/crontabs +autostart=true +autorestart=true +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/fd/2 +stderr_logfile_maxbytes=0 +priority=10 + +[program:flask] +command=python3 /opt/crontab/webapp/app.py +autostart=true +autorestart=true +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/fd/2 +stderr_logfile_maxbytes=0 +environment=FLASK_ENV="production",WEB_UI_PORT="8080" +priority=20 diff --git a/test_logging b/test_logging deleted file mode 100755 index 97d5645..0000000 --- a/test_logging +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -e - -# This file is for testing the logging of docker output #8 - -LOG_FILE=./jobs.log -touch ${LOG_FILE} -UUID="xxxxxxxxxxxxxxxxx" - -exec > >(read message; echo "${UUID} $(date) [info] $message" | tee -a ${LOG_FILE} ) -exec 2> >(read message; echo "${UUID} $(date) [error] $message" | tee -a ${LOG_FILE} >&2) - -echo "Start" - -docker run alpine sh -c 'while :; do echo "ping"; sleep 1; done' -# [error] write /dev/stdout: broken pipe -# --log-driver syslog <- errors -# --log-driver none <- errors - -echo "End" diff --git a/webapp/app.py b/webapp/app.py new file mode 100755 index 0000000..b3d8b4a --- /dev/null +++ b/webapp/app.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python3 +"""Flask web application for crontab dashboard.""" + +from flask import Flask, render_template, jsonify, request +import os +import subprocess +import re +from models import ( + get_all_jobs, + get_job, + get_job_executions, + get_execution_by_id, + get_dashboard_stats +) + + +app = Flask(__name__) +app.config['DATABASE'] = '/opt/crontab/data/crontab.db' + + +def validate_job_name(job_name): + """ + Validate job name to prevent path traversal attacks. 
+ + Args: + job_name: Job name to validate + + Returns: + str: Validated job name + + Raises: + ValueError: If job name is invalid + """ + if not re.match(r'^[a-zA-Z0-9_-]+$', job_name): + raise ValueError("Invalid job name format") + return job_name + + +@app.route('/') +def index(): + """Render the dashboard UI.""" + return render_template('index.html') + + +@app.route('/api/jobs') +def api_get_jobs(): + """ + Get all jobs with their current status. + + Returns: + JSON list of jobs + """ + try: + jobs = get_all_jobs() + return jsonify(jobs) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/jobs/') +def api_get_job(job_name): + """ + Get details for a specific job. + + Args: + job_name: Name of the job + + Returns: + JSON job object or 404 + """ + try: + job_name = validate_job_name(job_name) + job = get_job(job_name) + if job: + return jsonify(job) + return jsonify({"error": "Job not found"}), 404 + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/executions/') +def api_get_executions(job_name): + """ + Get execution history for a job. + + Args: + job_name: Name of the job + + Query Parameters: + limit: Maximum number of executions to return (default 50) + + Returns: + JSON list of executions + """ + try: + job_name = validate_job_name(job_name) + limit = request.args.get('limit', 50, type=int) + limit = min(max(limit, 1), 1000) # Clamp between 1 and 1000 + + executions = get_job_executions(job_name, limit) + return jsonify(executions) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/executions/id/') +def api_get_execution(execution_id): + """ + Get details for a specific execution. + + Args: + execution_id: Execution ID + + Returns: + JSON execution object or 404 + """ + try: + execution = get_execution_by_id(execution_id) + if execution: + return jsonify(execution) + return jsonify({"error": "Execution not found"}), 404 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/trigger/', methods=['POST']) +def api_trigger_job(job_name): + """ + Manually trigger a job execution. 
+ + Args: + job_name: Name of the job to trigger + + Returns: + JSON status message + """ + # Simple rate limiting (in-memory) + if not hasattr(api_trigger_job, 'rate_limit'): + api_trigger_job.rate_limit = {} + + import time + now = time.time() + job_triggers = api_trigger_job.rate_limit.get(job_name, []) + job_triggers = [t for t in job_triggers if now - t < 60] # Last minute + + if len(job_triggers) >= 5: + return jsonify({"error": "Rate limit exceeded (max 5 triggers per minute)"}), 429 + + try: + job_name = validate_job_name(job_name) + + # Verify job exists + job = get_job(job_name) + if not job: + return jsonify({"error": "Job not found"}), 404 + + # Verify script exists + script_path = f"/opt/crontab/jobs/{job_name}.sh" + real_path = os.path.realpath(script_path) + if not real_path.startswith('/opt/crontab/jobs/'): + return jsonify({"error": "Invalid job path"}), 400 + + if not os.path.exists(script_path): + return jsonify({"error": "Job script not found"}), 404 + + # Execute job in background + subprocess.Popen( + [script_path], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + start_new_session=True + ) + + # Update rate limit + job_triggers.append(now) + api_trigger_job.rate_limit[job_name] = job_triggers + + return jsonify({ + "status": "triggered", + "job": job_name, + "timestamp": time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + }) + + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/stats') +def api_get_stats(): + """ + Get dashboard statistics. + + Returns: + JSON stats object + """ + try: + stats = get_dashboard_stats() + return jsonify(stats) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@app.route('/api/health') +def api_health(): + """ + Health check endpoint. 
+ + Returns: + JSON health status + """ + import time + try: + # Check database accessibility + stats = get_dashboard_stats() + db_ok = True + except Exception: + db_ok = False + + # Check if crond is running + try: + result = subprocess.run( + ['ps', 'aux'], + capture_output=True, + text=True, + timeout=2 + ) + crond_running = 'crond' in result.stdout + except Exception: + crond_running = False + + # Calculate uptime (approximate) + try: + with open('/proc/uptime', 'r') as f: + uptime = float(f.read().split()[0]) + except Exception: + uptime = 0 + + status = "healthy" if (db_ok and crond_running) else "unhealthy" + + return jsonify({ + "status": status, + "crond_running": crond_running, + "database_accessible": db_ok, + "uptime_seconds": int(uptime) + }), 200 if status == "healthy" else 503 + + +if __name__ == '__main__': + # Run Flask server + port = int(os.environ.get('WEB_UI_PORT', 8080)) + debug = os.environ.get('FLASK_ENV') == 'development' + + print(f"๐Ÿš€ Starting web UI on port {port}") + app.run( + host='0.0.0.0', + port=port, + debug=debug, + threaded=False + ) diff --git a/webapp/cron_parser.py b/webapp/cron_parser.py new file mode 100755 index 0000000..f1447b5 --- /dev/null +++ b/webapp/cron_parser.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +"""Parse cron schedules and calculate next run times.""" + +import re +from datetime import datetime, timedelta + + +class CronParser: + """Parser for cron schedule expressions.""" + + # Shortcut mappings + SHORTCUTS = { + '@yearly': '0 0 1 1 *', + '@annually': '0 0 1 1 *', + '@monthly': '0 0 1 * *', + '@weekly': '0 0 * * 0', + '@daily': '0 0 * * *', + '@midnight': '0 0 * * *', + '@hourly': '0 * * * *', + } + + def parse_schedule(self, schedule_str): + """ + Parse crontab schedule and return human-readable next run time. + + Args: + schedule_str: Cron schedule string (e.g., "*/5 * * * *", "@hourly") + + Returns: + str: Human-readable description of next run time + """ + schedule_str = schedule_str.strip() + + # Handle shortcuts + if schedule_str in self.SHORTCUTS: + schedule_str = self.SHORTCUTS[schedule_str] + return self._describe_standard_cron(schedule_str) + + # Handle @every syntax + if schedule_str.startswith('@every'): + return self._parse_every(schedule_str) + + # Handle @random (return placeholder) + if schedule_str.startswith('@random'): + return "Random (varies per container start)" + + # Parse standard cron: minute hour day month weekday + return self._describe_standard_cron(schedule_str) + + def _parse_every(self, schedule_str): + """ + Parse @every syntax (e.g., @every 2m, @every 1h). + + Args: + schedule_str: Schedule string starting with @every + + Returns: + str: Description like "Every 2 minutes" or next execution time + """ + # Extract duration: @every 2m, @every 1h30m, @every 1d + match = re.search(r'@every\s+(\d+)([mhd])', schedule_str) + if match: + value, unit = int(match.group(1)), match.group(2) + + if unit == 'm': + return f"Every {value} minute{'s' if value != 1 else ''}" + elif unit == 'h': + return f"Every {value} hour{'s' if value != 1 else ''}" + elif unit == 'd': + return f"Every {value} day{'s' if value != 1 else ''}" + + return "Invalid @every syntax" + + def _describe_standard_cron(self, schedule_str): + """ + Convert standard cron syntax to human-readable description. 
+ + Args: + schedule_str: Standard cron string (e.g., "0 2 * * *") + + Returns: + str: Human-readable description + """ + parts = schedule_str.split() + if len(parts) != 5: + return f"Invalid cron syntax: {schedule_str}" + + minute, hour, day, month, weekday = parts + + # Handle common patterns + if minute == '*' and hour == '*': + return "Every minute" + + if minute.startswith('*/'): + interval = minute[2:] + return f"Every {interval} minute{'s' if int(interval) != 1 else ''}" + + if hour.startswith('*/') and minute == '0': + interval = hour[2:] + return f"Every {interval} hour{'s' if int(interval) != 1 else ''}" + + if day.startswith('*/') and minute == '0' and hour == '0': + interval = day[2:] + return f"Every {interval} day{'s' if int(interval) != 1 else ''}" + + # Specific time patterns + if minute != '*' and hour != '*' and day == '*' and month == '*' and weekday == '*': + return f"Daily at {hour.zfill(2)}:{minute.zfill(2)}" + + if minute != '*' and hour != '*' and day != '*' and month == '*' and weekday == '*': + return f"Monthly on day {day} at {hour.zfill(2)}:{minute.zfill(2)}" + + # Fallback: show cron expression + return f"Cron: {schedule_str}" + + def calculate_next_run(self, schedule_str, from_time=None): + """ + Calculate next run time for a cron schedule. + + Args: + schedule_str: Cron schedule string + from_time: Reference time (default: now) + + Returns: + datetime: Next execution time (approximate for complex patterns) + """ + if from_time is None: + from_time = datetime.now() + + # Handle @every syntax + if schedule_str.startswith('@every'): + match = re.search(r'@every\s+(\d+)([mhd])', schedule_str) + if match: + value, unit = int(match.group(1)), match.group(2) + if unit == 'm': + return from_time + timedelta(minutes=value) + elif unit == 'h': + return from_time + timedelta(hours=value) + elif unit == 'd': + return from_time + timedelta(days=value) + + # Handle shortcuts + if schedule_str in self.SHORTCUTS: + schedule_str = self.SHORTCUTS[schedule_str] + + # Parse standard cron (simplified - just handle common patterns) + parts = schedule_str.split() + if len(parts) == 5: + minute, hour, day, month, weekday = parts + + # Every N minutes + if minute.startswith('*/'): + interval = int(minute[2:]) + next_run = from_time + timedelta(minutes=interval) + return next_run.replace(second=0, microsecond=0) + + # Every hour + if hour.startswith('*/') and minute.isdigit(): + interval = int(hour[2:]) + next_run = from_time + timedelta(hours=interval) + return next_run.replace(minute=int(minute), second=0, microsecond=0) + + # Specific time + if minute.isdigit() and hour.isdigit(): + target_hour = int(hour) + target_minute = int(minute) + next_run = from_time.replace(hour=target_hour, minute=target_minute, second=0, microsecond=0) + if next_run <= from_time: + next_run += timedelta(days=1) + return next_run + + # Fallback: estimate ~1 hour from now + return from_time + timedelta(hours=1) + + +if __name__ == '__main__': + # Test the parser + parser = CronParser() + + test_cases = [ + '*/5 * * * *', + '0 2 * * *', + '@hourly', + '@every 2m', + '@every 1h', + '43 6,18 * * *', + '* * * * *', + ] + + print("Cron Parser Test Results:") + print("-" * 60) + for schedule in test_cases: + description = parser.parse_schedule(schedule) + print(f"{schedule:20} => {description}") diff --git a/webapp/db_logger.py b/webapp/db_logger.py new file mode 100755 index 0000000..289bbef --- /dev/null +++ b/webapp/db_logger.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +"""Database logging helper called by 
job scripts to record executions.""" + +import sqlite3 +import sys +from datetime import datetime + + +DB_PATH = '/opt/crontab/data/crontab.db' +MAX_PREVIEW_SIZE = 10 * 1024 # 10KB preview + + +def truncate_output(content, max_size=MAX_PREVIEW_SIZE): + """Truncate output to max size, preserving size information.""" + if len(content) > max_size: + return content[:max_size] + f"\n... (truncated, {len(content)} bytes total)" + return content + + +def log_start(job_name, start_time, triggered_by, pid): + """ + Log the start of a job execution. + + Args: + job_name: Name of the job + start_time: ISO 8601 timestamp + triggered_by: 'cron' or 'manual' + pid: Process ID + """ + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + cursor.execute(''' + INSERT INTO job_executions (job_name, start_time, triggered_by) + VALUES (?, ?, ?) + ''', (job_name, start_time, triggered_by)) + + execution_id = cursor.lastrowid + + # Update job status to 'running' + cursor.execute(''' + UPDATE jobs SET status = 'running', last_run = ? WHERE name = ? + ''', (start_time, job_name)) + + conn.commit() + conn.close() + + # Print execution ID so script can use it + print(f"EXECUTION_ID={execution_id}", file=sys.stderr) + return 0 + + except Exception as e: + print(f"Error logging job start: {e}", file=sys.stderr) + return 1 + + +def log_end(job_name, end_time, exit_code, stdout_file, stderr_file): + """ + Log the completion of a job execution. + + Args: + job_name: Name of the job + end_time: ISO 8601 timestamp + exit_code: Exit code from command + stdout_file: Path to stdout temp file + stderr_file: Path to stderr temp file + """ + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Read output files + stdout_content = "" + stderr_content = "" + stdout_size = 0 + stderr_size = 0 + + try: + with open(stdout_file, 'r', encoding='utf-8', errors='replace') as f: + stdout_content = f.read() + stdout_size = len(stdout_content) + except FileNotFoundError: + pass + except Exception as e: + print(f"Warning: Could not read stdout file: {e}", file=sys.stderr) + + try: + with open(stderr_file, 'r', encoding='utf-8', errors='replace') as f: + stderr_content = f.read() + stderr_size = len(stderr_content) + except FileNotFoundError: + pass + except Exception as e: + print(f"Warning: Could not read stderr file: {e}", file=sys.stderr) + + # Truncate for preview + stdout_preview = truncate_output(stdout_content) + stderr_preview = truncate_output(stderr_content) + + # Find the most recent execution for this job that hasn't ended + cursor.execute(''' + SELECT id, start_time FROM job_executions + WHERE job_name = ? AND end_time IS NULL + ORDER BY start_time DESC LIMIT 1 + ''', (job_name,)) + + row = cursor.fetchone() + if row: + execution_id, start_time = row + + # Calculate duration + start_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00')) + end_dt = datetime.fromisoformat(end_time.replace('Z', '+00:00')) + duration = (end_dt - start_dt).total_seconds() + + # Update execution record + cursor.execute(''' + UPDATE job_executions + SET end_time = ?, + duration_seconds = ?, + exit_code = ?, + stdout_preview = ?, + stderr_preview = ?, + stdout_size = ?, + stderr_size = ? + WHERE id = ? + ''', (end_time, duration, exit_code, stdout_preview, stderr_preview, + stdout_size, stderr_size, execution_id)) + + # Update job status based on exit code + status = 'completed' if exit_code == 0 else 'failed' + cursor.execute(''' + UPDATE jobs SET status = ? WHERE name = ? 
+ ''', (status, job_name)) + + conn.commit() + conn.close() + return 0 + else: + print(f"Warning: No pending execution found for {job_name}", file=sys.stderr) + conn.close() + return 1 + + except Exception as e: + print(f"Error logging job end: {e}", file=sys.stderr) + return 1 + + +def main(): + """Main entry point for db_logger script.""" + if len(sys.argv) < 3: + print("Usage: db_logger.py ...", file=sys.stderr) + print(" start ", file=sys.stderr) + print(" end ", file=sys.stderr) + return 1 + + command = sys.argv[1] + + if command == 'start': + if len(sys.argv) != 6: + print("Usage: db_logger.py start ", file=sys.stderr) + return 1 + return log_start(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) + + elif command == 'end': + if len(sys.argv) != 7: + print("Usage: db_logger.py end ", file=sys.stderr) + return 1 + return log_end(sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5], sys.argv[6]) + + else: + print(f"Unknown command: {command}", file=sys.stderr) + return 1 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/webapp/init_db.py b/webapp/init_db.py new file mode 100755 index 0000000..790fa96 --- /dev/null +++ b/webapp/init_db.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +"""Initialize the SQLite database schema for crontab web UI.""" + +import sqlite3 +import os +import sys + + +DB_PATH = '/opt/crontab/data/crontab.db' + + +def init_database(): + """Initialize database schema if not exists.""" + # Create data directory if it doesn't exist + os.makedirs(os.path.dirname(DB_PATH), exist_ok=True) + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Create tables (idempotent - safe to run multiple times) + cursor.executescript(''' + CREATE TABLE IF NOT EXISTS jobs ( + name TEXT PRIMARY KEY, + schedule TEXT NOT NULL, + command TEXT NOT NULL, + image TEXT, + container TEXT, + comment TEXT, + last_run TIMESTAMP, + next_run TEXT, + status TEXT DEFAULT 'scheduled', + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + + CREATE TABLE IF NOT EXISTS job_executions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + job_name TEXT NOT NULL, + start_time TIMESTAMP NOT NULL, + end_time TIMESTAMP, + duration_seconds REAL, + exit_code INTEGER, + stdout_preview TEXT, + stderr_preview TEXT, + stdout_size INTEGER, + stderr_size INTEGER, + triggered_by TEXT DEFAULT 'cron', + parent_job TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (job_name) REFERENCES jobs(name) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS system_events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_type TEXT NOT NULL, + message TEXT, + metadata TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ); + + CREATE INDEX IF NOT EXISTS idx_executions_job_start + ON job_executions(job_name, start_time DESC); + CREATE INDEX IF NOT EXISTS idx_executions_start_time + ON job_executions(start_time DESC); + CREATE INDEX IF NOT EXISTS idx_events_type_time + ON system_events(event_type, created_at DESC); + ''') + + conn.commit() + conn.close() + print("โœ… Database initialized successfully") + return 0 + + +if __name__ == '__main__': + try: + sys.exit(init_database()) + except Exception as e: + print(f"โŒ Error initializing database: {e}", file=sys.stderr) + sys.exit(1) diff --git a/webapp/models.py b/webapp/models.py new file mode 100755 index 0000000..f47c74b --- /dev/null +++ b/webapp/models.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3 +"""Database models and query functions.""" + +import sqlite3 +from typing import List, Dict, 
Optional +from datetime import datetime, timedelta + + +DB_PATH = '/opt/crontab/data/crontab.db' + + +def get_db(): + """Get database connection with row factory.""" + conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + return conn + + +def get_all_jobs() -> List[Dict]: + """ + Get all jobs with their status. + + Returns: + List of job dictionaries + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM jobs ORDER BY name + ''') + jobs = [dict(row) for row in cursor.fetchall()] + db.close() + return jobs + + +def get_job(job_name: str) -> Optional[Dict]: + """ + Get a single job by name. + + Args: + job_name: Name of the job + + Returns: + Job dictionary or None if not found + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM jobs WHERE name = ? + ''', (job_name,)) + row = cursor.fetchone() + db.close() + return dict(row) if row else None + + +def get_job_executions(job_name: str, limit: int = 50) -> List[Dict]: + """ + Get execution history for a job. + + Args: + job_name: Name of the job + limit: Maximum number of executions to return + + Returns: + List of execution dictionaries + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM job_executions + WHERE job_name = ? + ORDER BY start_time DESC + LIMIT ? + ''', (job_name, limit)) + executions = [dict(row) for row in cursor.fetchall()] + db.close() + return executions + + +def get_execution_by_id(execution_id: int) -> Optional[Dict]: + """ + Get a single execution by ID. + + Args: + execution_id: Execution ID + + Returns: + Execution dictionary or None if not found + """ + db = get_db() + cursor = db.execute(''' + SELECT * FROM job_executions WHERE id = ? + ''', (execution_id,)) + row = cursor.fetchone() + db.close() + return dict(row) if row else None + + +def get_dashboard_stats() -> Dict: + """ + Get dashboard statistics. + + Returns: + Dictionary with stats + """ + db = get_db() + + # Total jobs + cursor = db.execute('SELECT COUNT(*) as count FROM jobs') + total_jobs = cursor.fetchone()['count'] + + # Active jobs (not failed) + cursor = db.execute(''' + SELECT COUNT(*) as count FROM jobs WHERE status != 'failed' + ''') + active_jobs = cursor.fetchone()['count'] + + # Total executions + cursor = db.execute('SELECT COUNT(*) as count FROM job_executions') + total_executions = cursor.fetchone()['count'] + + # Recent failures (last 24 hours) + yesterday = (datetime.now() - timedelta(days=1)).isoformat() + cursor = db.execute(''' + SELECT COUNT(*) as count FROM job_executions + WHERE exit_code != 0 AND start_time > ? + ''', (yesterday,)) + recent_failures = cursor.fetchone()['count'] + + # Executions in last 24 hours + cursor = db.execute(''' + SELECT COUNT(*) as count FROM job_executions + WHERE start_time > ? + ''', (yesterday,)) + last_24h_executions = cursor.fetchone()['count'] + + db.close() + + return { + 'total_jobs': total_jobs, + 'active_jobs': active_jobs, + 'total_executions': total_executions, + 'recent_failures': recent_failures, + 'last_24h_executions': last_24h_executions + } + + +def cleanup_old_executions(retention_days: int = 30, retention_count: int = 1000): + """ + Clean up old job executions based on retention policy. + + Args: + retention_days: Keep executions newer than this many days + retention_count: Keep at least this many recent executions + """ + db = get_db() + + # Delete executions that are: + # 1. Not in the most recent N executions AND + # 2. 
Older than retention_days + cutoff_date = (datetime.now() - timedelta(days=retention_days)).isoformat() + + db.execute(''' + DELETE FROM job_executions + WHERE id NOT IN ( + SELECT id FROM job_executions + ORDER BY start_time DESC LIMIT ? + ) AND start_time < ? + ''', (retention_count, cutoff_date)) + + deleted = db.total_changes + db.commit() + db.close() + + if deleted > 0: + print(f"โœ… Cleaned up {deleted} old job executions") + + return deleted + + +if __name__ == '__main__': + # Test database queries + print("Testing database models...") + print("-" * 60) + + try: + stats = get_dashboard_stats() + print(f"Dashboard Stats: {stats}") + + jobs = get_all_jobs() + print(f"\nFound {len(jobs)} jobs") + for job in jobs: + print(f" - {job['name']}: {job['schedule']}") + + except Exception as e: + print(f"Error: {e}") diff --git a/webapp/static/css/style.css b/webapp/static/css/style.css new file mode 100644 index 0000000..58e238b --- /dev/null +++ b/webapp/static/css/style.css @@ -0,0 +1,512 @@ +/* Docker Crontab Dashboard Styles */ + +:root { + --primary: #2563eb; + --primary-hover: #1d4ed8; + --success: #10b981; + --success-hover: #059669; + --warning: #f59e0b; + --danger: #ef4444; + --danger-hover: #dc2626; + --bg-light: #f9fafb; + --bg-dark: #1f2937; + --bg-card: #ffffff; + --text-primary: #111827; + --text-secondary: #6b7280; + --border: #e5e7eb; + --shadow: rgba(0, 0, 0, 0.1); +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; + background: var(--bg-light); + color: var(--text-primary); + line-height: 1.6; + min-height: 100vh; + display: flex; + flex-direction: column; +} + +.container { + max-width: 1200px; + margin: 0 auto; + padding: 0 1.5rem; + width: 100%; +} + +/* Header */ +header { + background: var(--bg-card); + padding: 1.5rem 0; + box-shadow: 0 2px 4px var(--shadow); + margin-bottom: 2rem; +} + +header h1 { + font-size: 1.875rem; + margin-bottom: 1rem; + color: var(--text-primary); +} + +#stats-bar { + display: flex; + gap: 2rem; + flex-wrap: wrap; +} + +.stat-item { + display: flex; + flex-direction: column; + gap: 0.25rem; +} + +.stat-label { + font-size: 0.875rem; + color: var(--text-secondary); + font-weight: 500; +} + +.stat-value { + font-size: 1.5rem; + font-weight: 700; + color: var(--primary); +} + +/* Main Content */ +main { + flex: 1; + padding-bottom: 2rem; +} + +.section-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 1.5rem; +} + +.section-header h2 { + font-size: 1.5rem; + color: var(--text-primary); +} + +.hidden { + display: none !important; +} + +/* Job Cards */ +.job-card { + background: var(--bg-card); + padding: 1.5rem; + margin: 1rem 0; + border-radius: 8px; + box-shadow: 0 1px 3px var(--shadow); + display: flex; + justify-content: space-between; + align-items: center; + border-left: 4px solid var(--border); + transition: transform 0.2s, box-shadow 0.2s; +} + +.job-card:hover { + transform: translateY(-2px); + box-shadow: 0 4px 6px var(--shadow); +} + +.job-card.status-running { + border-left-color: var(--primary); +} + +.job-card.status-completed { + border-left-color: var(--success); +} + +.job-card.status-failed { + border-left-color: var(--danger); +} + +.job-card.status-scheduled { + border-left-color: var(--text-secondary); +} + +.job-info { + flex: 1; +} + +.job-info h3 { + font-size: 1.125rem; + margin-bottom: 0.5rem; + color: var(--text-primary); +} + +.job-info p 
{ + color: var(--text-secondary); + margin-bottom: 0.5rem; + font-size: 0.875rem; +} + +.job-info small { + color: var(--text-secondary); + font-size: 0.8rem; +} + +.job-meta { + display: flex; + gap: 1.5rem; + margin-top: 0.5rem; +} + +.job-meta-item { + display: flex; + flex-direction: column; +} + +.job-meta-item strong { + font-size: 0.75rem; + color: var(--text-secondary); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.job-actions { + display: flex; + gap: 0.75rem; +} + +/* Execution History */ +.history-table { + background: var(--bg-card); + border-radius: 8px; + overflow: hidden; + box-shadow: 0 1px 3px var(--shadow); +} + +.history-header { + display: grid; + grid-template-columns: 2fr 1fr 100px 120px 150px; + gap: 1rem; + padding: 1rem 1.5rem; + background: var(--bg-light); + font-weight: 600; + font-size: 0.875rem; + color: var(--text-secondary); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.execution-row { + display: grid; + grid-template-columns: 2fr 1fr 100px 120px 150px; + gap: 1rem; + padding: 1rem 1.5rem; + border-bottom: 1px solid var(--border); + align-items: center; + transition: background-color 0.2s; +} + +.execution-row:last-child { + border-bottom: none; +} + +.execution-row:hover { + background: var(--bg-light); +} + +.status-badge { + display: inline-block; + padding: 0.25rem 0.75rem; + border-radius: 12px; + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.status-badge.success { + background: #d1fae5; + color: #065f46; +} + +.status-badge.failed { + background: #fee2e2; + color: #991b1b; +} + +/* Buttons */ +.btn { + padding: 0.5rem 1rem; + border: none; + border-radius: 6px; + cursor: pointer; + font-size: 0.875rem; + font-weight: 600; + transition: background-color 0.2s, transform 0.1s; + white-space: nowrap; +} + +.btn:hover { + transform: translateY(-1px); +} + +.btn:active { + transform: translateY(0); +} + +.btn-primary { + background: var(--primary); + color: white; +} + +.btn-primary:hover { + background: var(--primary-hover); +} + +.btn-success { + background: var(--success); + color: white; +} + +.btn-success:hover { + background: var(--success-hover); +} + +.btn-danger { + background: var(--danger); + color: white; +} + +.btn-danger:hover { + background: var(--danger-hover); +} + +.btn-secondary { + background: var(--bg-light); + color: var(--text-primary); + border: 1px solid var(--border); +} + +.btn-secondary:hover { + background: var(--border); +} + +.btn-sm { + padding: 0.375rem 0.75rem; + font-size: 0.8rem; +} + +/* Modal */ +.modal { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: rgba(0, 0, 0, 0.5); + display: flex; + justify-content: center; + align-items: center; + z-index: 1000; +} + +.modal-content { + background: var(--bg-card); + border-radius: 12px; + width: 90%; + max-width: 800px; + max-height: 90vh; + display: flex; + flex-direction: column; + box-shadow: 0 20px 25px -5px rgba(0, 0, 0, 0.1); +} + +.modal-header { + padding: 1.5rem; + border-bottom: 1px solid var(--border); + display: flex; + justify-content: space-between; + align-items: center; +} + +.modal-header h3 { + font-size: 1.25rem; + color: var(--text-primary); +} + +.btn-close { + background: none; + border: none; + font-size: 1.5rem; + cursor: pointer; + color: var(--text-secondary); + padding: 0; + width: 2rem; + height: 2rem; + display: flex; + align-items: center; + justify-content: center; + border-radius: 4px; +} + +.btn-close:hover { + background: 
var(--bg-light); + color: var(--text-primary); +} + +.modal-body { + padding: 1.5rem; + overflow-y: auto; + flex: 1; +} + +.log-info { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 1rem; + margin-bottom: 1.5rem; + padding: 1rem; + background: var(--bg-light); + border-radius: 6px; +} + +.log-tabs { + display: flex; + gap: 0.5rem; + margin-bottom: 1rem; + border-bottom: 2px solid var(--border); +} + +.tab-btn { + padding: 0.75rem 1.5rem; + border: none; + background: none; + cursor: pointer; + font-weight: 600; + color: var(--text-secondary); + border-bottom: 2px solid transparent; + margin-bottom: -2px; + transition: color 0.2s, border-color 0.2s; +} + +.tab-btn:hover { + color: var(--text-primary); +} + +.tab-btn.active { + color: var(--primary); + border-bottom-color: var(--primary); +} + +.log-content { + position: relative; + min-height: 300px; +} + +.log-pane { + background: var(--bg-dark); + color: #d1d5db; + padding: 1rem; + border-radius: 6px; + overflow-x: auto; + font-family: 'Courier New', Courier, monospace; + font-size: 0.875rem; + line-height: 1.5; + white-space: pre-wrap; + word-wrap: break-word; +} + +.log-pane.hidden { + display: none; +} + +/* Loading */ +.loading { + text-align: center; + padding: 3rem; + color: var(--text-secondary); + font-size: 1.125rem; +} + +.empty-state { + text-align: center; + padding: 3rem; + color: var(--text-secondary); +} + +.empty-state h3 { + margin-bottom: 0.5rem; + color: var(--text-primary); +} + +/* Footer */ +footer { + background: var(--bg-card); + padding: 1.5rem 0; + margin-top: 2rem; + border-top: 1px solid var(--border); + text-align: center; + color: var(--text-secondary); + font-size: 0.875rem; +} + +footer a { + color: var(--primary); + text-decoration: none; +} + +footer a:hover { + text-decoration: underline; +} + +/* Responsive */ +@media (max-width: 768px) { + .job-card { + flex-direction: column; + align-items: flex-start; + gap: 1rem; + } + + .job-actions { + width: 100%; + } + + .job-actions .btn { + flex: 1; + } + + .history-header, + .execution-row { + grid-template-columns: 1fr; + gap: 0.5rem; + } + + .history-header { + display: none; + } + + .execution-row > div::before { + content: attr(data-label); + font-weight: 600; + margin-right: 0.5rem; + color: var(--text-secondary); + font-size: 0.75rem; + } + + #stats-bar { + gap: 1rem; + } + + .modal-content { + width: 95%; + max-height: 95vh; + } + + .log-info { + grid-template-columns: 1fr; + } +} diff --git a/webapp/static/js/app.js b/webapp/static/js/app.js new file mode 100644 index 0000000..91bdd54 --- /dev/null +++ b/webapp/static/js/app.js @@ -0,0 +1,398 @@ +// Docker Crontab Dashboard - Vanilla JavaScript Application + +const App = { + currentView: 'jobs', + refreshInterval: null, + refreshDelay: 30000, // 30 seconds + + /** + * Initialize the application + */ + init() { + this.setupEventListeners(); + this.loadStats(); + this.loadJobs(); + this.startAutoRefresh(); + console.log('๐Ÿš€ Dashboard initialized'); + }, + + /** + * Set up event listeners + */ + setupEventListeners() { + // Back to jobs button + document.getElementById('back-to-jobs').addEventListener('click', () => { + this.showJobsList(); + }); + + // Refresh button + document.getElementById('refresh-btn').addEventListener('click', () => { + this.refresh(); + }); + + // Modal close button + document.getElementById('close-modal').addEventListener('click', () => { + this.closeModal(); + }); + + // Close modal on background click + 
document.getElementById('log-modal').addEventListener('click', (e) => { + if (e.target.id === 'log-modal') { + this.closeModal(); + } + }); + + // Tab buttons + document.querySelectorAll('.tab-btn').forEach(btn => { + btn.addEventListener('click', (e) => { + this.switchTab(e.target.dataset.tab); + }); + }); + + // Keyboard shortcuts + document.addEventListener('keydown', (e) => { + if (e.key === 'Escape') { + this.closeModal(); + } + }); + }, + + /** + * Start auto-refresh timer + */ + startAutoRefresh() { + this.refreshInterval = setInterval(() => { + this.refresh(); + }, this.refreshDelay); + }, + + /** + * Stop auto-refresh timer + */ + stopAutoRefresh() { + if (this.refreshInterval) { + clearInterval(this.refreshInterval); + this.refreshInterval = null; + } + }, + + /** + * Refresh current view + */ + async refresh() { + await this.loadStats(); + if (this.currentView === 'jobs') { + await this.loadJobs(); + } + console.log('๐Ÿ”„ Refreshed'); + }, + + /** + * Load dashboard statistics + */ + async loadStats() { + try { + const resp = await fetch('/api/stats'); + const stats = await resp.json(); + + document.getElementById('total-jobs').textContent = stats.total_jobs; + document.getElementById('active-jobs').textContent = stats.active_jobs; + document.getElementById('recent-failures').textContent = stats.recent_failures; + document.getElementById('last-24h-executions').textContent = stats.last_24h_executions; + } catch (error) { + console.error('Error loading stats:', error); + } + }, + + /** + * Load all jobs + */ + async loadJobs() { + const container = document.getElementById('jobs-container'); + container.innerHTML = '
<div class="loading">Loading jobs...</div>';
+
+        try {
+            const resp = await fetch('/api/jobs');
+            const jobs = await resp.json();
+
+            if (jobs.length === 0) {
+                container.innerHTML = `
+                    <div class="empty-state">
+                        <h3>No Jobs Found</h3>
+                        <p>No cron jobs are currently configured.</p>
+                    </div>
+                `;
+                return;
+            }
+
+            container.innerHTML = jobs.map(job => this.renderJobCard(job)).join('');
+
+            // Attach event listeners to job cards
+            jobs.forEach(job => {
+                const historyBtn = document.getElementById(`history-${job.name}`);
+                const triggerBtn = document.getElementById(`trigger-${job.name}`);
+
+                if (historyBtn) {
+                    historyBtn.addEventListener('click', () => this.viewHistory(job.name));
+                }
+
+                if (triggerBtn) {
+                    triggerBtn.addEventListener('click', () => this.triggerJob(job.name));
+                }
+            });
+
+        } catch (error) {
+            console.error('Error loading jobs:', error);
+            container.innerHTML = `
+                <div class="empty-state">
+                    <h3>Error Loading Jobs</h3>
+                    <p>${error.message}</p>
+                </div>
+            `;
+        }
+    },
+
+    /**
+     * Render a job card
+     */
+    renderJobCard(job) {
+        const lastRun = job.last_run ? new Date(job.last_run).toLocaleString() : 'Never';
+        const status = job.status || 'scheduled';
+
+        return `
+            <div class="job-card status-${status}">
+                <div class="job-info">
+                    <h3>${this.escapeHtml(job.name)}</h3>
+                    <p>${this.escapeHtml(job.comment || job.command || 'No description')}</p>
+                    <div class="job-meta">
+                        <div class="job-meta-item">
+                            <strong>Schedule</strong>
+                            <span>${this.escapeHtml(job.schedule)}</span>
+                        </div>
+                        <div class="job-meta-item">
+                            <strong>Next Run</strong>
+                            <span>${this.escapeHtml(job.next_run || 'Calculating...')}</span>
+                        </div>
+                        <div class="job-meta-item">
+                            <strong>Last Run</strong>
+                            <span>${lastRun}</span>
+                        </div>
+                        <div class="job-meta-item">
+                            <strong>Status</strong>
+                            <span>${status}</span>
+                        </div>
+                    </div>
+                </div>
+                <div class="job-actions">
+                    <button id="history-${job.name}" class="btn btn-secondary btn-sm">History</button>
+                    <button id="trigger-${job.name}" class="btn btn-primary btn-sm">Run Now</button>
+                </div>
+            </div>
+        `;
+    },
+
+    /**
+     * View execution history for a job
+     */
+    async viewHistory(jobName) {
+        this.currentView = 'history';
+        document.getElementById('jobs-list').classList.add('hidden');
+        document.getElementById('execution-history').classList.remove('hidden');
+        document.getElementById('current-job-name').textContent = jobName;
+
+        const container = document.getElementById('history-container');
+        container.innerHTML = '
<div class="loading">Loading history...</div>';
+
+        try {
+            const resp = await fetch(`/api/executions/${encodeURIComponent(jobName)}?limit=100`);
+            const executions = await resp.json();
+
+            if (executions.length === 0) {
+                container.innerHTML = `
+                    <div class="empty-state">
+                        <h3>No Execution History</h3>
+                        <p>This job hasn't been executed yet.</p>
+                    </div>
+                `;
+                return;
+            }
+
+            container.innerHTML = `
+                <div class="history-table">
+                    <div class="history-header">
+                        <div>Start Time</div>
+                        <div>Duration</div>
+                        <div>Exit Code</div>
+                        <div>Triggered By</div>
+                        <div>Actions</div>
+                    </div>
+                    ${executions.map(ex => this.renderExecutionRow(ex)).join('')}
+                </div>
+            `;
+
+            // Attach event listeners to log buttons
+            executions.forEach(ex => {
+                const logBtn = document.getElementById(`logs-${ex.id}`);
+                if (logBtn) {
+                    logBtn.addEventListener('click', () => this.viewLogs(ex));
+                }
+            });
+
+        } catch (error) {
+            console.error('Error loading history:', error);
+            container.innerHTML = `
+                <div class="empty-state">
+                    <h3>Error Loading History</h3>
+                    <p>${error.message}</p>
+                </div>
+            `;
+        }
+    },
+
+    /**
+     * Render an execution row
+     */
+    renderExecutionRow(execution) {
+        const startTime = new Date(execution.start_time).toLocaleString();
+        const duration = execution.duration_seconds
+            ? `${execution.duration_seconds.toFixed(2)}s`
+            : 'In progress';
+        const exitCode = execution.exit_code !== null ? execution.exit_code : '-';
+        const statusClass = execution.exit_code === 0 ? 'success' : 'failed';
+
+        return `
+            <div class="execution-row">
+                <div data-label="Start Time">${startTime}</div>
+                <div data-label="Duration">${duration}</div>
+                <div data-label="Exit Code"><span class="status-badge ${statusClass}">${exitCode}</span></div>
+                <div data-label="Triggered By">${this.escapeHtml(execution.triggered_by || 'cron')}</div>
+                <div data-label="Actions">
+                    <button id="logs-${execution.id}" class="btn btn-secondary btn-sm">View Logs</button>
+                </div>
+            </div>
+ `; + }, + + /** + * View logs for an execution + */ + viewLogs(execution) { + document.getElementById('log-job-name').textContent = execution.job_name; + document.getElementById('log-start-time').textContent = new Date(execution.start_time).toLocaleString(); + document.getElementById('log-duration').textContent = execution.duration_seconds + ? `${execution.duration_seconds.toFixed(2)}s` + : 'In progress'; + document.getElementById('log-exit-code').textContent = execution.exit_code !== null + ? execution.exit_code + : '-'; + + const stdout = execution.stdout_preview || '(empty)'; + const stderr = execution.stderr_preview || '(empty)'; + + document.getElementById('log-stdout').textContent = stdout; + document.getElementById('log-stderr').textContent = stderr; + + // Show stdout tab by default + this.switchTab('stdout'); + + // Show modal + document.getElementById('log-modal').classList.remove('hidden'); + }, + + /** + * Close log modal + */ + closeModal() { + document.getElementById('log-modal').classList.add('hidden'); + }, + + /** + * Switch log tabs + */ + switchTab(tabName) { + // Update tab buttons + document.querySelectorAll('.tab-btn').forEach(btn => { + if (btn.dataset.tab === tabName) { + btn.classList.add('active'); + } else { + btn.classList.remove('active'); + } + }); + + // Update tab panes + document.querySelectorAll('.log-pane').forEach(pane => { + if (pane.id === `log-${tabName}`) { + pane.classList.remove('hidden'); + pane.classList.add('active'); + } else { + pane.classList.add('hidden'); + pane.classList.remove('active'); + } + }); + }, + + /** + * Show jobs list + */ + showJobsList() { + this.currentView = 'jobs'; + document.getElementById('execution-history').classList.add('hidden'); + document.getElementById('jobs-list').classList.remove('hidden'); + this.loadJobs(); + }, + + /** + * Trigger a job manually + */ + async triggerJob(jobName) { + if (!confirm(`Trigger job "${jobName}" now?`)) { + return; + } + + try { + const resp = await fetch(`/api/trigger/${encodeURIComponent(jobName)}`, { + method: 'POST', + headers: { + 'X-Requested-With': 'XMLHttpRequest' + } + }); + + const result = await resp.json(); + + if (resp.ok) { + alert(`โœ… Job "${jobName}" triggered successfully!`); + // Refresh jobs after a short delay + setTimeout(() => this.loadJobs(), 1000); + } else { + alert(`โŒ Error: ${result.error}`); + } + } catch (error) { + alert(`โŒ Error triggering job: ${error.message}`); + } + }, + + /** + * Escape HTML to prevent XSS + */ + escapeHtml(text) { + if (text === null || text === undefined) return ''; + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } +}; + +// Initialize app when DOM is ready +document.addEventListener('DOMContentLoaded', () => { + App.init(); +}); diff --git a/webapp/sync_jobs.py b/webapp/sync_jobs.py new file mode 100755 index 0000000..cdd4959 --- /dev/null +++ b/webapp/sync_jobs.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +"""Sync jobs table from config.working.json.""" + +import sqlite3 +import json +import sys +from cron_parser import CronParser + + +DB_PATH = '/opt/crontab/data/crontab.db' + + +def sync_jobs_from_config(config_path): + """ + Sync jobs table from config.working.json. 
+ + Args: + config_path: Path to config.working.json + + Returns: + int: 0 on success, 1 on error + """ + try: + with open(config_path, 'r') as f: + jobs = json.load(f) + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + parser = CronParser() + + # Get existing job names + cursor.execute('SELECT name FROM jobs') + existing_jobs = {row[0] for row in cursor.fetchall()} + + config_jobs = set() + + # Process each job from config + for job in jobs: + name = job.get('name', 'unnamed') + config_jobs.add(name) + + # Calculate next run description + schedule = job.get('schedule', '* * * * *') + next_run = parser.parse_schedule(schedule) + + # Upsert job (insert or update) + cursor.execute(''' + INSERT INTO jobs (name, schedule, command, image, container, comment, next_run) + VALUES (?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(name) DO UPDATE SET + schedule = excluded.schedule, + command = excluded.command, + image = excluded.image, + container = excluded.container, + comment = excluded.comment, + next_run = excluded.next_run, + updated_at = CURRENT_TIMESTAMP + ''', ( + name, + schedule, + job.get('command'), + job.get('image'), + job.get('container'), + job.get('comment'), + next_run + )) + + # Remove jobs that no longer exist in config + removed_jobs = existing_jobs - config_jobs + for job_name in removed_jobs: + cursor.execute('DELETE FROM jobs WHERE name = ?', (job_name,)) + + conn.commit() + conn.close() + + print(f"โœ… Synced {len(config_jobs)} jobs to database") + if removed_jobs: + print(f" Removed {len(removed_jobs)} stale jobs: {', '.join(removed_jobs)}") + + return 0 + + except FileNotFoundError: + print(f"โŒ Error: Config file not found: {config_path}", file=sys.stderr) + return 1 + except json.JSONDecodeError as e: + print(f"โŒ Error parsing config JSON: {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"โŒ Error syncing jobs: {e}", file=sys.stderr) + return 1 + + +if __name__ == '__main__': + if len(sys.argv) < 2: + print("Usage: sync_jobs.py ", file=sys.stderr) + sys.exit(1) + + sys.exit(sync_jobs_from_config(sys.argv[1])) diff --git a/webapp/templates/index.html b/webapp/templates/index.html new file mode 100644 index 0000000..1667110 --- /dev/null +++ b/webapp/templates/index.html @@ -0,0 +1,94 @@ + + + + + + Docker Crontab Dashboard + + + +
+    <header>
+        <div class="container">
+            <h1>🕐 Docker Crontab Dashboard</h1>
+            <div id="stats-bar">
+                <div class="stat-item">
+                    <span class="stat-label">Total Jobs</span>
+                    <span class="stat-value" id="total-jobs">0</span>
+                </div>
+                <div class="stat-item">
+                    <span class="stat-label">Active</span>
+                    <span class="stat-value" id="active-jobs">0</span>
+                </div>
+                <div class="stat-item">
+                    <span class="stat-label">Recent Failures</span>
+                    <span class="stat-value" id="recent-failures">0</span>
+                </div>
+                <div class="stat-item">
+                    <span class="stat-label">Last 24h Executions</span>
+                    <span class="stat-value" id="last-24h-executions">0</span>
+                </div>
+                <div class="stat-item">
+                    <span class="stat-label">Auto-Refresh</span>
+                    <span class="stat-value">30s</span>
+                </div>
+            </div>
+        </div>
+    </header>
+
+    <main>
+        <div class="container">
+            <section id="jobs-list">
+                <div class="section-header">
+                    <h2>Scheduled Jobs</h2>
+                    <button id="refresh-btn" class="btn btn-secondary">Refresh</button>
+                </div>
+                <div id="jobs-container">
+                    <div class="loading">Loading jobs...</div>
+                </div>
+            </section>
+
+            <section id="execution-history" class="hidden">
+                <div class="section-header">
+                    <h2>Execution History: <span id="current-job-name"></span></h2>
+                    <button id="back-to-jobs" class="btn btn-secondary">Back to Jobs</button>
+                </div>
+                <div id="history-container"></div>
+            </section>
+        </div>
+    </main>
+
+    <div id="log-modal" class="modal hidden">
+        <div class="modal-content">
+            <div class="modal-header">
+                <h3>Execution Logs</h3>
+                <button id="close-modal" class="btn-close">&times;</button>
+            </div>
+            <div class="modal-body">
+                <div class="log-info">
+                    <div><strong>Job:</strong> <span id="log-job-name"></span></div>
+                    <div><strong>Start Time:</strong> <span id="log-start-time"></span></div>
+                    <div><strong>Duration:</strong> <span id="log-duration"></span></div>
+                    <div><strong>Exit Code:</strong> <span id="log-exit-code"></span></div>
+                </div>
+                <div class="log-tabs">
+                    <button class="tab-btn active" data-tab="stdout">stdout</button>
+                    <button class="tab-btn" data-tab="stderr">stderr</button>
+                </div>
+                <div class="log-content">
+                    <pre id="log-stdout" class="log-pane"></pre>
+                    <pre id="log-stderr" class="log-pane hidden"></pre>
+                </div>
+            </div>
+        </div>
+    </div>
+
+    <footer>
+        <div class="container">
+            <p>Docker Crontab Dashboard</p>
+        </div>
+    </footer>
+
+    <script src="/static/js/app.js"></script>
+</body>
+</html>
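
For reference, the job definitions that the entrypoint's build_crontab and webapp/sync_jobs.py consume come from the generated config.working.json. Below is a minimal sketch of one entry; the field names (name, comment, schedule, command, onstart, trigger) are the ones the scripts above query with jq, while the job name, paths, and commands are hypothetical placeholders.

# Hypothetical example only: the job name, paths, and commands are placeholders.
cat > config.json <<'EOF'
[
  {
    "name": "nightly-backup",
    "comment": "Dump the application database every night",
    "schedule": "0 2 * * *",
    "command": "/usr/local/bin/backup.sh",
    "onstart": false,
    "trigger": [
      { "command": "/usr/local/bin/notify.sh backup-finished" }
    ]
  }
]
EOF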
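
The Flask dashboard added in webapp/app.py also exposes a small JSON API. A quick smoke test against a running container might look like the following; it assumes the web UI port (WEB_UI_PORT, default 8080) is published to localhost and that a job named nightly-backup exists, both of which are placeholders.

# Assumes port 8080 is mapped to the host; adjust host, port, and job name as needed.
curl -s http://localhost:8080/api/health        # crond and database status
curl -s http://localhost:8080/api/stats         # dashboard counters
curl -s http://localhost:8080/api/jobs          # all configured jobs
# Manually trigger a job (rate limited to 5 triggers per minute per job):
curl -s -X POST http://localhost:8080/api/trigger/nightly-backup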